Merge "audio policy: enhancing type conversion helper"
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 572fb72..78a1b58 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -160,6 +160,7 @@
virtual void onDeviceError(CameraErrorCode errorCode,
const CaptureResultExtras& resultExtras) {
+ (void) resultExtras;
ALOGE("%s: onDeviceError occurred with: %d", __FUNCTION__, static_cast<int>(errorCode));
Mutex::Autolock l(mLock);
mError = true;
@@ -177,6 +178,8 @@
virtual void onCaptureStarted(const CaptureResultExtras& resultExtras,
int64_t timestamp) {
+ (void) resultExtras;
+ (void) timestamp;
Mutex::Autolock l(mLock);
mLastStatus = RUNNING;
mStatusesHit.push_back(mLastStatus);
@@ -186,6 +189,8 @@
virtual void onResultReceived(const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras) {
+ (void) metadata;
+ (void) resultExtras;
Mutex::Autolock l(mLock);
mLastStatus = SENT_RESULT;
mStatusesHit.push_back(mLastStatus);
@@ -193,6 +198,7 @@
}
virtual void onPrepared(int streamId) {
+ (void) streamId;
Mutex::Autolock l(mLock);
mLastStatus = PREPARED;
mStatusesHit.push_back(mLastStatus);
@@ -465,6 +471,7 @@
callbacks->clearStatus();
int requestId3 = device->submitRequestList(requestList, /*streaming*/false,
/*out*/&lastFrameNumber);
+ EXPECT_LE(0, requestId3);
EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
EXPECT_TRUE(callbacks->waitForIdle());
EXPECT_LE(lastFrameNumberPrev, lastFrameNumber);
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index c4c7b0e..c47a4e7 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -20,6 +20,7 @@
#include <cutils/sched_policy.h>
#include <media/AudioSystem.h>
#include <media/IAudioRecord.h>
+#include <media/Modulo.h>
#include <utils/threads.h>
namespace android {
@@ -526,7 +527,7 @@
// caller must hold lock on mLock for all _l methods
- status_t openRecord_l(size_t epoch, const String16& opPackageName);
+ status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
// FIXME enum is faster than strcmp() for parameter 'from'
status_t restoreRecord_l(const char *from);
@@ -556,9 +557,9 @@
bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
uint32_t mObservedSequence; // last observed value of mSequence
- uint32_t mMarkerPosition; // in wrapping (overflow) frame units
+ Modulo<uint32_t> mMarkerPosition; // in wrapping (overflow) frame units
bool mMarkerReached;
- uint32_t mNewPosition; // in frames
+ Modulo<uint32_t> mNewPosition; // in frames
uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
status_t mStatus;
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index e02f1b7..fe4611c 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -22,6 +22,7 @@
#include <media/AudioTimestamp.h>
#include <media/IAudioTrack.h>
#include <media/AudioResamplerPublic.h>
+#include <media/Modulo.h>
#include <utils/threads.h>
namespace android {
@@ -798,7 +799,7 @@
{ return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
// increment mPosition by the delta of mServer, and return new value of mPosition
- uint32_t updateAndGetPosition_l();
+ Modulo<uint32_t> updateAndGetPosition_l();
// check sample rate and speed is compatible with AudioTrack
bool isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
@@ -885,19 +886,19 @@
bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
uint32_t mObservedSequence; // last observed value of mSequence
- uint32_t mMarkerPosition; // in wrapping (overflow) frame units
+ Modulo<uint32_t> mMarkerPosition; // in wrapping (overflow) frame units
bool mMarkerReached;
- uint32_t mNewPosition; // in frames
+ Modulo<uint32_t> mNewPosition; // in frames
uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
- uint32_t mServer; // in frames, last known mProxy->getPosition()
+ Modulo<uint32_t> mServer; // in frames, last known mProxy->getPosition()
// which is count of frames consumed by server,
// reset by new IAudioTrack,
// whether it is reset by stop() is TBD
- uint32_t mPosition; // in frames, like mServer except continues
+ Modulo<uint32_t> mPosition; // in frames, like mServer except continues
// monotonically after new IAudioTrack,
// and could be easily widened to uint64_t
- uint32_t mReleased; // in frames, count of frames released to server
+ Modulo<uint32_t> mReleased; // count of frames released to server
// but not necessarily consumed by server,
// reset by stop() but continues monotonically
// after new IAudioTrack to restore mPosition,
diff --git a/include/media/Modulo.h b/include/media/Modulo.h
new file mode 100644
index 0000000..23280ac
--- /dev/null
+++ b/include/media/Modulo.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MODULO_H
+#define ANDROID_MODULO_H
+
+#include <type_traits>
+
+namespace android {
+
+// Modulo class is used for intentionally wrapping variables such as
+// counters and timers.
+//
+// It may also be used for variables whose computation depends on the
+// associativity of addition or subtraction.
+//
+// Features:
+// 1) Modulo checks type sizes before performing operations to ensure
+// that the wrap points match. This is critical for safe modular arithmetic.
+// 2) Modulo returns Modulo types from arithmetic operations, thereby
+// avoiding unintentional use in a non-modular computation. A Modulo
+// type is converted to its base non-Modulo type through the value() function.
+// 3) Modulo separates out overflowable types from non-overflowable types.
+// A signed overflow is technically undefined in C and C++.
+// Modulo types do not participate in sanitization.
+// 4) Modulo comparisons are based on signed differences to account for wrap;
+// this is not the same as the direct comparison of values.
+// 5) Safe use of binary arithmetic operations relies on conversions of
+// signed operands to unsigned operands (which are modular arithmetic safe).
+// Conversions which are implementation-defined are assumed to use 2's complement
+// representation. (See A, B, C, D from the ISO/IEC FDIS 14882
+// Information technology — Programming languages — C++).
+//
+// A: ISO/IEC 14882:2011(E) p84 section 4.7 Integral conversions
+// (2) If the destination type is unsigned, the resulting value is the least unsigned
+// integer congruent to the source integer (modulo 2^n where n is the number of bits
+// used to represent the unsigned type). [ Note: In a two’s complement representation,
+// this conversion is conceptual and there is no change in the bit pattern (if there
+// is no truncation). — end note ]
+// (3) If the destination type is signed, the value is unchanged if it can be represented
+// in the destination type (and bit-field width); otherwise, the value is
+// implementation-defined.
+//
+// B: ISO/IEC 14882:2011(E) p88 section 5 Expressions
+// (9) Many binary operators that expect operands of arithmetic or enumeration type
+// cause conversions and yield result types in a similar way. The purpose is to
+// yield a common type, which is also the type of the result. This pattern is called
+// the usual arithmetic conversions, which are defined as follows:
+// [...]
+// Otherwise, if both operands have signed integer types or both have unsigned
+// integer types, the operand with the type of lesser integer conversion rank shall be
+// converted to the type of the operand with greater rank.
+// — Otherwise, if the operand that has unsigned integer type has rank greater than
+// or equal to the rank of the type of the other operand, the operand with signed
+// integer type shall be converted to the type of the operand with unsigned integer type.
+//
+// C: ISO/IEC 14882:2011(E) p86 section 4.13 Integer conversion rank
+// [...] The rank of long long int shall be greater than the rank of long int,
+// which shall be greater than the rank of int, which shall be greater than the
+// rank of short int, which shall be greater than the rank of signed char.
+// — The rank of any unsigned integer type shall equal the rank of the corresponding
+// signed integer type.
+//
+// D: ISO/IEC 14882:2011(E) p75 section 3.9.1 Fundamental types
+// [...] Unsigned integers, declared unsigned, shall obey the laws of arithmetic modulo
+// 2^n where n is the number of bits in the value representation of that particular
+// size of integer.
+//
+// Note:
+// Other libraries do exist for safe integer operations which can detect the
+// possibility of overflow (SafeInt from MS and safe-iop in android).
+// Signed safe computation is also possible from the art header safe_math.h.
+
+template <typename T> class Modulo {
+ T mValue;
+
+public:
+ typedef typename std::make_signed<T>::type signedT;
+ typedef typename std::make_unsigned<T>::type unsignedT;
+
+ Modulo() { } // intentionally uninitialized data
+ Modulo(const T &value) { mValue = value; }
+ const T & value() const { return mValue; } // not assignable
+ signedT signedValue() const { return mValue; }
+ unsignedT unsignedValue() const { return mValue; }
+ void getValue(T *value) const { *value = mValue; } // more type safe than value()
+
+ // modular operations valid only if size of T <= size of S.
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ Modulo<T> operator +=(const Modulo<S> &other) {
+ static_assert(sizeof(T) <= sizeof(S), "argument size mismatch");
+ mValue += other.unsignedValue();
+ return *this;
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ Modulo<T> operator -=(const Modulo<S> &other) {
+ static_assert(sizeof(T) <= sizeof(S), "argument size mismatch");
+ mValue -= other.unsignedValue();
+ return *this;
+ }
+
+ // modular operations resulting in a value valid only at the smaller of the two
+ // Modulo base type sizes, but we only allow equal sizes to avoid confusion.
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ const Modulo<T> operator +(const Modulo<S> &other) const {
+ static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+ return Modulo<T>(mValue + other.unsignedValue());
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ const Modulo<T> operator -(const Modulo<S> &other) const {
+ static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+ return Modulo<T>(mValue - other.unsignedValue());
+ }
+
+ // modular operations that should be checked only at the smaller of
+ // the two type sizes, but we only allow equal sizes to avoid confusion.
+ //
+ // Caution: These relational and comparison operations are not equivalent to
+ // the base type operations.
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ bool operator >(const Modulo<S> &other) const {
+ static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+ return static_cast<signedT>(mValue - other.unsignedValue()) > 0;
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ bool operator >=(const Modulo<S> &other) const {
+ static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+ return static_cast<signedT>(mValue - other.unsignedValue()) >= 0;
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ bool operator ==(const Modulo<S> &other) const {
+ static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+ return static_cast<signedT>(mValue - other.unsignedValue()) == 0;
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ bool operator <=(const Modulo<S> &other) const {
+ static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+ return static_cast<signedT>(mValue - other.unsignedValue()) <= 0;
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ bool operator <(const Modulo<S> &other) const {
+ static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+ return static_cast<signedT>(mValue - other.unsignedValue()) < 0;
+ }
+
+
+ // modular operations with a non-Modulo type allowed with wrapping
+ // because there should be no confusion as to the meaning.
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ Modulo<T> operator +=(const S &other) {
+ mValue += unsignedT(other);
+ return *this;
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ Modulo<T> operator -=(const S &other) {
+ mValue -= unsignedT(other);
+ return *this;
+ }
+
+ // modular operations with a non-Modulo type allowed with wrapping,
+ // but we restrict this only when size of T is greater than or equal to
+ // the size of S to avoid confusion with the nature of overflow.
+ //
+ // Use of this follows left-associative style.
+ //
+ // Note: a Modulo type may be promoted by using "differences" off of
+ // a larger sized type, but we do not automate this.
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ const Modulo<T> operator +(const S &other) const {
+ static_assert(sizeof(T) >= sizeof(S), "argument size mismatch");
+ return Modulo<T>(mValue + unsignedT(other));
+ }
+
+ template <typename S>
+ __attribute__((no_sanitize("integer")))
+ const Modulo<T> operator -(const S &other) const {
+ static_assert(sizeof(T) >= sizeof(S), "argument size mismatch");
+ return Modulo<T>(mValue - unsignedT(other));
+ }
+
+ // multiply is intentionally omitted, but it is a common operator in
+ // modular arithmetic.
+
+ // shift operations are intentionally omitted, but perhaps useful.
+ // For example, left-shifting a negative number is undefined in C++11.
+};
+
+} // namespace android
+
+#endif /* ANDROID_MODULO_H */
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 1e5064f..1f3880f 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -26,6 +26,7 @@
#include <utils/RefBase.h>
#include <audio_utils/roundup.h>
#include <media/AudioResamplerPublic.h>
+#include <media/Modulo.h>
#include <media/SingleStateQueue.h>
namespace android {
@@ -280,11 +281,11 @@
// Call to force an obtainBuffer() to return quickly with -EINTR
void interrupt();
- size_t getPosition() {
+ Modulo<uint32_t> getPosition() {
return mEpoch + mCblk->mServer;
}
- void setEpoch(size_t epoch) {
+ void setEpoch(const Modulo<uint32_t> &epoch) {
mEpoch = epoch;
}
@@ -300,14 +301,14 @@
// in order for the client to be aligned at start of buffer
virtual size_t getMisalignment();
- size_t getEpoch() const {
+ Modulo<uint32_t> getEpoch() const {
return mEpoch;
}
size_t getFramesFilled();
private:
- size_t mEpoch;
+ Modulo<uint32_t> mEpoch;
};
// ----------------------------------------------------------------------------
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 8ffcd4b..1c0d904 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -396,7 +396,7 @@
}
AutoMutex lock(mLock);
- *marker = mMarkerPosition;
+ mMarkerPosition.getValue(marker);
return NO_ERROR;
}
@@ -438,7 +438,7 @@
}
AutoMutex lock(mLock);
- *position = mProxy->getPosition();
+ mProxy->getPosition().getValue(position);
return NO_ERROR;
}
@@ -480,7 +480,7 @@
// -------------------------------------------------------------------------
// must be called with mLock held
-status_t AudioRecord::openRecord_l(size_t epoch, const String16& opPackageName)
+status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
{
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
@@ -890,23 +890,23 @@
}
// Get current position of server
- size_t position = mProxy->getPosition();
+ Modulo<uint32_t> position(mProxy->getPosition());
// Manage marker callback
bool markerReached = false;
- size_t markerPosition = mMarkerPosition;
+ Modulo<uint32_t> markerPosition(mMarkerPosition);
// FIXME fails for wraparound, need 64 bits
- if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+ if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
mMarkerReached = markerReached = true;
}
// Determine the number of new position callback(s) that will be needed, while locked
size_t newPosCount = 0;
- size_t newPosition = mNewPosition;
+ Modulo<uint32_t> newPosition(mNewPosition);
uint32_t updatePeriod = mUpdatePeriod;
// FIXME fails for wraparound, need 64 bits
if (updatePeriod > 0 && position >= newPosition) {
- newPosCount = ((position - newPosition) / updatePeriod) + 1;
+ newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
mNewPosition += updatePeriod * newPosCount;
}
@@ -933,7 +933,7 @@
mCbf(EVENT_MARKER, mUserData, &markerPosition);
}
while (newPosCount > 0) {
- size_t temp = newPosition;
+ size_t temp = newPosition.value(); // FIXME size_t != uint32_t
mCbf(EVENT_NEW_POS, mUserData, &temp);
newPosition += updatePeriod;
newPosCount--;
@@ -951,10 +951,10 @@
// Compute the estimated time until the next timed event (position, markers)
uint32_t minFrames = ~0;
if (!markerReached && position < markerPosition) {
- minFrames = markerPosition - position;
+ minFrames = (markerPosition - position).value();
}
if (updatePeriod > 0) {
- uint32_t remaining = newPosition - position;
+ uint32_t remaining = (newPosition - position).value();
if (remaining < minFrames) {
minFrames = remaining;
}
@@ -1087,7 +1087,7 @@
// if the new IAudioRecord is created, openRecord_l() will modify the
// following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
// It will also delete the strong references on previous IAudioRecord and IMemory
- size_t position = mProxy->getPosition();
+ Modulo<uint32_t> position(mProxy->getPosition());
mNewPosition = position + mUpdatePeriod;
status_t result = openRecord_l(position, mOpPackageName);
if (result == NO_ERROR) {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 82b6736..5e14940 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -920,7 +920,7 @@
}
AutoMutex lock(mLock);
- *marker = mMarkerPosition;
+ mMarkerPosition.getValue(marker);
return NO_ERROR;
}
@@ -1018,7 +1018,7 @@
// IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
*position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
- 0 : updateAndGetPosition_l();
+ 0 : updateAndGetPosition_l().value();
}
return NO_ERROR;
}
@@ -1774,23 +1774,23 @@
}
// Get current position of server
- size_t position = updateAndGetPosition_l();
+ Modulo<uint32_t> position(updateAndGetPosition_l());
// Manage marker callback
bool markerReached = false;
- size_t markerPosition = mMarkerPosition;
- // FIXME fails for wraparound, need 64 bits
- if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+ Modulo<uint32_t> markerPosition(mMarkerPosition);
+ // uses 32 bit wraparound for comparison with position.
+ if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
mMarkerReached = markerReached = true;
}
// Determine number of new position callback(s) that will be needed, while locked
size_t newPosCount = 0;
- size_t newPosition = mNewPosition;
- size_t updatePeriod = mUpdatePeriod;
+ Modulo<uint32_t> newPosition(mNewPosition);
+ uint32_t updatePeriod = mUpdatePeriod;
// FIXME fails for wraparound, need 64 bits
if (updatePeriod > 0 && position >= newPosition) {
- newPosCount = ((position - newPosition) / updatePeriod) + 1;
+ newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
mNewPosition += updatePeriod * newPosCount;
}
@@ -1891,7 +1891,7 @@
mCbf(EVENT_MARKER, mUserData, &markerPosition);
}
while (newPosCount > 0) {
- size_t temp = newPosition;
+ size_t temp = newPosition.value(); // FIXME size_t != uint32_t
mCbf(EVENT_NEW_POS, mUserData, &temp);
newPosition += updatePeriod;
newPosCount--;
@@ -1915,14 +1915,14 @@
// FIXME only for non-compressed audio
uint32_t minFrames = ~0;
if (!markerReached && position < markerPosition) {
- minFrames = markerPosition - position;
+ minFrames = (markerPosition - position).value();
}
if (loopPeriod > 0 && loopPeriod < minFrames) {
// loopPeriod is already adjusted for actual position.
minFrames = loopPeriod;
}
if (updatePeriod > 0) {
- minFrames = min(minFrames, uint32_t(newPosition - position));
+ minFrames = min(minFrames, (newPosition - position).value());
}
// If > 0, poll periodically to recover from a stuck server. A good value is 2.
@@ -2157,11 +2157,11 @@
return result;
}
-uint32_t AudioTrack::updateAndGetPosition_l()
+Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
{
// This is the sole place to read server consumed frames
- uint32_t newServer = mProxy->getPosition();
- uint32_t delta = newServer > mServer ? newServer - mServer : 0;
+ Modulo<uint32_t> newServer(mProxy->getPosition());
+ const int32_t delta = (newServer - mServer).signedValue();
// TODO There is controversy about whether there can be "negative jitter" in server position.
// This should be investigated further, and if possible, it should be addressed.
// A more definite failure mode is infrequent polling by client.
@@ -2170,12 +2170,14 @@
// That should ensure delta never goes negative for infrequent polling
// unless the server has more than 2^31 frames in its buffer,
// in which case the use of uint32_t for these counters has bigger issues.
- if (newServer < mServer) {
- ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d",
- (int32_t) newServer - mServer);
- }
+ ALOGE_IF(delta < 0,
+ "detected illegal retrograde motion by the server: mServer advanced by %d",
+ delta);
mServer = newServer;
- return mPosition += delta;
+ if (delta > 0) { // avoid retrograde
+ mPosition += delta;
+ }
+ return mPosition;
}
bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
@@ -2197,7 +2199,6 @@
return mAudioTrack->setParameters(keyValuePairs);
}
-__attribute__((no_sanitize("integer")))
status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
{
AutoMutex lock(mLock);
@@ -2310,15 +2311,19 @@
// If this delta between these is greater than the client position, it means that
// actually presented is still stuck at the starting line (figuratively speaking),
// waiting for the first frame to go by. So we can't report a valid timestamp yet.
- if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
+ // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
+ // mPosition exceeds 32 bits.
+ // TODO Remove when timestamp is updated to contain pipeline status info.
+ const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
+ if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
+ && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
return INVALID_OPERATION;
}
// Convert timestamp position from server time base to client time base.
// TODO The following code should work OK now because timestamp.mPosition is 32-bit.
// But if we change it to 64-bit then this could fail.
- // Split this out instead of using += to prevent unsigned overflow
- // checks in the outer sum.
- timestamp.mPosition = timestamp.mPosition + static_cast<int32_t>(mPosition) - mServer;
+ // Use Modulo computation here.
+ timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
// Immediately after a call to getPosition_l(), mPosition and
// mServer both represent the same frame position. mPosition is
// in client's point of view, and mServer is in server's point of
@@ -2332,9 +2337,9 @@
// This is sometimes caused by erratic reports of the available space in the ALSA drivers.
if (status == NO_ERROR) {
if (previousTimestampValid) {
-#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * 1000000000 + time.tv_nsec)
- const uint64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
- const uint64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
+#define TIME_TO_NANOS(time) ((int64_t)time.tv_sec * 1000000000 + time.tv_nsec)
+ const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
+ const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
#undef TIME_TO_NANOS
if (currentTimeNanos < previousTimeNanos) {
ALOGW("retrograde timestamp time");
@@ -2343,8 +2348,8 @@
// Looking at signed delta will work even when the timestamps
// are wrapping around.
- int32_t deltaPosition = static_cast<int32_t>(timestamp.mPosition
- - mPreviousTimestamp.mPosition);
+ int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
+ - mPreviousTimestamp.mPosition).signedValue();
// position can bobble slightly as an artifact; this hides the bobble
static const int32_t MINIMUM_POSITION_DELTA = 8;
if (deltaPosition < 0) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index bf91134..be59bf2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1552,10 +1552,9 @@
mDrainAudioQueuePending = false;
mDrainVideoQueuePending = false;
- if (mHasAudio) {
- mAudioSink->pause();
- startAudioOffloadPauseTimeout();
- }
+ // Note: audio data may not have been decoded, and the AudioSink may not be opened.
+ mAudioSink->pause();
+ startAudioOffloadPauseTimeout();
ALOGV("now paused audio queue has %zu entries, video has %zu entries",
mAudioQueue.size(), mVideoQueue.size());
@@ -1566,8 +1565,9 @@
return;
}
- if (mHasAudio) {
- cancelAudioOffloadPauseTimeout();
+ // Note: audio data may not have been decoded, and the AudioSink may not be opened.
+ cancelAudioOffloadPauseTimeout();
+ if (mAudioSink->ready()) {
status_t err = mAudioSink->start();
if (err != OK) {
ALOGE("cannot start AudioSink err %d", err);
diff --git a/media/libstagefright/ProcessInfo.cpp b/media/libstagefright/ProcessInfo.cpp
index b4172b3..353f108 100644
--- a/media/libstagefright/ProcessInfo.cpp
+++ b/media/libstagefright/ProcessInfo.cpp
@@ -32,19 +32,23 @@
sp<IProcessInfoService> service = interface_cast<IProcessInfoService>(binder);
size_t length = 1;
- int32_t states;
- status_t err = service->getProcessStatesFromPids(length, &pid, &states);
+ int32_t state;
+ static const int32_t INVALID_ADJ = -10000;
+ static const int32_t NATIVE_ADJ = -1000;
+ int32_t score = INVALID_ADJ;
+ status_t err = service->getProcessStatesAndOomScoresFromPids(length, &pid, &state, &score);
if (err != OK) {
- ALOGE("getProcessStatesFromPids failed");
+ ALOGE("getProcessStatesAndOomScoresFromPids failed");
return false;
}
- ALOGV("pid %d states %d", pid, states);
- if (states < 0) {
+ ALOGV("pid %d state %d score %d", pid, state, score);
+ if (score <= NATIVE_ADJ) {
+ ALOGE("pid %d invalid OOM adjustments value %d", pid, score);
return false;
}
- // Use process state as the priority. Lower the value, higher the priority.
- *priority = states;
+ // Use OOM adjustments value as the priority. Lower the value, higher the priority.
+ *priority = score;
return true;
}
diff --git a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
index 1ecc11f..8704ce5 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
@@ -115,6 +115,7 @@
);
+__attribute__((no_sanitize("integer")))
void ACELP_4t64_fx(
Word16 dn[], /* (i) <12b : correlation between target x[] and H[] */
Word16 cn[], /* (i) <12b : residual after long term prediction */
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index 8240f83..f2a4e65 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -37,6 +37,10 @@
#include <inttypes.h>
+#ifndef INT32_MAX
+#define INT32_MAX 2147483647
+#endif
+
namespace android {
template<class T>
@@ -137,6 +141,11 @@
if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
// Color conversion is needed.
free(mInputFrameData);
+ mInputFrameData = NULL;
+ if (((uint64_t)mWidth * mHeight) > ((uint64_t)INT32_MAX / 3)) {
+ ALOGE("b/25812794, Buffer size is too big.");
+ return OMX_ErrorBadParameter;
+ }
mInputFrameData =
(uint8_t *) malloc((mWidth * mHeight * 3 ) >> 1);
CHECK(mInputFrameData != NULL);
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index e654843..410f9d0 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -26,6 +26,10 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
+#ifndef INT32_MAX
+#define INT32_MAX 2147483647
+#endif
+
namespace android {
template<class T>
@@ -315,6 +319,11 @@
if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
free(mConversionBuffer);
+ mConversionBuffer = NULL;
+ if (((uint64_t)mWidth * mHeight) > ((uint64_t)INT32_MAX / 3)) {
+ ALOGE("b/25812794, Buffer size is too big.");
+ return UNKNOWN_ERROR;
+ }
mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
if (mConversionBuffer == NULL) {
ALOGE("Allocating conversion buffer failed.");
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index 01117e6..f8b913a 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -133,6 +133,10 @@
printf("no MTP string\n");
}
}
+#else
+ else {
+ continue;
+ }
#endif
// if we got here, then we have a likely MTP or PTP device
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3d4d120..553970b 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -358,54 +358,54 @@
audio_devices_t mDevices;
const char * mString;
} mappingsOut[] = {
- AUDIO_DEVICE_OUT_EARPIECE, "EARPIECE",
- AUDIO_DEVICE_OUT_SPEAKER, "SPEAKER",
- AUDIO_DEVICE_OUT_WIRED_HEADSET, "WIRED_HEADSET",
- AUDIO_DEVICE_OUT_WIRED_HEADPHONE, "WIRED_HEADPHONE",
- AUDIO_DEVICE_OUT_BLUETOOTH_SCO, "BLUETOOTH_SCO",
- AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET",
- AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, "BLUETOOTH_SCO_CARKIT",
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, "BLUETOOTH_A2DP",
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, "BLUETOOTH_A2DP_HEADPHONES",
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, "BLUETOOTH_A2DP_SPEAKER",
- AUDIO_DEVICE_OUT_AUX_DIGITAL, "AUX_DIGITAL",
- AUDIO_DEVICE_OUT_HDMI, "HDMI",
- AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET",
- AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET",
- AUDIO_DEVICE_OUT_USB_ACCESSORY, "USB_ACCESSORY",
- AUDIO_DEVICE_OUT_USB_DEVICE, "USB_DEVICE",
- AUDIO_DEVICE_OUT_TELEPHONY_TX, "TELEPHONY_TX",
- AUDIO_DEVICE_OUT_LINE, "LINE",
- AUDIO_DEVICE_OUT_HDMI_ARC, "HDMI_ARC",
- AUDIO_DEVICE_OUT_SPDIF, "SPDIF",
- AUDIO_DEVICE_OUT_FM, "FM",
- AUDIO_DEVICE_OUT_AUX_LINE, "AUX_LINE",
- AUDIO_DEVICE_OUT_SPEAKER_SAFE, "SPEAKER_SAFE",
- AUDIO_DEVICE_OUT_IP, "IP",
- AUDIO_DEVICE_NONE, "NONE", // must be last
+ {AUDIO_DEVICE_OUT_EARPIECE, "EARPIECE"},
+ {AUDIO_DEVICE_OUT_SPEAKER, "SPEAKER"},
+ {AUDIO_DEVICE_OUT_WIRED_HEADSET, "WIRED_HEADSET"},
+ {AUDIO_DEVICE_OUT_WIRED_HEADPHONE, "WIRED_HEADPHONE"},
+ {AUDIO_DEVICE_OUT_BLUETOOTH_SCO, "BLUETOOTH_SCO"},
+ {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
+ {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, "BLUETOOTH_SCO_CARKIT"},
+ {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, "BLUETOOTH_A2DP"},
+ {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,"BLUETOOTH_A2DP_HEADPHONES"},
+ {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, "BLUETOOTH_A2DP_SPEAKER"},
+ {AUDIO_DEVICE_OUT_AUX_DIGITAL, "AUX_DIGITAL"},
+ {AUDIO_DEVICE_OUT_HDMI, "HDMI"},
+ {AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,"ANLG_DOCK_HEADSET"},
+ {AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,"DGTL_DOCK_HEADSET"},
+ {AUDIO_DEVICE_OUT_USB_ACCESSORY, "USB_ACCESSORY"},
+ {AUDIO_DEVICE_OUT_USB_DEVICE, "USB_DEVICE"},
+ {AUDIO_DEVICE_OUT_TELEPHONY_TX, "TELEPHONY_TX"},
+ {AUDIO_DEVICE_OUT_LINE, "LINE"},
+ {AUDIO_DEVICE_OUT_HDMI_ARC, "HDMI_ARC"},
+ {AUDIO_DEVICE_OUT_SPDIF, "SPDIF"},
+ {AUDIO_DEVICE_OUT_FM, "FM"},
+ {AUDIO_DEVICE_OUT_AUX_LINE, "AUX_LINE"},
+ {AUDIO_DEVICE_OUT_SPEAKER_SAFE, "SPEAKER_SAFE"},
+ {AUDIO_DEVICE_OUT_IP, "IP"},
+ {AUDIO_DEVICE_NONE, "NONE"}, // must be last
}, mappingsIn[] = {
- AUDIO_DEVICE_IN_COMMUNICATION, "COMMUNICATION",
- AUDIO_DEVICE_IN_AMBIENT, "AMBIENT",
- AUDIO_DEVICE_IN_BUILTIN_MIC, "BUILTIN_MIC",
- AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET",
- AUDIO_DEVICE_IN_WIRED_HEADSET, "WIRED_HEADSET",
- AUDIO_DEVICE_IN_AUX_DIGITAL, "AUX_DIGITAL",
- AUDIO_DEVICE_IN_VOICE_CALL, "VOICE_CALL",
- AUDIO_DEVICE_IN_TELEPHONY_RX, "TELEPHONY_RX",
- AUDIO_DEVICE_IN_BACK_MIC, "BACK_MIC",
- AUDIO_DEVICE_IN_REMOTE_SUBMIX, "REMOTE_SUBMIX",
- AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET",
- AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET",
- AUDIO_DEVICE_IN_USB_ACCESSORY, "USB_ACCESSORY",
- AUDIO_DEVICE_IN_USB_DEVICE, "USB_DEVICE",
- AUDIO_DEVICE_IN_FM_TUNER, "FM_TUNER",
- AUDIO_DEVICE_IN_TV_TUNER, "TV_TUNER",
- AUDIO_DEVICE_IN_LINE, "LINE",
- AUDIO_DEVICE_IN_SPDIF, "SPDIF",
- AUDIO_DEVICE_IN_BLUETOOTH_A2DP, "BLUETOOTH_A2DP",
- AUDIO_DEVICE_IN_LOOPBACK, "LOOPBACK",
- AUDIO_DEVICE_IN_IP, "IP",
- AUDIO_DEVICE_NONE, "NONE", // must be last
+ {AUDIO_DEVICE_IN_COMMUNICATION, "COMMUNICATION"},
+ {AUDIO_DEVICE_IN_AMBIENT, "AMBIENT"},
+ {AUDIO_DEVICE_IN_BUILTIN_MIC, "BUILTIN_MIC"},
+ {AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
+ {AUDIO_DEVICE_IN_WIRED_HEADSET, "WIRED_HEADSET"},
+ {AUDIO_DEVICE_IN_AUX_DIGITAL, "AUX_DIGITAL"},
+ {AUDIO_DEVICE_IN_VOICE_CALL, "VOICE_CALL"},
+ {AUDIO_DEVICE_IN_TELEPHONY_RX, "TELEPHONY_RX"},
+ {AUDIO_DEVICE_IN_BACK_MIC, "BACK_MIC"},
+ {AUDIO_DEVICE_IN_REMOTE_SUBMIX, "REMOTE_SUBMIX"},
+ {AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET"},
+ {AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET"},
+ {AUDIO_DEVICE_IN_USB_ACCESSORY, "USB_ACCESSORY"},
+ {AUDIO_DEVICE_IN_USB_DEVICE, "USB_DEVICE"},
+ {AUDIO_DEVICE_IN_FM_TUNER, "FM_TUNER"},
+ {AUDIO_DEVICE_IN_TV_TUNER, "TV_TUNER"},
+ {AUDIO_DEVICE_IN_LINE, "LINE"},
+ {AUDIO_DEVICE_IN_SPDIF, "SPDIF"},
+ {AUDIO_DEVICE_IN_BLUETOOTH_A2DP, "BLUETOOTH_A2DP"},
+ {AUDIO_DEVICE_IN_LOOPBACK, "LOOPBACK"},
+ {AUDIO_DEVICE_IN_IP, "IP"},
+ {AUDIO_DEVICE_NONE, "NONE"}, // must be last
};
String8 result;
audio_devices_t allDevices = AUDIO_DEVICE_NONE;
@@ -443,11 +443,11 @@
audio_input_flags_t mFlag;
const char * mString;
} mappings[] = {
- AUDIO_INPUT_FLAG_FAST, "FAST",
- AUDIO_INPUT_FLAG_HW_HOTWORD, "HW_HOTWORD",
- AUDIO_INPUT_FLAG_RAW, "RAW",
- AUDIO_INPUT_FLAG_SYNC, "SYNC",
- AUDIO_INPUT_FLAG_NONE, "NONE", // must be last
+ {AUDIO_INPUT_FLAG_FAST, "FAST"},
+ {AUDIO_INPUT_FLAG_HW_HOTWORD, "HW_HOTWORD"},
+ {AUDIO_INPUT_FLAG_RAW, "RAW"},
+ {AUDIO_INPUT_FLAG_SYNC, "SYNC"},
+ {AUDIO_INPUT_FLAG_NONE, "NONE"}, // must be last
};
String8 result;
audio_input_flags_t allFlags = AUDIO_INPUT_FLAG_NONE;
@@ -479,17 +479,17 @@
audio_output_flags_t mFlag;
const char * mString;
} mappings[] = {
- AUDIO_OUTPUT_FLAG_DIRECT, "DIRECT",
- AUDIO_OUTPUT_FLAG_PRIMARY, "PRIMARY",
- AUDIO_OUTPUT_FLAG_FAST, "FAST",
- AUDIO_OUTPUT_FLAG_DEEP_BUFFER, "DEEP_BUFFER",
- AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD, "COMPRESS_OFFLOAD",
- AUDIO_OUTPUT_FLAG_NON_BLOCKING, "NON_BLOCKING",
- AUDIO_OUTPUT_FLAG_HW_AV_SYNC, "HW_AV_SYNC",
- AUDIO_OUTPUT_FLAG_RAW, "RAW",
- AUDIO_OUTPUT_FLAG_SYNC, "SYNC",
- AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO, "IEC958_NONAUDIO",
- AUDIO_OUTPUT_FLAG_NONE, "NONE", // must be last
+ {AUDIO_OUTPUT_FLAG_DIRECT, "DIRECT"},
+ {AUDIO_OUTPUT_FLAG_PRIMARY, "PRIMARY"},
+ {AUDIO_OUTPUT_FLAG_FAST, "FAST"},
+ {AUDIO_OUTPUT_FLAG_DEEP_BUFFER, "DEEP_BUFFER"},
+ {AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,"COMPRESS_OFFLOAD"},
+ {AUDIO_OUTPUT_FLAG_NON_BLOCKING, "NON_BLOCKING"},
+ {AUDIO_OUTPUT_FLAG_HW_AV_SYNC, "HW_AV_SYNC"},
+ {AUDIO_OUTPUT_FLAG_RAW, "RAW"},
+ {AUDIO_OUTPUT_FLAG_SYNC, "SYNC"},
+ {AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO, "IEC958_NONAUDIO"},
+ {AUDIO_OUTPUT_FLAG_NONE, "NONE"}, // must be last
};
String8 result;
audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
@@ -3855,7 +3855,10 @@
// because we're about to decrement the last sp<> on those tracks.
block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
} else {
- LOG_ALWAYS_FATAL("fast track %d should have been active", j);
+ LOG_ALWAYS_FATAL("fast track %d should have been active; "
+ "mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
+ j, track->mState, state->mTrackMask, recentUnderruns,
+ track->sharedBuffer() != 0);
}
tracksToRemove->add(track);
// Avoids a misleading display in dumpsys
@@ -4399,8 +4402,12 @@
dprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
// Make a non-atomic copy of fast mixer dump state so it won't change underneath us
- const FastMixerDumpState copy(mFastMixerDumpState);
- copy.dump(fd);
+ // while we are dumping it. It may be inconsistent, but it won't mutate!
+ // This is a large object so we place it on the heap.
+ // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+ const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+ copy->dump(fd);
+ delete copy;
#ifdef STATE_QUEUE_DUMP
// Similar for state queue
@@ -6373,9 +6380,13 @@
dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
- // Make a non-atomic copy of fast capture dump state so it won't change underneath us
- const FastCaptureDumpState copy(mFastCaptureDumpState);
- copy.dump(fd);
+ // Make a non-atomic copy of fast capture dump state so it won't change underneath us
+ // while we are dumping it. It may be inconsistent, but it won't mutate!
+ // This is a large object so we place it on the heap.
+ // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+ const FastCaptureDumpState *copy = new FastCaptureDumpState(mFastCaptureDumpState);
+ copy->dump(fd);
+ delete copy;
}
void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
index c12ddeb..418c94c 100755
--- a/services/audiopolicy/engineconfigurable/src/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Stream.cpp
@@ -97,13 +97,13 @@
if (it == mVolumeProfiles.end()) {
ALOGE("%s: device category %d not found for stream %s", __FUNCTION__, deviceCategory,
getName().c_str());
- return 1.0f;
+ return 0.0f;
}
const VolumeCurvePoints curve = mVolumeProfiles[deviceCategory];
if (curve.size() != Volume::VOLCNT) {
ALOGE("%s: invalid profile for category %d and for stream %s", __FUNCTION__, deviceCategory,
getName().c_str());
- return 1.0f;
+ return 0.0f;
}
// the volume index in the UI is relative to the min and max volume indices for this stream type
@@ -112,7 +112,7 @@
if (mIndexMax - mIndexMin == 0) {
ALOGE("%s: Invalid volume indexes Min=Max=%d", __FUNCTION__, mIndexMin);
- return 1.0f;
+ return 0.0f;
}
int volIdx = (nbSteps * (indexInUi - mIndexMin)) /
(mIndexMax - mIndexMin);
@@ -120,7 +120,7 @@
// find what part of the curve this index volume belongs to, or if it's out of bounds
int segment = 0;
if (volIdx < curve[Volume::VOLMIN].mIndex) { // out of bounds
- return 0.0f;
+ return VOLUME_MIN_DB;
} else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
segment = 0;
} else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
@@ -128,7 +128,7 @@
} else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
segment = 2;
} else { // out of bounds
- return 1.0f;
+ return 0.0f;
}
// linear interpolation in the attenuation table in dB
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 45900c4..9ba8f3f 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -22,7 +22,6 @@
LOCAL_SRC_FILES:= \
CameraService.cpp \
- CameraDeviceFactory.cpp \
CameraFlashlight.cpp \
common/Camera2ClientBase.cpp \
common/CameraDeviceBase.cpp \
@@ -35,14 +34,10 @@
api1/client2/StreamingProcessor.cpp \
api1/client2/JpegProcessor.cpp \
api1/client2/CallbackProcessor.cpp \
- api1/client2/ZslProcessor.cpp \
- api1/client2/ZslProcessorInterface.cpp \
- api1/client2/BurstCapture.cpp \
api1/client2/JpegCompressor.cpp \
api1/client2/CaptureSequencer.cpp \
- api1/client2/ZslProcessor3.cpp \
+ api1/client2/ZslProcessor.cpp \
api2/CameraDeviceClient.cpp \
- device2/Camera2Device.cpp \
device3/Camera3Device.cpp \
device3/Camera3Stream.cpp \
device3/Camera3IOStreamBase.cpp \
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
deleted file mode 100644
index 6589e27..0000000
--- a/services/camera/libcameraservice/CameraDeviceFactory.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "CameraDeviceFactory"
-#include <utils/Log.h>
-
-#include "CameraService.h"
-#include "CameraDeviceFactory.h"
-#include "common/CameraDeviceBase.h"
-#include "device2/Camera2Device.h"
-#include "device3/Camera3Device.h"
-
-namespace android {
-
-wp<CameraService> CameraDeviceFactory::sService;
-
-sp<CameraDeviceBase> CameraDeviceFactory::createDevice(int cameraId) {
-
- sp<CameraService> svc = sService.promote();
- if (svc == 0) {
- ALOGE("%s: No service registered", __FUNCTION__);
- return NULL;
- }
-
- int deviceVersion = svc->getDeviceVersion(cameraId, /*facing*/NULL);
-
- sp<CameraDeviceBase> device;
-
- switch (deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_2_0:
- case CAMERA_DEVICE_API_VERSION_2_1:
- device = new Camera2Device(cameraId);
- break;
- case CAMERA_DEVICE_API_VERSION_3_0:
- case CAMERA_DEVICE_API_VERSION_3_1:
- case CAMERA_DEVICE_API_VERSION_3_2:
- case CAMERA_DEVICE_API_VERSION_3_3:
- device = new Camera3Device(cameraId);
- break;
- default:
- ALOGE("%s: Camera %d: Unknown HAL device version %d",
- __FUNCTION__, cameraId, deviceVersion);
- device = NULL;
- break;
- }
-
- ALOGV_IF(device != 0, "Created a new camera device for version %d",
- deviceVersion);
-
- return device;
-}
-
-void CameraDeviceFactory::registerService(wp<CameraService> service) {
- ALOGV("%s: Registered service %p", __FUNCTION__,
- service.promote().get());
-
- sService = service;
-}
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.h b/services/camera/libcameraservice/CameraDeviceFactory.h
deleted file mode 100644
index 236dc56..0000000
--- a/services/camera/libcameraservice/CameraDeviceFactory.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
-#define ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
-
-#include <utils/RefBase.h>
-
-namespace android {
-
-class CameraDeviceBase;
-class CameraService;
-
-/**
- * Create the right instance of Camera2Device or Camera3Device
- * automatically based on the device version.
- */
-class CameraDeviceFactory : public virtual RefBase {
- public:
- static void registerService(wp<CameraService> service);
-
- // Prerequisite: Call registerService.
- static sp<CameraDeviceBase> createDevice(int cameraId);
- private:
- CameraDeviceFactory(wp<CameraService> service);
-
- static wp<CameraService> sService;
-};
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 406c1c4..0afd945 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -27,7 +27,7 @@
#include "gui/IGraphicBufferConsumer.h"
#include "gui/BufferQueue.h"
#include "camera/camera2/CaptureRequest.h"
-#include "CameraDeviceFactory.h"
+#include "device3/Camera3Device.h"
namespace android {
@@ -78,7 +78,7 @@
deviceVersion = info.device_version;
}
- if (deviceVersion >= CAMERA_DEVICE_API_VERSION_2_0) {
+ if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
CameraDeviceClientFlashControl *flashControl =
new CameraDeviceClientFlashControl(*mCameraModule,
*mCallbacks);
@@ -193,8 +193,6 @@
}
bool CameraFlashlight::hasFlashUnit(const String8& cameraId) {
- status_t res;
-
Mutex::Autolock l(mLock);
return hasFlashUnitLocked(cameraId);
}
@@ -302,7 +300,8 @@
/////////////////////////////////////////////////////////////////////
ModuleFlashControl::ModuleFlashControl(CameraModule& cameraModule,
const camera_module_callbacks_t& callbacks) :
- mCameraModule(&cameraModule) {
+ mCameraModule(&cameraModule) {
+ (void) callbacks;
}
ModuleFlashControl::~ModuleFlashControl() {
@@ -478,7 +477,7 @@
}
sp<CameraDeviceBase> device =
- CameraDeviceFactory::createDevice(atoi(cameraId.string()));
+ new Camera3Device(atoi(cameraId.string()));
if (device == NULL) {
return NO_MEMORY;
}
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index ebf6779..846d790 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -55,7 +55,6 @@
#include "api1/Camera2Client.h"
#include "api2/CameraDeviceClient.h"
#include "utils/CameraTraces.h"
-#include "CameraDeviceFactory.h"
namespace android {
@@ -246,8 +245,6 @@
mModule->setCallbacks(this);
}
- CameraDeviceFactory::registerService(this);
-
CameraService::pingCameraServiceProxy();
}
@@ -364,7 +361,8 @@
res = setTorchStatusLocked(cameraId, newStatus);
if (res) {
- ALOGE("%s: Failed to set the torch status", __FUNCTION__, (uint32_t)newStatus);
+ ALOGE("%s: Failed to set the torch status to %d: %s (%d)", __FUNCTION__,
+ (uint32_t)newStatus, strerror(-res), res);
return;
}
@@ -481,7 +479,6 @@
Vector<Size> sizes;
Vector<Size> jpegSizes;
Vector<int32_t> formats;
- const char* supportedPreviewFormats;
{
shimParams.getSupportedPreviewSizes(/*out*/sizes);
shimParams.getSupportedPreviewFormats(/*out*/formats);
@@ -559,7 +556,7 @@
int facing;
status_t ret = OK;
if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0 ||
- getDeviceVersion(cameraId, &facing) <= CAMERA_DEVICE_API_VERSION_2_1 ) {
+ getDeviceVersion(cameraId, &facing) < CAMERA_DEVICE_API_VERSION_3_0) {
/**
* Backwards compatibility mode for old HALs:
* - Convert CameraInfo into static CameraMetadata properties.
@@ -725,8 +722,6 @@
return -EOPNOTSUPP;
}
break;
- case CAMERA_DEVICE_API_VERSION_2_0:
- case CAMERA_DEVICE_API_VERSION_2_1:
case CAMERA_DEVICE_API_VERSION_3_0:
case CAMERA_DEVICE_API_VERSION_3_1:
case CAMERA_DEVICE_API_VERSION_3_2:
@@ -1306,7 +1301,6 @@
// update the link to client's death
Mutex::Autolock al(mTorchClientMapMutex);
ssize_t index = mTorchClientMap.indexOfKey(id);
- BatteryNotifier& notifier(BatteryNotifier::getInstance());
if (enabled) {
if (index == NAME_NOT_FOUND) {
mTorchClientMap.add(id, clientBinder);
@@ -1463,8 +1457,6 @@
switch(deviceVersion) {
case CAMERA_DEVICE_API_VERSION_1_0:
- case CAMERA_DEVICE_API_VERSION_2_0:
- case CAMERA_DEVICE_API_VERSION_2_1:
case CAMERA_DEVICE_API_VERSION_3_0:
case CAMERA_DEVICE_API_VERSION_3_1:
if (apiVersion == API_VERSION_2) {
@@ -1555,9 +1547,29 @@
/**
* Check camera capabilities, such as support for basic color operation
+ * Also check that the device HAL version is still in support
*/
int CameraService::checkCameraCapabilities(int id, camera_info info, int *latestStrangeCameraId) {
+ // Verify the device version is in the supported range
+ switch (info.device_version) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ case CAMERA_DEVICE_API_VERSION_3_1:
+ case CAMERA_DEVICE_API_VERSION_3_2:
+ case CAMERA_DEVICE_API_VERSION_3_3:
+ // in support
+ break;
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ // no longer supported
+ default:
+ ALOGE("%s: Device %d has HAL version %x, which is not supported",
+ __FUNCTION__, id, info.device_version);
+ logServiceError("Unsupported device HAL version", NO_INIT);
+ return NO_INIT;
+ }
+
// Assume all devices pre-v3.3 are backward-compatible
bool isBackwardCompatible = true;
if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0
@@ -1591,10 +1603,10 @@
ALOGE("%s: Normal camera ID %d higher than strange camera ID %d. "
"This is not allowed due backward-compatibility requirements",
__FUNCTION__, id, *latestStrangeCameraId);
- logServiceError("Invalid order of camera devices", ENODEV);
+ logServiceError("Invalid order of camera devices", NO_INIT);
mNumberOfCameras = 0;
mNumberOfNormalCameras = 0;
- return INVALID_OPERATION;
+ return NO_INIT;
}
}
return OK;
@@ -1752,7 +1764,7 @@
void CameraService::logServiceError(const char* msg, int errorCode) {
String8 curTime = getFormattedCurrentTime();
- logEvent(String8::format("SERVICE ERROR: %s : %d (%s)", msg, errorCode, strerror(errorCode)));
+ logEvent(String8::format("SERVICE ERROR: %s : %d (%s)", msg, errorCode, strerror(-errorCode)));
}
status_t CameraService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
@@ -2073,6 +2085,8 @@
void CameraService::Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
const CaptureResultExtras& resultExtras) {
+ (void) errorCode;
+ (void) resultExtras;
if (mRemoteCallback != NULL) {
mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
} else {
@@ -2340,7 +2354,7 @@
result.appendFormat(" Resource Cost: %d\n", state.second->getCost());
result.appendFormat(" Conflicting Devices:");
for (auto& id : conflicting) {
- result.appendFormat(" %s", cameraId.string());
+ result.appendFormat(" %s", id.string());
}
if (conflicting.size() == 0) {
result.appendFormat(" NONE");
@@ -2348,7 +2362,7 @@
result.appendFormat("\n");
result.appendFormat(" Device version: %#x\n", deviceVersion);
- if (deviceVersion >= CAMERA_DEVICE_API_VERSION_2_0) {
+ if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
result.appendFormat(" Device static metadata:\n");
write(fd, result.string(), result.size());
dump_indented_camera_metadata(info.static_camera_characteristics,
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 4b0eeb7..c5fe69f 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -620,6 +620,7 @@
/**
* Add a event log message that a serious service-level error has occured
+ * The errorCode should be one of the Android Errors
*/
void logServiceError(const char* msg, int errorCode);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 4338d64..175920f 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -32,7 +32,6 @@
#include "api1/client2/CaptureSequencer.h"
#include "api1/client2/CallbackProcessor.h"
#include "api1/client2/ZslProcessor.h"
-#include "api1/client2/ZslProcessor3.h"
#define ALOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
#define ALOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
@@ -111,30 +110,11 @@
mCameraId);
mJpegProcessor->run(threadName.string());
- switch (mDeviceVersion) {
- case CAMERA_DEVICE_API_VERSION_2_0: {
- sp<ZslProcessor> zslProc =
- new ZslProcessor(this, mCaptureSequencer);
- mZslProcessor = zslProc;
- mZslProcessorThread = zslProc;
- break;
- }
- case CAMERA_DEVICE_API_VERSION_3_0:
- case CAMERA_DEVICE_API_VERSION_3_1:
- case CAMERA_DEVICE_API_VERSION_3_2:
- case CAMERA_DEVICE_API_VERSION_3_3: {
- sp<ZslProcessor3> zslProc =
- new ZslProcessor3(this, mCaptureSequencer);
- mZslProcessor = zslProc;
- mZslProcessorThread = zslProc;
- break;
- }
- default:
- break;
- }
+ mZslProcessor = new ZslProcessor(this, mCaptureSequencer);
+
threadName = String8::format("C2-%d-ZslProc",
mCameraId);
- mZslProcessorThread->run(threadName.string());
+ mZslProcessor->run(threadName.string());
mCallbackProcessor = new CallbackProcessor(this);
threadName = String8::format("C2-%d-CallbkProc",
@@ -414,7 +394,7 @@
mFrameProcessor->requestExit();
mCaptureSequencer->requestExit();
mJpegProcessor->requestExit();
- mZslProcessorThread->requestExit();
+ mZslProcessor->requestExit();
mCallbackProcessor->requestExit();
ALOGV("Camera %d: Waiting for threads", mCameraId);
@@ -428,7 +408,7 @@
mFrameProcessor->join();
mCaptureSequencer->join();
mJpegProcessor->join();
- mZslProcessorThread->join();
+ mZslProcessor->join();
mCallbackProcessor->join();
mBinderSerializationLock.lock();
@@ -442,9 +422,6 @@
mCallbackProcessor->deleteStream();
mZslProcessor->deleteStream();
- // Remove all ZSL stream state before disconnect; needed to work around b/15408128.
- mZslProcessor->disconnect();
-
ALOGV("Camera %d: Disconnecting device", mCameraId);
mDevice->disconnect();
@@ -761,8 +738,8 @@
// We could wait to create the JPEG output stream until first actual use
// (first takePicture call). However, this would substantially increase the
- // first capture latency on HAL3 devices, and potentially on some HAL2
- // devices. So create it unconditionally at preview start. As a drawback,
+ // first capture latency on HAL3 devices.
+ // So create it unconditionally at preview start. As a drawback,
// this increases gralloc memory consumption for applications that don't
// ever take a picture. Do not enter this mode when jpeg stream will slow
// down preview.
@@ -1069,35 +1046,33 @@
}
}
- // On current HALs, clean up ZSL before transitioning into recording
- if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
- if (mZslProcessor->getStreamId() != NO_STREAM) {
- ALOGV("%s: Camera %d: Clearing out zsl stream before "
- "creating recording stream", __FUNCTION__, mCameraId);
- res = mStreamingProcessor->stopStream();
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
- __FUNCTION__, mCameraId);
- return res;
- }
- res = mDevice->waitUntilDrained();
- if (res != OK) {
- ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- }
- res = mZslProcessor->clearZslQueue();
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't clear zsl queue",
- __FUNCTION__, mCameraId);
- return res;
- }
- res = mZslProcessor->deleteStream();
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to delete zsl stream before "
- "record: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
+ // Clean up ZSL before transitioning into recording
+ if (mZslProcessor->getStreamId() != NO_STREAM) {
+ ALOGV("%s: Camera %d: Clearing out zsl stream before "
+ "creating recording stream", __FUNCTION__, mCameraId);
+ res = mStreamingProcessor->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ res = mZslProcessor->clearZslQueue();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't clear zsl queue",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mZslProcessor->deleteStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete zsl stream before "
+ "record: %s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
}
}
@@ -1105,56 +1080,43 @@
// and we can't fail record start without stagefright asserting.
params.previewCallbackFlags = 0;
- if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
- // For newer devices, may need to reconfigure video snapshot JPEG sizes
- // during recording startup, so need a more complex sequence here to
- // ensure an early stream reconfiguration doesn't happen
- bool recordingStreamNeedsUpdate;
- res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate);
+ // May need to reconfigure video snapshot JPEG sizes
+ // during recording startup, so need a more complex sequence here to
+ // ensure an early stream reconfiguration doesn't happen
+ bool recordingStreamNeedsUpdate;
+ res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't query recording stream",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+
+ if (recordingStreamNeedsUpdate) {
+ // Need to stop stream here so updateProcessorStream won't trigger configureStream
+ // Right now camera device cannot handle configureStream failure gracefully
+ // when device is streaming
+ res = mStreamingProcessor->stopStream();
if (res != OK) {
- ALOGE("%s: Camera %d: Can't query recording stream",
- __FUNCTION__, mCameraId);
+ ALOGE("%s: Camera %d: Can't stop streaming to update record "
+ "stream", __FUNCTION__, mCameraId);
return res;
}
-
- if (recordingStreamNeedsUpdate) {
- // Need to stop stream here so updateProcessorStream won't trigger configureStream
- // Right now camera device cannot handle configureStream failure gracefully
- // when device is streaming
- res = mStreamingProcessor->stopStream();
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't stop streaming to update record "
- "stream", __FUNCTION__, mCameraId);
- return res;
- }
- res = mDevice->waitUntilDrained();
- if (res != OK) {
- ALOGE("%s: Camera %d: Waiting to stop streaming failed: "
- "%s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- }
-
- res = updateProcessorStream<
- StreamingProcessor,
- &StreamingProcessor::updateRecordingStream>(
- mStreamingProcessor,
- params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update recording stream: "
- "%s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
- }
- } else {
- // Maintain call sequencing for HALv2 devices.
- res = updateProcessorStream<
- StreamingProcessor,
- &StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
- params);
+ res = mDevice->waitUntilDrained();
if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: "
+ "%s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ }
+
+ res = updateProcessorStream<
+ StreamingProcessor,
+ &StreamingProcessor::updateRecordingStream>(
+ mStreamingProcessor,
+ params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update recording stream: "
+ "%s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
return res;
}
}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index d50bf63..e1e18c9 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -24,7 +24,7 @@
#include "api1/client2/FrameProcessor.h"
//#include "api1/client2/StreamingProcessor.h"
//#include "api1/client2/JpegProcessor.h"
-//#include "api1/client2/ZslProcessorInterface.h"
+//#include "api1/client2/ZslProcessor.h"
//#include "api1/client2/CaptureSequencer.h"
//#include "api1/client2/CallbackProcessor.h"
@@ -34,7 +34,7 @@
class StreamingProcessor;
class JpegProcessor;
-class ZslProcessorInterface;
+class ZslProcessor;
class CaptureSequencer;
class CallbackProcessor;
@@ -43,7 +43,7 @@
class IMemory;
/**
* Interface between android.hardware.Camera API and Camera HAL device for versions
- * CAMERA_DEVICE_API_VERSION_2_0 and 3_0.
+ * CAMERA_DEVICE_API_VERSION_3_0 and above.
*/
class Camera2Client :
public Camera2ClientBase<CameraService::Client>
@@ -204,12 +204,7 @@
sp<camera2::CaptureSequencer> mCaptureSequencer;
sp<camera2::JpegProcessor> mJpegProcessor;
- sp<camera2::ZslProcessorInterface> mZslProcessor;
- sp<Thread> mZslProcessorThread;
-
- /** Notification-related members */
-
- bool mAfInMotion;
+ sp<camera2::ZslProcessor> mZslProcessor;
/** Utility members */
bool mLegacyMode;
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
deleted file mode 100644
index 5502dcb..0000000
--- a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Camera2-BurstCapture"
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-
-#include "BurstCapture.h"
-
-#include "api1/Camera2Client.h"
-#include "api1/client2/JpegCompressor.h"
-
-namespace android {
-namespace camera2 {
-
-BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer):
- mCaptureStreamId(NO_STREAM),
- mClient(client),
- mSequencer(sequencer)
-{
-}
-
-BurstCapture::~BurstCapture() {
-}
-
-status_t BurstCapture::start(Vector<CameraMetadata> &/*metadatas*/,
- int32_t /*firstCaptureId*/) {
- ALOGE("Not completely implemented");
- return INVALID_OPERATION;
-}
-
-void BurstCapture::onFrameAvailable(const BufferItem &/*item*/) {
- ALOGV("%s", __FUNCTION__);
- Mutex::Autolock l(mInputMutex);
- if(!mInputChanged) {
- mInputChanged = true;
- mInputSignal.signal();
- }
-}
-
-bool BurstCapture::threadLoop() {
- status_t res;
- {
- Mutex::Autolock l(mInputMutex);
- while(!mInputChanged) {
- res = mInputSignal.waitRelative(mInputMutex, kWaitDuration);
- if(res == TIMED_OUT) return true;
- }
- mInputChanged = false;
- }
-
- do {
- sp<Camera2Client> client = mClient.promote();
- if(client == 0) return false;
- ALOGV("%s: Calling processFrameAvailable()", __FUNCTION__);
- res = processFrameAvailable(client);
- } while(res == OK);
-
- return true;
-}
-
-CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
- CpuConsumer::LockedBuffer *imgBuffer,
- int /*quality*/)
-{
- ALOGV("%s", __FUNCTION__);
-
- CpuConsumer::LockedBuffer *imgEncoded = new CpuConsumer::LockedBuffer;
- uint8_t *data = new uint8_t[ANDROID_JPEG_MAX_SIZE];
- imgEncoded->data = data;
- imgEncoded->width = imgBuffer->width;
- imgEncoded->height = imgBuffer->height;
- imgEncoded->stride = imgBuffer->stride;
-
- Vector<CpuConsumer::LockedBuffer*> buffers;
- buffers.push_back(imgBuffer);
- buffers.push_back(imgEncoded);
-
- sp<JpegCompressor> jpeg = new JpegCompressor();
- jpeg->start(buffers, 1);
-
- bool success = jpeg->waitForDone(10 * 1e9);
- if(success) {
- return buffers[1];
- }
- else {
- ALOGE("%s: JPEG encode timed out", __FUNCTION__);
- return NULL; // TODO: maybe change function return value to status_t
- }
-}
-
-status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &/*client*/) {
- ALOGE("Not implemented");
- return INVALID_OPERATION;
-}
-
-} // namespace camera2
-} // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.h b/services/camera/libcameraservice/api1/client2/BurstCapture.h
deleted file mode 100644
index c3b7722..0000000
--- a/services/camera/libcameraservice/api1/client2/BurstCapture.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
-#define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
-
-#include <camera/CameraMetadata.h>
-#include <binder/MemoryBase.h>
-#include <binder/MemoryHeapBase.h>
-#include <gui/CpuConsumer.h>
-
-#include "device2/Camera2Device.h"
-
-namespace android {
-
-class Camera2Client;
-
-namespace camera2 {
-
-class CaptureSequencer;
-
-class BurstCapture : public virtual Thread,
- public virtual CpuConsumer::FrameAvailableListener
-{
-public:
- BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
- virtual ~BurstCapture();
-
- virtual void onFrameAvailable(const BufferItem& item);
- virtual status_t start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId);
-
-protected:
- Mutex mInputMutex;
- bool mInputChanged;
- Condition mInputSignal;
- int mCaptureStreamId;
- wp<Camera2Client> mClient;
- wp<CaptureSequencer> mSequencer;
-
- // Should only be accessed by processing thread
- enum {
- NO_STREAM = -1
- };
-
- CpuConsumer::LockedBuffer* jpegEncode(
- CpuConsumer::LockedBuffer *imgBuffer,
- int quality);
-
- virtual status_t processFrameAvailable(sp<Camera2Client> &client);
-
-private:
- virtual bool threadLoop();
- static const nsecs_t kWaitDuration = 10000000; // 10 ms
-};
-
-} // namespace camera2
-} // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
index a290536..a22442f 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -33,7 +33,7 @@
namespace camera2 {
-class Parameters;
+struct Parameters;
/***
* Still image capture output image processing
@@ -75,7 +75,6 @@
sp<CpuConsumer> mCallbackConsumer;
sp<Surface> mCallbackWindow;
sp<Camera2Heap> mCallbackHeap;
- int mCallbackHeapId;
size_t mCallbackHeapHead, mCallbackHeapFree;
virtual bool threadLoop();
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 5f7fd74..61e1442 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -26,9 +26,8 @@
#include "api1/Camera2Client.h"
#include "api1/client2/CaptureSequencer.h"
-#include "api1/client2/BurstCapture.h"
#include "api1/client2/Parameters.h"
-#include "api1/client2/ZslProcessorInterface.h"
+#include "api1/client2/ZslProcessor.h"
namespace android {
namespace camera2 {
@@ -59,7 +58,7 @@
ALOGV("%s: Exit", __FUNCTION__);
}
-void CaptureSequencer::setZslProcessor(wp<ZslProcessorInterface> processor) {
+void CaptureSequencer::setZslProcessor(wp<ZslProcessor> processor) {
Mutex::Autolock l(mInputMutex);
mZslProcessor = processor;
}
@@ -111,6 +110,7 @@
void CaptureSequencer::notifyShutter(const CaptureResultExtras& resultExtras,
nsecs_t timestamp) {
ATRACE_CALL();
+ (void) timestamp;
Mutex::Autolock l(mInputMutex);
if (!mHalNotifiedShutter && resultExtras.requestId == mShutterCaptureId) {
mHalNotifiedShutter = true;
@@ -174,8 +174,6 @@
"STANDARD_PRECAPTURE_WAIT",
"STANDARD_CAPTURE",
"STANDARD_CAPTURE_WAIT",
- "BURST_CAPTURE_START",
- "BURST_CAPTURE_WAIT",
"DONE",
"ERROR",
"UNKNOWN"
@@ -192,8 +190,6 @@
&CaptureSequencer::manageStandardPrecaptureWait,
&CaptureSequencer::manageStandardCapture,
&CaptureSequencer::manageStandardCaptureWait,
- &CaptureSequencer::manageBurstCaptureStart,
- &CaptureSequencer::manageBurstCaptureWait,
&CaptureSequencer::manageDone,
};
@@ -293,7 +289,7 @@
}
takePictureCounter = l.mParameters.takePictureCounter;
}
- sp<ZslProcessorInterface> processor = mZslProcessor.promote();
+ sp<ZslProcessor> processor = mZslProcessor.promote();
if (processor != 0) {
ALOGV("%s: Memory optimization, clearing ZSL queue",
__FUNCTION__);
@@ -336,10 +332,6 @@
return DONE;
}
- if(l.mParameters.lightFx != Parameters::LIGHTFX_NONE &&
- l.mParameters.state == Parameters::STILL_CAPTURE) {
- nextState = BURST_CAPTURE_START;
- }
else if (l.mParameters.zslMode &&
l.mParameters.state == Parameters::STILL_CAPTURE &&
l.mParameters.flashMode != Parameters::FLASH_MODE_ON) {
@@ -361,7 +353,7 @@
sp<Camera2Client> &client) {
ALOGV("%s", __FUNCTION__);
status_t res;
- sp<ZslProcessorInterface> processor = mZslProcessor.promote();
+ sp<ZslProcessor> processor = mZslProcessor.promote();
if (processor == 0) {
ALOGE("%s: No ZSL queue to use!", __FUNCTION__);
return DONE;
@@ -664,76 +656,6 @@
return STANDARD_CAPTURE_WAIT;
}
-CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart(
- sp<Camera2Client> &client) {
- ALOGV("%s", __FUNCTION__);
- status_t res;
- ATRACE_CALL();
-
- // check which burst mode is set, create respective burst object
- {
- SharedParameters::Lock l(client->getParameters());
-
- res = updateCaptureRequest(l.mParameters, client);
- if(res != OK) {
- return DONE;
- }
-
- //
- // check for burst mode type in mParameters here
- //
- mBurstCapture = new BurstCapture(client, this);
- }
-
- res = mCaptureRequest.update(ANDROID_REQUEST_ID, &mCaptureId, 1);
- if (res == OK) {
- res = mCaptureRequest.sort();
- }
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
- return DONE;
- }
-
- CameraMetadata captureCopy = mCaptureRequest;
- if (captureCopy.entryCount() == 0) {
- ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
- __FUNCTION__, client->getCameraId());
- return DONE;
- }
-
- Vector<CameraMetadata> requests;
- requests.push(mCaptureRequest);
- res = mBurstCapture->start(requests, mCaptureId);
- mTimeoutCount = kMaxTimeoutsForCaptureEnd * 10;
- return BURST_CAPTURE_WAIT;
-}
-
-CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
- sp<Camera2Client> &/*client*/) {
- status_t res;
- ATRACE_CALL();
- while (!mNewCaptureReceived) {
- res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
- if (res == TIMED_OUT) {
- mTimeoutCount--;
- break;
- }
- }
-
- if (mTimeoutCount <= 0) {
- ALOGW("Timed out waiting for burst capture to complete");
- return DONE;
- }
- if (mNewCaptureReceived) {
- mNewCaptureReceived = false;
- // TODO: update mCaptureId to last burst's capture ID + 1?
- return DONE;
- }
-
- return BURST_CAPTURE_WAIT;
-}
-
status_t CaptureSequencer::updateCaptureRequest(const Parameters ¶ms,
sp<Camera2Client> &client) {
ATRACE_CALL();
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index 10252fb..b05207e 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -34,8 +34,7 @@
namespace camera2 {
-class ZslProcessorInterface;
-class BurstCapture;
+class ZslProcessor;
/**
* Manages the still image capture process for
@@ -49,7 +48,7 @@
~CaptureSequencer();
// Get reference to the ZslProcessor, which holds the ZSL buffers and frames
- void setZslProcessor(wp<ZslProcessorInterface> processor);
+ void setZslProcessor(wp<ZslProcessor> processor);
// Begin still image capture
status_t startCapture(int msgType);
@@ -113,8 +112,7 @@
static const int kMaxTimeoutsForCaptureEnd = 40; // 4 sec
wp<Camera2Client> mClient;
- wp<ZslProcessorInterface> mZslProcessor;
- sp<BurstCapture> mBurstCapture;
+ wp<ZslProcessor> mZslProcessor;
enum CaptureState {
IDLE,
@@ -126,8 +124,6 @@
STANDARD_PRECAPTURE_WAIT,
STANDARD_CAPTURE,
STANDARD_CAPTURE_WAIT,
- BURST_CAPTURE_START,
- BURST_CAPTURE_WAIT,
DONE,
ERROR,
NUM_CAPTURE_STATES
@@ -165,9 +161,6 @@
CaptureState manageStandardCapture(sp<Camera2Client> &client);
CaptureState manageStandardCaptureWait(sp<Camera2Client> &client);
- CaptureState manageBurstCaptureStart(sp<Camera2Client> &client);
- CaptureState manageBurstCaptureWait(sp<Camera2Client> &client);
-
CaptureState manageDone(sp<Camera2Client> &client);
// Utility methods
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 40d53b3..6490682 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -170,7 +170,7 @@
entry = frame.find(ANDROID_SCALER_CROP_REGION);
if (entry.count < 4) {
- ALOGE("%s: Camera %d: Unable to read crop region (count = %d)",
+ ALOGE("%s: Camera %d: Unable to read crop region (count = %zu)",
__FUNCTION__, client->getCameraId(), entry.count);
return res;
}
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index bd9786f..3923853 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -108,7 +108,7 @@
return NO_MEMORY;
}
}
- ALOGV("%s: Camera %d: JPEG capture heap now %d bytes; requested %d bytes",
+ ALOGV("%s: Camera %d: JPEG capture heap now %zu bytes; requested %zd bytes",
__FUNCTION__, mId, mCaptureHeap->getSize(), maxJpegSize);
if (mCaptureStreamId != NO_STREAM) {
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index fbdae11..ac6f5c7 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -35,7 +35,7 @@
namespace camera2 {
class CaptureSequencer;
-class Parameters;
+struct Parameters;
/***
* Still image capture output image processing
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index fc5ebac..f901dda 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -913,8 +913,6 @@
ALOGI("%s: zslMode: %d slowJpegMode %d", __FUNCTION__, zslMode, slowJpegMode);
- lightFx = LIGHTFX_NONE;
-
state = STOPPED;
paramsFlattened = params.flatten();
@@ -1864,10 +1862,6 @@
ALOGE("%s: Video stabilization not supported", __FUNCTION__);
}
- // LIGHTFX
- validatedParams.lightFx = lightFxStringToEnum(
- newParams.get(CameraParameters::KEY_LIGHTFX));
-
/** Update internal parameters */
*this = validatedParams;
@@ -1959,7 +1953,7 @@
if (res != OK) return res;
// android.hardware.Camera requires that when face detect is enabled, the
- // camera is in a face-priority mode. HAL2 splits this into separate parts
+ // camera is in a face-priority mode. HAL3.x splits this into separate parts
// (face detection statistics and face priority scene mode). Map from other
// to the other.
bool sceneModeActive =
@@ -2501,18 +2495,6 @@
}
}
-Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
- const char *lightFxMode) {
- return
- !lightFxMode ?
- Parameters::LIGHTFX_NONE :
- !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
- Parameters::LIGHTFX_LOWLIGHT :
- !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
- Parameters::LIGHTFX_HDR :
- Parameters::LIGHTFX_NONE;
-}
-
status_t Parameters::parseAreas(const char *areasCStr,
Vector<Parameters::Area> *areas) {
static const size_t NUM_FIELDS = 5;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 972d007..c5bbf63 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -136,12 +136,6 @@
bool recordingHint;
bool videoStabilization;
- enum lightFxMode_t {
- LIGHTFX_NONE = 0,
- LIGHTFX_LOWLIGHT,
- LIGHTFX_HDR
- } lightFx;
-
CameraParameters2 params;
String8 paramsFlattened;
@@ -307,7 +301,6 @@
static const char* flashModeEnumToString(flashMode_t flashMode);
static focusMode_t focusModeStringToEnum(const char *focusMode);
static const char* focusModeEnumToString(focusMode_t focusMode);
- static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
static status_t parseAreas(const char *areasCStr,
Vector<Area> *areas);
@@ -330,7 +323,7 @@
static const int kFpsToApiScale = 1000;
// Transform from (-1000,-1000)-(1000,1000) normalized coords from camera
- // API to HAL2 (0,0)-(activePixelArray.width/height) coordinates
+ // API to HAL3 (0,0)-(activePixelArray.width/height) coordinates
int normalizedXToArray(int x) const;
int normalizedYToArray(int y) const;
@@ -350,7 +343,7 @@
private:
// Convert from viewfinder crop-region relative array coordinates
- // to HAL2 sensor array coordinates
+ // to HAL3 sensor array coordinates
int cropXToArray(int x) const;
int cropYToArray(int y) const;
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
index e0cad3a..0b17eae 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -31,7 +31,7 @@
namespace camera2 {
-class Parameters;
+struct Parameters;
class Camera2Heap;
/**
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 0b79b31..b127472 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@
#ifdef LOG_NNDEBUG
#define ALOGVV(...) ALOGV(__VA_ARGS__)
#else
-#define ALOGVV(...) ((void)0)
+#define ALOGVV(...) if (0) ALOGV(__VA_ARGS__)
#endif
#include <inttypes.h>
@@ -35,6 +35,7 @@
#include "api1/Camera2Client.h"
#include "api1/client2/CaptureSequencer.h"
#include "api1/client2/ZslProcessor.h"
+#include "device3/Camera3Device.h"
namespace android {
namespace camera2 {
@@ -43,35 +44,55 @@
sp<Camera2Client> client,
wp<CaptureSequencer> sequencer):
Thread(false),
+ mLatestClearedBufferTimestamp(0),
mState(RUNNING),
mClient(client),
- mDevice(client->getCameraDevice()),
mSequencer(sequencer),
mId(client->getCameraId()),
- mDeleted(false),
- mZslBufferAvailable(false),
mZslStreamId(NO_STREAM),
- mZslReprocessStreamId(NO_STREAM),
mFrameListHead(0),
- mZslQueueHead(0),
- mZslQueueTail(0) {
- mZslQueue.insertAt(0, kZslBufferDepth);
- mFrameList.insertAt(0, kFrameListDepth);
+ mHasFocuser(false) {
+ // Initialize buffer queue and frame list based on pipeline max depth.
+ size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
+ if (client != 0) {
+ sp<Camera3Device> device =
+ static_cast<Camera3Device*>(client->getCameraDevice().get());
+ if (device != 0) {
+ camera_metadata_ro_entry_t entry =
+ device->info().find(ANDROID_REQUEST_PIPELINE_MAX_DEPTH);
+ if (entry.count == 1) {
+ pipelineMaxDepth = entry.data.u8[0];
+ } else {
+ ALOGW("%s: Unable to find the android.request.pipelineMaxDepth,"
+ " use default pipeline max depth %d", __FUNCTION__,
+ kDefaultMaxPipelineDepth);
+ }
+
+ entry = device->info().find(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
+ if (entry.count > 0 && entry.data.f[0] != 0.) {
+ mHasFocuser = true;
+ }
+ }
+ }
+
+ ALOGV("%s: Initialize buffer queue and frame list depth based on max pipeline depth (%zu)",
+ __FUNCTION__, pipelineMaxDepth);
+ // Need to keep buffer queue longer than metadata queue because sometimes buffer arrives
+ // earlier than metadata which causes the buffer corresponding to oldest metadata being
+ // removed.
+ mFrameListDepth = pipelineMaxDepth;
+ mBufferQueueDepth = mFrameListDepth + 1;
+
+
+ mZslQueue.insertAt(0, mBufferQueueDepth);
+ mFrameList.insertAt(0, mFrameListDepth);
sp<CaptureSequencer> captureSequencer = mSequencer.promote();
if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
}
ZslProcessor::~ZslProcessor() {
ALOGV("%s: Exit", __FUNCTION__);
- disconnect();
-}
-
-void ZslProcessor::onFrameAvailable(const BufferItem& /*item*/) {
- Mutex::Autolock l(mInputMutex);
- if (!mZslBufferAvailable) {
- mZslBufferAvailable = true;
- mZslBufferAvailableSignal.signal();
- }
+ deleteStream();
}
void ZslProcessor::onResultAvailable(const CaptureResult &result) {
@@ -81,35 +102,27 @@
camera_metadata_ro_entry_t entry;
entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
nsecs_t timestamp = entry.data.i64[0];
- (void)timestamp;
- ALOGVV("Got preview frame for timestamp %" PRId64, timestamp);
+ if (entry.count == 0) {
+ ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
+ return;
+ }
+
+ entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
+ if (entry.count == 0) {
+ ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
+ return;
+ }
+ int32_t frameNumber = entry.data.i32[0];
+
+ ALOGVV("Got preview metadata for frame %d with timestamp %" PRId64, frameNumber, timestamp);
if (mState != RUNNING) return;
+ // Corresponding buffer has been cleared. No need to push into mFrameList
+ if (timestamp <= mLatestClearedBufferTimestamp) return;
+
mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
- mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
-
- findMatchesLocked();
-}
-
-void ZslProcessor::onBufferReleased(buffer_handle_t *handle) {
- Mutex::Autolock l(mInputMutex);
-
- // Verify that the buffer is in our queue
- size_t i = 0;
- for (; i < mZslQueue.size(); i++) {
- if (&(mZslQueue[i].buffer.mGraphicBuffer->handle) == handle) break;
- }
- if (i == mZslQueue.size()) {
- ALOGW("%s: Released buffer %p not found in queue",
- __FUNCTION__, handle);
- }
-
- // Erase entire ZSL queue since we've now completed the capture and preview
- // is stopped.
- clearZslQueueLocked();
-
- mState = RUNNING;
+ mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
}
status_t ZslProcessor::updateStream(const Parameters ¶ms) {
@@ -124,25 +137,13 @@
ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
return INVALID_OPERATION;
}
- sp<CameraDeviceBase> device = mDevice.promote();
+ sp<Camera3Device> device =
+ static_cast<Camera3Device*>(client->getCameraDevice().get());
if (device == 0) {
ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
return INVALID_OPERATION;
}
- if (mZslConsumer == 0) {
- // Create CPU buffer queue endpoint
- sp<IGraphicBufferProducer> producer;
- sp<IGraphicBufferConsumer> consumer;
- BufferQueue::createBufferQueue(&producer, &consumer);
- mZslConsumer = new BufferItemConsumer(consumer,
- GRALLOC_USAGE_HW_CAMERA_ZSL,
- kZslBufferDepth);
- mZslConsumer->setFrameAvailableListener(this);
- mZslConsumer->setName(String8("Camera2-ZslConsumer"));
- mZslWindow = new Surface(producer);
- }
-
if (mZslStreamId != NO_STREAM) {
// Check if stream parameters have to change
uint32_t currentWidth, currentHeight;
@@ -151,57 +152,50 @@
if (res != OK) {
ALOGE("%s: Camera %d: Error querying capture output stream info: "
"%s (%d)", __FUNCTION__,
- mId, strerror(-res), res);
+ client->getCameraId(), strerror(-res), res);
return res;
}
if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
- res = device->deleteReprocessStream(mZslReprocessStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old reprocess stream "
- "for ZSL: %s (%d)", __FUNCTION__,
- mId, strerror(-res), res);
- return res;
- }
- ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
- __FUNCTION__, mId, mZslStreamId);
+ ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
+ "dimensions changed",
+ __FUNCTION__, client->getCameraId(), mZslStreamId);
res = device->deleteStream(mZslStreamId);
- if (res != OK) {
+ if (res == -EBUSY) {
+ ALOGV("%s: Camera %d: Device is busy, call updateStream again "
+ " after it becomes idle", __FUNCTION__, mId);
+ return res;
+ } else if(res != OK) {
ALOGE("%s: Camera %d: Unable to delete old output stream "
"for ZSL: %s (%d)", __FUNCTION__,
- mId, strerror(-res), res);
+ client->getCameraId(), strerror(-res), res);
return res;
}
mZslStreamId = NO_STREAM;
}
}
- mDeleted = false;
-
if (mZslStreamId == NO_STREAM) {
// Create stream for HAL production
// TODO: Sort out better way to select resolution for ZSL
- int streamType = params.quirks.useZslFormat ?
- (int)CAMERA2_HAL_PIXEL_FORMAT_ZSL :
- (int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- res = device->createStream(mZslWindow,
- params.fastInfo.arrayWidth, params.fastInfo.arrayHeight, streamType,
- HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId);
+
+ // Note that format specified internally in Camera3ZslStream
+ res = device->createZslStream(
+ params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
+ mBufferQueueDepth,
+ &mZslStreamId,
+ &mZslStream);
if (res != OK) {
- ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
- "%s (%d)", __FUNCTION__, mId,
+ ALOGE("%s: Camera %d: Can't create ZSL stream: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
strerror(-res), res);
return res;
}
- res = device->createReprocessStreamFromStream(mZslStreamId,
- &mZslReprocessStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: "
- "%s (%d)", __FUNCTION__, mId,
- strerror(-res), res);
- return res;
- }
+
+ // Only add the camera3 buffer listener when the stream is created.
+ mZslStream->addBufferListener(this);
}
+
client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
Camera2Client::kPreviewRequestIdEnd,
this,
@@ -212,47 +206,32 @@
status_t ZslProcessor::deleteStream() {
ATRACE_CALL();
- Mutex::Autolock l(mInputMutex);
- // WAR(b/15408128): do not delete stream unless client is being disconnected.
- mDeleted = true;
- return OK;
-}
-
-status_t ZslProcessor::disconnect() {
- ATRACE_CALL();
status_t res;
Mutex::Autolock l(mInputMutex);
if (mZslStreamId != NO_STREAM) {
- sp<CameraDeviceBase> device = mDevice.promote();
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ sp<Camera3Device> device =
+ reinterpret_cast<Camera3Device*>(client->getCameraDevice().get());
if (device == 0) {
ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
return INVALID_OPERATION;
}
- clearZslQueueLocked();
-
- res = device->deleteReprocessStream(mZslReprocessStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Cannot delete ZSL reprocessing stream %d: "
- "%s (%d)", __FUNCTION__, mId,
- mZslReprocessStreamId, strerror(-res), res);
- return res;
- }
-
- mZslReprocessStreamId = NO_STREAM;
res = device->deleteStream(mZslStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
- "%s (%d)", __FUNCTION__, mId,
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
mZslStreamId, strerror(-res), res);
return res;
}
- mZslWindow.clear();
- mZslConsumer.clear();
-
mZslStreamId = NO_STREAM;
}
return OK;
@@ -263,6 +242,46 @@
return mZslStreamId;
}
+status_t ZslProcessor::updateRequestWithDefaultStillRequest(CameraMetadata &request) const {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) {
+ ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+ sp<Camera3Device> device =
+ static_cast<Camera3Device*>(client->getCameraDevice().get());
+ if (device == 0) {
+ ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ CameraMetadata stillTemplate;
+ device->createDefaultRequest(CAMERA3_TEMPLATE_STILL_CAPTURE, &stillTemplate);
+
+ // Find some of the post-processing tags, and assign the value from template to the request.
+ // Only check the aberration mode and noise reduction mode for now, as they are very important
+ // for image quality.
+ uint32_t postProcessingTags[] = {
+ ANDROID_NOISE_REDUCTION_MODE,
+ ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ ANDROID_COLOR_CORRECTION_MODE,
+ ANDROID_TONEMAP_MODE,
+ ANDROID_SHADING_MODE,
+ ANDROID_HOT_PIXEL_MODE,
+ ANDROID_EDGE_MODE
+ };
+
+ camera_metadata_entry_t entry;
+ for (size_t i = 0; i < sizeof(postProcessingTags) / sizeof(uint32_t); i++) {
+ entry = stillTemplate.find(postProcessingTags[i]);
+ if (entry.count > 0) {
+ request.update(postProcessingTags[i], entry.data.u8, 1);
+ }
+ }
+
+ return OK;
+}
+
status_t ZslProcessor::pushToReprocess(int32_t requestId) {
ALOGV("%s: Send in reprocess request with id %d",
__FUNCTION__, requestId);
@@ -279,21 +298,30 @@
dumpZslQueue(-1);
}
- if (mZslQueueTail != mZslQueueHead) {
- CameraMetadata request;
- size_t index = mZslQueueTail;
- while (index != mZslQueueHead) {
- if (!mZslQueue[index].frame.isEmpty()) {
- request = mZslQueue[index].frame;
- break;
- }
- index = (index + 1) % kZslBufferDepth;
- }
- if (index == mZslQueueHead) {
- ALOGV("%s: ZSL queue has no valid frames to send yet.",
- __FUNCTION__);
- return NOT_ENOUGH_DATA;
- }
+ size_t metadataIdx;
+ nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);
+
+ if (candidateTimestamp == -1) {
+ ALOGE("%s: Could not find good candidate for ZSL reprocessing",
+ __FUNCTION__);
+ return NOT_ENOUGH_DATA;
+ }
+
+ res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
+ /*actualTimestamp*/NULL);
+
+ if (res == mZslStream->NO_BUFFER_AVAILABLE) {
+ ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
+ return NOT_ENOUGH_DATA;
+ } else if (res != OK) {
+ ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ {
+ CameraMetadata request = mFrameList[metadataIdx];
+
// Verify that the frame is reasonable for reprocessing
camera_metadata_entry_t entry;
@@ -310,25 +338,51 @@
return NOT_ENOUGH_DATA;
}
- buffer_handle_t *handle =
- &(mZslQueue[index].buffer.mGraphicBuffer->handle);
-
uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
res = request.update(ANDROID_REQUEST_TYPE,
&requestType, 1);
+ if (res != OK) {
+ ALOGE("%s: Unable to update request type",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
int32_t inputStreams[1] =
- { mZslReprocessStreamId };
- if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
+ { mZslStreamId };
+ res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
inputStreams, 1);
+ if (res != OK) {
+ ALOGE("%s: Unable to update request input streams",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ uint8_t captureIntent =
+ static_cast<uint8_t>(ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
+ res = request.update(ANDROID_CONTROL_CAPTURE_INTENT,
+ &captureIntent, 1);
+ if (res != OK ) {
+ ALOGE("%s: Unable to update request capture intent",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ // TODO: Shouldn't we also update the latest preview frame?
int32_t outputStreams[1] =
{ client->getCaptureStreamId() };
- if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+ res = request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreams, 1);
+ if (res != OK) {
+ ALOGE("%s: Unable to update request output streams",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
res = request.update(ANDROID_REQUEST_ID,
&requestId, 1);
-
if (res != OK ) {
- ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__);
+ ALOGE("%s: Unable to update frame to a reprocess request",
+ __FUNCTION__);
return INVALID_OPERATION;
}
@@ -336,17 +390,9 @@
if (res != OK) {
ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
"%s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
return INVALID_OPERATION;
}
- // TODO: have push-and-clear be atomic
- res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId,
- handle, this);
- if (res != OK) {
- ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
// Update JPEG settings
{
@@ -355,25 +401,30 @@
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
"capture request: %s (%d)", __FUNCTION__,
- mId,
+ client->getCameraId(),
strerror(-res), res);
return res;
}
}
+ // Update post-processing settings
+ res = updateRequestWithDefaultStillRequest(request);
+ if (res != OK) {
+ ALOGW("%s: Unable to update post-processing tags, the reprocessed image quality "
+ "may be compromised", __FUNCTION__);
+ }
+
mLatestCapturedRequest = request;
res = client->getCameraDevice()->capture(request);
if (res != OK ) {
- ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)",
- __FUNCTION__, strerror(-res), res);
+ ALOGE("%s: Unable to send ZSL reprocess request to capture: %s"
+ " (%d)", __FUNCTION__, strerror(-res), res);
return res;
}
mState = LOCKED;
- } else {
- ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
- return NOT_ENOUGH_DATA;
}
+
return OK;
}
@@ -386,17 +437,20 @@
}
status_t ZslProcessor::clearZslQueueLocked() {
- for (size_t i = 0; i < mZslQueue.size(); i++) {
- if (mZslQueue[i].buffer.mTimestamp != 0) {
- mZslConsumer->releaseBuffer(mZslQueue[i].buffer);
- }
- mZslQueue.replaceAt(i);
+ if (mZslStream != 0) {
+ // clear result metadata list first.
+ clearZslResultQueueLocked();
+ return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
}
- mZslQueueHead = 0;
- mZslQueueTail = 0;
return OK;
}
+void ZslProcessor::clearZslResultQueueLocked() {
+ mFrameList.clear();
+ mFrameListHead = 0;
+ mFrameList.insertAt(0, mFrameListDepth);
+}
+
void ZslProcessor::dump(int fd, const Vector<String16>& /*args*/) const {
Mutex::Autolock l(mInputMutex);
if (!mLatestCapturedRequest.isEmpty()) {
@@ -411,128 +465,9 @@
}
bool ZslProcessor::threadLoop() {
- status_t res;
-
- {
- Mutex::Autolock l(mInputMutex);
- while (!mZslBufferAvailable) {
- res = mZslBufferAvailableSignal.waitRelative(mInputMutex,
- kWaitDuration);
- if (res == TIMED_OUT) return true;
- }
- mZslBufferAvailable = false;
- }
-
- do {
- res = processNewZslBuffer();
- } while (res == OK);
-
- return true;
-}
-
-status_t ZslProcessor::processNewZslBuffer() {
- ATRACE_CALL();
- status_t res;
- sp<BufferItemConsumer> zslConsumer;
- {
- Mutex::Autolock l(mInputMutex);
- if (mZslConsumer == 0) return OK;
- zslConsumer = mZslConsumer;
- }
- ALOGVV("Trying to get next buffer");
- BufferItem item;
- res = zslConsumer->acquireBuffer(&item, 0);
- if (res != OK) {
- if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
- ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
- "%s (%d)", __FUNCTION__,
- mId, strerror(-res), res);
- } else {
- ALOGVV(" No buffer");
- }
- return res;
- }
-
- Mutex::Autolock l(mInputMutex);
-
- if (mState == LOCKED) {
- ALOGVV("In capture, discarding new ZSL buffers");
- zslConsumer->releaseBuffer(item);
- return OK;
- }
-
- ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);
-
- if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
- ALOGVV("Releasing oldest buffer");
- zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
- mZslQueue.replaceAt(mZslQueueTail);
- mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
- }
-
- ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);
-
- queueHead.buffer = item;
- queueHead.frame.release();
-
- mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;
-
- ALOGVV(" Acquired buffer, timestamp %" PRId64, queueHead.buffer.mTimestamp);
-
- findMatchesLocked();
-
- return OK;
-}
-
-void ZslProcessor::findMatchesLocked() {
- ALOGVV("Scanning");
- for (size_t i = 0; i < mZslQueue.size(); i++) {
- ZslPair &queueEntry = mZslQueue.editItemAt(i);
- nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
- IF_ALOGV() {
- camera_metadata_entry_t entry;
- nsecs_t frameTimestamp = 0;
- if (!queueEntry.frame.isEmpty()) {
- entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
- frameTimestamp = entry.data.i64[0];
- }
- ALOGVV(" %d: b: %" PRId64 "\tf: %" PRId64, i,
- bufferTimestamp, frameTimestamp );
- }
- if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
- // Have buffer, no matching frame. Look for one
- for (size_t j = 0; j < mFrameList.size(); j++) {
- bool match = false;
- CameraMetadata &frame = mFrameList.editItemAt(j);
- if (!frame.isEmpty()) {
- camera_metadata_entry_t entry;
- entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
- if (entry.count == 0) {
- ALOGE("%s: Can't find timestamp in frame!",
- __FUNCTION__);
- continue;
- }
- nsecs_t frameTimestamp = entry.data.i64[0];
- if (bufferTimestamp == frameTimestamp) {
- ALOGVV("%s: Found match %" PRId64, __FUNCTION__,
- frameTimestamp);
- match = true;
- } else {
- int64_t delta = abs(bufferTimestamp - frameTimestamp);
- if ( delta < 1000000) {
- ALOGVV("%s: Found close match %" PRId64 " (delta %" PRId64 ")",
- __FUNCTION__, bufferTimestamp, delta);
- match = true;
- }
- }
- }
- if (match) {
- queueEntry.frame.acquire(frame);
- break;
- }
- }
- }
- }
+ // TODO: remove dependency on thread. For now, shut thread down right
+ // away.
+ return false;
}
void ZslProcessor::dumpZslQueue(int fd) const {
@@ -567,5 +502,174 @@
}
}
+bool ZslProcessor::isFixedFocusMode(uint8_t afMode) const {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ return false;
+ break;
+ case ANDROID_CONTROL_AF_MODE_OFF:
+ case ANDROID_CONTROL_AF_MODE_EDOF:
+ return true;
+ default:
+ ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
+ return false;
+ }
+}
+
+nsecs_t ZslProcessor::getCandidateTimestampLocked(size_t* metadataIdx) const {
+ /**
+ * Find the smallest timestamp we know about so far
+ * - ensure that aeState is either converged or locked
+ */
+
+ size_t idx = 0;
+ nsecs_t minTimestamp = -1;
+
+ size_t emptyCount = mFrameList.size();
+
+ for (size_t j = 0; j < mFrameList.size(); j++) {
+ const CameraMetadata &frame = mFrameList[j];
+ if (!frame.isEmpty()) {
+
+ emptyCount--;
+
+ camera_metadata_ro_entry_t entry;
+ entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 0) {
+ ALOGE("%s: Can't find timestamp in frame!",
+ __FUNCTION__);
+ continue;
+ }
+ nsecs_t frameTimestamp = entry.data.i64[0];
+ if (minTimestamp > frameTimestamp || minTimestamp == -1) {
+
+ entry = frame.find(ANDROID_CONTROL_AE_STATE);
+
+ if (entry.count == 0) {
+ /**
+ * This is most likely a HAL bug. The aeState field is
+ * mandatory, so it should always be in a metadata packet.
+ */
+ ALOGW("%s: ZSL queue frame has no AE state field!",
+ __FUNCTION__);
+ continue;
+ }
+ if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
+ entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
+ ALOGVV("%s: ZSL queue frame AE state is %d, need "
+ "full capture", __FUNCTION__, entry.data.u8[0]);
+ continue;
+ }
+
+ entry = frame.find(ANDROID_CONTROL_AF_MODE);
+ if (entry.count == 0) {
+ ALOGW("%s: ZSL queue frame has no AF mode field!",
+ __FUNCTION__);
+ continue;
+ }
+ uint8_t afMode = entry.data.u8[0];
+ if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
+ // Skip all the ZSL buffer for manual AF mode, as we don't really
+ // know the af state.
+ continue;
+ }
+
+ // Check AF state if device has focuser and focus mode isn't fixed
+ if (mHasFocuser && !isFixedFocusMode(afMode)) {
+ // Make sure the candidate frame has good focus.
+ entry = frame.find(ANDROID_CONTROL_AF_STATE);
+ if (entry.count == 0) {
+ ALOGW("%s: ZSL queue frame has no AF state field!",
+ __FUNCTION__);
+ continue;
+ }
+ uint8_t afState = entry.data.u8[0];
+ if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+ afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+ afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+ ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
+ __FUNCTION__, afState);
+ continue;
+ }
+ }
+
+ minTimestamp = frameTimestamp;
+ idx = j;
+ }
+
+ ALOGVV("%s: Saw timestamp %" PRId64, __FUNCTION__, frameTimestamp);
+ }
+ }
+
+ if (emptyCount == mFrameList.size()) {
+ /**
+ * This could be mildly bad and means our ZSL was triggered before
+ * there were any frames yet received by the camera framework.
+ *
+ * This is a fairly corner case which can happen under:
+ * + a user presses the shutter button real fast when the camera starts
+ * (startPreview followed immediately by takePicture).
+ * + burst capture case (hitting shutter button as fast possible)
+ *
+ * If this happens in steady case (preview running for a while, call
+ * a single takePicture) then this might be a fwk bug.
+ */
+ ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
+ }
+
+ ALOGV("%s: Candidate timestamp %" PRId64 " (idx %zu), empty frames: %zu",
+ __FUNCTION__, minTimestamp, idx, emptyCount);
+
+ if (metadataIdx) {
+ *metadataIdx = idx;
+ }
+
+ return minTimestamp;
+}
+
+void ZslProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
+ // Intentionally left empty
+ // Although theoretically we could use this to get better dump info
+}
+
+void ZslProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
+
+ // ignore output buffers
+ if (bufferInfo.mOutput) {
+ return;
+ }
+
+ // Lock mutex only once we know this is an input buffer returned to avoid
+ // potential deadlock
+ Mutex::Autolock l(mInputMutex);
+ // TODO: Verify that the buffer is in our queue by looking at timestamp
+ // theoretically unnecessary unless we change the following assumptions:
+ // -- only 1 buffer reprocessed at a time (which is the case now)
+
+ // Erase entire ZSL queue since we've now completed the capture and preview
+ // is stopped.
+ //
+ // We need to guarantee that if we do two back-to-back captures,
+ // the second won't use a buffer that's older/the same as the first, which
+ // is theoretically possible if we don't clear out the queue and the
+ // selection criteria is something like 'newest'. Clearing out the result
+ // metadata queue on a completed capture ensures we'll only use new timestamp.
+ // Calling clearZslQueueLocked is a guaranteed deadlock because this callback
+ // holds the Camera3Stream internal lock (mLock), and clearZslQueueLocked requires
+ // to hold the same lock.
+ // TODO: need figure out a way to clear the Zsl buffer queue properly. Right now
+ // it is safe not to do so, as back to back ZSL capture requires stop and start
+ // preview, which will flush ZSL queue automatically.
+ ALOGV("%s: Memory optimization, clearing ZSL queue",
+ __FUNCTION__);
+ clearZslResultQueueLocked();
+
+ // Required so we accept more ZSL requests
+ mState = RUNNING;
+}
+
}; // namespace camera2
}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index 5870bd3..86c06c6 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,11 +25,9 @@
#include <gui/BufferItem.h>
#include <gui/BufferItemConsumer.h>
#include <camera/CameraMetadata.h>
-#include <camera/CaptureResult.h>
-#include "common/CameraDeviceBase.h"
-#include "api1/client2/ZslProcessorInterface.h"
#include "api1/client2/FrameProcessor.h"
+#include "device3/Camera3ZslStream.h"
namespace android {
@@ -38,45 +36,66 @@
namespace camera2 {
class CaptureSequencer;
-class Parameters;
+struct Parameters;
/***
- * ZSL queue processing
+ * ZSL queue processing for HALv3.0 or newer
*/
-class ZslProcessor:
+class ZslProcessor :
+ public camera3::Camera3StreamBufferListener,
virtual public Thread,
- virtual public BufferItemConsumer::FrameAvailableListener,
- virtual public FrameProcessor::FilteredListener,
- virtual public CameraDeviceBase::BufferReleasedListener,
- public ZslProcessorInterface {
+ virtual public FrameProcessor::FilteredListener {
public:
ZslProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
~ZslProcessor();
- // From mZslConsumer
- virtual void onFrameAvailable(const BufferItem& item);
- // From FrameProcessor
+ // From FrameProcessor::FilteredListener
virtual void onResultAvailable(const CaptureResult &result);
- virtual void onBufferReleased(buffer_handle_t *handle);
-
/**
****************************************
* ZslProcessorInterface implementation *
****************************************
*/
+ // Update the streams by recreating them if the size/format has changed
status_t updateStream(const Parameters ¶ms);
+
+ // Delete the underlying CameraDevice streams
status_t deleteStream();
- status_t disconnect();
+
+ // Get ID for use with android.request.outputStreams / inputStreams
int getStreamId() const;
+ /**
+ * Submits a ZSL capture request (id = requestId)
+ *
+ * An appropriate ZSL buffer is selected by the closest timestamp,
+ * then we push that buffer to be reprocessed by the HAL.
+ * A capture request is created and submitted on behalf of the client.
+ */
status_t pushToReprocess(int32_t requestId);
+
+ // Flush the ZSL buffer queue, freeing up all the buffers
status_t clearZslQueue();
void dump(int fd, const Vector<String16>& args) const;
+
+ protected:
+ /**
+ **********************************************
+ * Camera3StreamBufferListener implementation *
+ **********************************************
+ */
+ typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo;
+ // Buffer was acquired by the HAL
+ virtual void onBufferAcquired(const BufferInfo& bufferInfo);
+ // Buffer was released by the HAL
+ virtual void onBufferReleased(const BufferInfo& bufferInfo);
+
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ nsecs_t mLatestClearedBufferTimestamp;
enum {
RUNNING,
@@ -84,53 +103,52 @@
} mState;
wp<Camera2Client> mClient;
- wp<CameraDeviceBase> mDevice;
wp<CaptureSequencer> mSequencer;
- int mId;
- bool mDeleted;
+ const int mId;
mutable Mutex mInputMutex;
- bool mZslBufferAvailable;
- Condition mZslBufferAvailableSignal;
enum {
NO_STREAM = -1
};
int mZslStreamId;
- int mZslReprocessStreamId;
- sp<BufferItemConsumer> mZslConsumer;
- sp<Surface> mZslWindow;
+ sp<camera3::Camera3ZslStream> mZslStream;
struct ZslPair {
BufferItem buffer;
CameraMetadata frame;
};
- static const size_t kZslBufferDepth = 4;
- static const size_t kFrameListDepth = kZslBufferDepth * 2;
+ static const int32_t kDefaultMaxPipelineDepth = 4;
+ size_t mBufferQueueDepth;
+ size_t mFrameListDepth;
Vector<CameraMetadata> mFrameList;
size_t mFrameListHead;
ZslPair mNextPair;
Vector<ZslPair> mZslQueue;
- size_t mZslQueueHead;
- size_t mZslQueueTail;
CameraMetadata mLatestCapturedRequest;
+ bool mHasFocuser;
+
virtual bool threadLoop();
- status_t processNewZslBuffer();
-
- // Match up entries from frame list to buffers in ZSL queue
- void findMatchesLocked();
-
status_t clearZslQueueLocked();
+ void clearZslResultQueueLocked();
+
void dumpZslQueue(int id) const;
+
+ nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
+
+ bool isFixedFocusMode(uint8_t afMode) const;
+
+ // Update the post-processing metadata with the default still capture request template
+ status_t updateRequestWithDefaultStillRequest(CameraMetadata &request) const;
};
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
deleted file mode 100644
index 69620ac..0000000
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera2-ZslProcessor3"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-//#define LOG_NNDEBUG 0
-
-#ifdef LOG_NNDEBUG
-#define ALOGVV(...) ALOGV(__VA_ARGS__)
-#else
-#define ALOGVV(...) ((void)0)
-#endif
-
-#include <inttypes.h>
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-#include <gui/Surface.h>
-
-#include "common/CameraDeviceBase.h"
-#include "api1/Camera2Client.h"
-#include "api1/client2/CaptureSequencer.h"
-#include "api1/client2/ZslProcessor3.h"
-#include "device3/Camera3Device.h"
-
-namespace android {
-namespace camera2 {
-
-ZslProcessor3::ZslProcessor3(
- sp<Camera2Client> client,
- wp<CaptureSequencer> sequencer):
- Thread(false),
- mLatestClearedBufferTimestamp(0),
- mState(RUNNING),
- mClient(client),
- mSequencer(sequencer),
- mId(client->getCameraId()),
- mZslStreamId(NO_STREAM),
- mFrameListHead(0),
- mZslQueueHead(0),
- mZslQueueTail(0),
- mHasFocuser(false) {
- // Initialize buffer queue and frame list based on pipeline max depth.
- size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
- if (client != 0) {
- sp<Camera3Device> device =
- static_cast<Camera3Device*>(client->getCameraDevice().get());
- if (device != 0) {
- camera_metadata_ro_entry_t entry =
- device->info().find(ANDROID_REQUEST_PIPELINE_MAX_DEPTH);
- if (entry.count == 1) {
- pipelineMaxDepth = entry.data.u8[0];
- } else {
- ALOGW("%s: Unable to find the android.request.pipelineMaxDepth,"
- " use default pipeline max depth %zu", __FUNCTION__,
- kDefaultMaxPipelineDepth);
- }
-
- entry = device->info().find(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
- if (entry.count > 0 && entry.data.f[0] != 0.) {
- mHasFocuser = true;
- }
- }
- }
-
- ALOGV("%s: Initialize buffer queue and frame list depth based on max pipeline depth (%d)",
- __FUNCTION__, pipelineMaxDepth);
- // Need to keep buffer queue longer than metadata queue because sometimes buffer arrives
- // earlier than metadata which causes the buffer corresponding to oldest metadata being
- // removed.
- mFrameListDepth = pipelineMaxDepth;
- mBufferQueueDepth = mFrameListDepth + 1;
-
-
- mZslQueue.insertAt(0, mBufferQueueDepth);
- mFrameList.insertAt(0, mFrameListDepth);
- sp<CaptureSequencer> captureSequencer = mSequencer.promote();
- if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
-}
-
-ZslProcessor3::~ZslProcessor3() {
- ALOGV("%s: Exit", __FUNCTION__);
- deleteStream();
-}
-
-void ZslProcessor3::onResultAvailable(const CaptureResult &result) {
- ATRACE_CALL();
- ALOGV("%s:", __FUNCTION__);
- Mutex::Autolock l(mInputMutex);
- camera_metadata_ro_entry_t entry;
- entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
- nsecs_t timestamp = entry.data.i64[0];
- if (entry.count == 0) {
- ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
- return;
- }
-
- entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
- if (entry.count == 0) {
- ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
- return;
- }
- int32_t frameNumber = entry.data.i32[0];
-
- ALOGVV("Got preview metadata for frame %d with timestamp %" PRId64, frameNumber, timestamp);
-
- if (mState != RUNNING) return;
-
- // Corresponding buffer has been cleared. No need to push into mFrameList
- if (timestamp <= mLatestClearedBufferTimestamp) return;
-
- mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
- mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
-}
-
-status_t ZslProcessor3::updateStream(const Parameters ¶ms) {
- ATRACE_CALL();
- ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
- status_t res;
-
- Mutex::Autolock l(mInputMutex);
-
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) {
- ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
- sp<Camera3Device> device =
- static_cast<Camera3Device*>(client->getCameraDevice().get());
- if (device == 0) {
- ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
-
- if (mZslStreamId != NO_STREAM) {
- // Check if stream parameters have to change
- uint32_t currentWidth, currentHeight;
- res = device->getStreamInfo(mZslStreamId,
- ¤tWidth, ¤tHeight, 0, 0);
- if (res != OK) {
- ALOGE("%s: Camera %d: Error querying capture output stream info: "
- "%s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
- return res;
- }
- if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
- currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
- ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
- "dimensions changed",
- __FUNCTION__, client->getCameraId(), mZslStreamId);
- res = device->deleteStream(mZslStreamId);
- if (res == -EBUSY) {
- ALOGV("%s: Camera %d: Device is busy, call updateStream again "
- " after it becomes idle", __FUNCTION__, mId);
- return res;
- } else if(res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for ZSL: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
- return res;
- }
- mZslStreamId = NO_STREAM;
- }
- }
-
- if (mZslStreamId == NO_STREAM) {
- // Create stream for HAL production
- // TODO: Sort out better way to select resolution for ZSL
-
- // Note that format specified internally in Camera3ZslStream
- res = device->createZslStream(
- params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
- mBufferQueueDepth,
- &mZslStreamId,
- &mZslStream);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't create ZSL stream: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
- strerror(-res), res);
- return res;
- }
-
- // Only add the camera3 buffer listener when the stream is created.
- mZslStream->addBufferListener(this);
- }
-
- client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
- Camera2Client::kPreviewRequestIdEnd,
- this,
- /*sendPartials*/false);
-
- return OK;
-}
-
-status_t ZslProcessor3::deleteStream() {
- ATRACE_CALL();
- status_t res;
-
- Mutex::Autolock l(mInputMutex);
-
- if (mZslStreamId != NO_STREAM) {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) {
- ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
-
- sp<Camera3Device> device =
- reinterpret_cast<Camera3Device*>(client->getCameraDevice().get());
- if (device == 0) {
- ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
-
- res = device->deleteStream(mZslStreamId);
- if (res != OK) {
- ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
- "%s (%d)", __FUNCTION__, client->getCameraId(),
- mZslStreamId, strerror(-res), res);
- return res;
- }
-
- mZslStreamId = NO_STREAM;
- }
- return OK;
-}
-
-int ZslProcessor3::getStreamId() const {
- Mutex::Autolock l(mInputMutex);
- return mZslStreamId;
-}
-
-status_t ZslProcessor3::updateRequestWithDefaultStillRequest(CameraMetadata &request) const {
- sp<Camera2Client> client = mClient.promote();
- if (client == 0) {
- ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
- sp<Camera3Device> device =
- static_cast<Camera3Device*>(client->getCameraDevice().get());
- if (device == 0) {
- ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
-
- CameraMetadata stillTemplate;
- device->createDefaultRequest(CAMERA3_TEMPLATE_STILL_CAPTURE, &stillTemplate);
-
- // Find some of the post-processing tags, and assign the value from template to the request.
- // Only check the aberration mode and noise reduction mode for now, as they are very important
- // for image quality.
- uint32_t postProcessingTags[] = {
- ANDROID_NOISE_REDUCTION_MODE,
- ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
- ANDROID_COLOR_CORRECTION_MODE,
- ANDROID_TONEMAP_MODE,
- ANDROID_SHADING_MODE,
- ANDROID_HOT_PIXEL_MODE,
- ANDROID_EDGE_MODE
- };
-
- camera_metadata_entry_t entry;
- for (size_t i = 0; i < sizeof(postProcessingTags) / sizeof(uint32_t); i++) {
- entry = stillTemplate.find(postProcessingTags[i]);
- if (entry.count > 0) {
- request.update(postProcessingTags[i], entry.data.u8, 1);
- }
- }
-
- return OK;
-}
-
-status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
- ALOGV("%s: Send in reprocess request with id %d",
- __FUNCTION__, requestId);
- Mutex::Autolock l(mInputMutex);
- status_t res;
- sp<Camera2Client> client = mClient.promote();
-
- if (client == 0) {
- ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
-
- IF_ALOGV() {
- dumpZslQueue(-1);
- }
-
- size_t metadataIdx;
- nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);
-
- if (candidateTimestamp == -1) {
- ALOGE("%s: Could not find good candidate for ZSL reprocessing",
- __FUNCTION__);
- return NOT_ENOUGH_DATA;
- }
-
- res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
- /*actualTimestamp*/NULL);
-
- if (res == mZslStream->NO_BUFFER_AVAILABLE) {
- ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
- return NOT_ENOUGH_DATA;
- } else if (res != OK) {
- ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
-
- {
- CameraMetadata request = mFrameList[metadataIdx];
-
- // Verify that the frame is reasonable for reprocessing
-
- camera_metadata_entry_t entry;
- entry = request.find(ANDROID_CONTROL_AE_STATE);
- if (entry.count == 0) {
- ALOGE("%s: ZSL queue frame has no AE state field!",
- __FUNCTION__);
- return BAD_VALUE;
- }
- if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
- entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
- ALOGV("%s: ZSL queue frame AE state is %d, need full capture",
- __FUNCTION__, entry.data.u8[0]);
- return NOT_ENOUGH_DATA;
- }
-
- uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
- res = request.update(ANDROID_REQUEST_TYPE,
- &requestType, 1);
- if (res != OK) {
- ALOGE("%s: Unable to update request type",
- __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- int32_t inputStreams[1] =
- { mZslStreamId };
- res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
- inputStreams, 1);
- if (res != OK) {
- ALOGE("%s: Unable to update request input streams",
- __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- uint8_t captureIntent =
- static_cast<uint8_t>(ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
- res = request.update(ANDROID_CONTROL_CAPTURE_INTENT,
- &captureIntent, 1);
- if (res != OK ) {
- ALOGE("%s: Unable to update request capture intent",
- __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- // TODO: Shouldn't we also update the latest preview frame?
- int32_t outputStreams[1] =
- { client->getCaptureStreamId() };
- res = request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
- outputStreams, 1);
- if (res != OK) {
- ALOGE("%s: Unable to update request output streams",
- __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- res = request.update(ANDROID_REQUEST_ID,
- &requestId, 1);
- if (res != OK ) {
- ALOGE("%s: Unable to update frame to a reprocess request",
- __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- res = client->stopStream();
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
- "%s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
- return INVALID_OPERATION;
- }
-
- // Update JPEG settings
- {
- SharedParameters::Lock l(client->getParameters());
- res = l.mParameters.updateRequestJpeg(&request);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
- "capture request: %s (%d)", __FUNCTION__,
- client->getCameraId(),
- strerror(-res), res);
- return res;
- }
- }
-
- // Update post-processing settings
- res = updateRequestWithDefaultStillRequest(request);
- if (res != OK) {
- ALOGW("%s: Unable to update post-processing tags, the reprocessed image quality "
- "may be compromised", __FUNCTION__);
- }
-
- mLatestCapturedRequest = request;
- res = client->getCameraDevice()->capture(request);
- if (res != OK ) {
- ALOGE("%s: Unable to send ZSL reprocess request to capture: %s"
- " (%d)", __FUNCTION__, strerror(-res), res);
- return res;
- }
-
- mState = LOCKED;
- }
-
- return OK;
-}
-
-status_t ZslProcessor3::clearZslQueue() {
- Mutex::Autolock l(mInputMutex);
- // If in middle of capture, can't clear out queue
- if (mState == LOCKED) return OK;
-
- return clearZslQueueLocked();
-}
-
-status_t ZslProcessor3::clearZslQueueLocked() {
- if (mZslStream != 0) {
- // clear result metadata list first.
- clearZslResultQueueLocked();
- return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
- }
- return OK;
-}
-
-void ZslProcessor3::clearZslResultQueueLocked() {
- mFrameList.clear();
- mFrameListHead = 0;
- mFrameList.insertAt(0, mFrameListDepth);
-}
-
-void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const {
- Mutex::Autolock l(mInputMutex);
- if (!mLatestCapturedRequest.isEmpty()) {
- String8 result(" Latest ZSL capture request:\n");
- write(fd, result.string(), result.size());
- mLatestCapturedRequest.dump(fd, 2, 6);
- } else {
- String8 result(" Latest ZSL capture request: none yet\n");
- write(fd, result.string(), result.size());
- }
- dumpZslQueue(fd);
-}
-
-bool ZslProcessor3::threadLoop() {
- // TODO: remove dependency on thread. For now, shut thread down right
- // away.
- return false;
-}
-
-void ZslProcessor3::dumpZslQueue(int fd) const {
- String8 header("ZSL queue contents:");
- String8 indent(" ");
- ALOGV("%s", header.string());
- if (fd != -1) {
- header = indent + header + "\n";
- write(fd, header.string(), header.size());
- }
- for (size_t i = 0; i < mZslQueue.size(); i++) {
- const ZslPair &queueEntry = mZslQueue[i];
- nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
- camera_metadata_ro_entry_t entry;
- nsecs_t frameTimestamp = 0;
- int frameAeState = -1;
- if (!queueEntry.frame.isEmpty()) {
- entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
- if (entry.count > 0) frameTimestamp = entry.data.i64[0];
- entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE);
- if (entry.count > 0) frameAeState = entry.data.u8[0];
- }
- String8 result =
- String8::format(" %zu: b: %" PRId64 "\tf: %" PRId64 ", AE state: %d", i,
- bufferTimestamp, frameTimestamp, frameAeState);
- ALOGV("%s", result.string());
- if (fd != -1) {
- result = indent + result + "\n";
- write(fd, result.string(), result.size());
- }
-
- }
-}
-
-bool ZslProcessor3::isFixedFocusMode(uint8_t afMode) const {
- switch (afMode) {
- case ANDROID_CONTROL_AF_MODE_AUTO:
- case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
- case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
- case ANDROID_CONTROL_AF_MODE_MACRO:
- return false;
- break;
- case ANDROID_CONTROL_AF_MODE_OFF:
- case ANDROID_CONTROL_AF_MODE_EDOF:
- return true;
- default:
- ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
- return false;
- }
-}
-
-nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
- /**
- * Find the smallest timestamp we know about so far
- * - ensure that aeState is either converged or locked
- */
-
- size_t idx = 0;
- nsecs_t minTimestamp = -1;
-
- size_t emptyCount = mFrameList.size();
-
- for (size_t j = 0; j < mFrameList.size(); j++) {
- const CameraMetadata &frame = mFrameList[j];
- if (!frame.isEmpty()) {
-
- emptyCount--;
-
- camera_metadata_ro_entry_t entry;
- entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
- if (entry.count == 0) {
- ALOGE("%s: Can't find timestamp in frame!",
- __FUNCTION__);
- continue;
- }
- nsecs_t frameTimestamp = entry.data.i64[0];
- if (minTimestamp > frameTimestamp || minTimestamp == -1) {
-
- entry = frame.find(ANDROID_CONTROL_AE_STATE);
-
- if (entry.count == 0) {
- /**
- * This is most likely a HAL bug. The aeState field is
- * mandatory, so it should always be in a metadata packet.
- */
- ALOGW("%s: ZSL queue frame has no AE state field!",
- __FUNCTION__);
- continue;
- }
- if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
- entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
- ALOGVV("%s: ZSL queue frame AE state is %d, need "
- "full capture", __FUNCTION__, entry.data.u8[0]);
- continue;
- }
-
- entry = frame.find(ANDROID_CONTROL_AF_MODE);
- if (entry.count == 0) {
- ALOGW("%s: ZSL queue frame has no AF mode field!",
- __FUNCTION__);
- continue;
- }
- uint8_t afMode = entry.data.u8[0];
- if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
- // Skip all the ZSL buffer for manual AF mode, as we don't really
- // know the af state.
- continue;
- }
-
- // Check AF state if device has focuser and focus mode isn't fixed
- if (mHasFocuser && !isFixedFocusMode(afMode)) {
- // Make sure the candidate frame has good focus.
- entry = frame.find(ANDROID_CONTROL_AF_STATE);
- if (entry.count == 0) {
- ALOGW("%s: ZSL queue frame has no AF state field!",
- __FUNCTION__);
- continue;
- }
- uint8_t afState = entry.data.u8[0];
- if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
- afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
- afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
- ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
- __FUNCTION__, afState);
- continue;
- }
- }
-
- minTimestamp = frameTimestamp;
- idx = j;
- }
-
- ALOGVV("%s: Saw timestamp %" PRId64, __FUNCTION__, frameTimestamp);
- }
- }
-
- if (emptyCount == mFrameList.size()) {
- /**
- * This could be mildly bad and means our ZSL was triggered before
- * there were any frames yet received by the camera framework.
- *
- * This is a fairly corner case which can happen under:
- * + a user presses the shutter button real fast when the camera starts
- * (startPreview followed immediately by takePicture).
- * + burst capture case (hitting shutter button as fast possible)
- *
- * If this happens in steady case (preview running for a while, call
- * a single takePicture) then this might be a fwk bug.
- */
- ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
- }
-
- ALOGV("%s: Candidate timestamp %" PRId64 " (idx %zu), empty frames: %zu",
- __FUNCTION__, minTimestamp, idx, emptyCount);
-
- if (metadataIdx) {
- *metadataIdx = idx;
- }
-
- return minTimestamp;
-}
-
-void ZslProcessor3::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
- // Intentionally left empty
- // Although theoretically we could use this to get better dump info
-}
-
-void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
-
- // ignore output buffers
- if (bufferInfo.mOutput) {
- return;
- }
-
- // Lock mutex only once we know this is an input buffer returned to avoid
- // potential deadlock
- Mutex::Autolock l(mInputMutex);
- // TODO: Verify that the buffer is in our queue by looking at timestamp
- // theoretically unnecessary unless we change the following assumptions:
- // -- only 1 buffer reprocessed at a time (which is the case now)
-
- // Erase entire ZSL queue since we've now completed the capture and preview
- // is stopped.
- //
- // We need to guarantee that if we do two back-to-back captures,
- // the second won't use a buffer that's older/the same as the first, which
- // is theoretically possible if we don't clear out the queue and the
- // selection criteria is something like 'newest'. Clearing out the result
- // metadata queue on a completed capture ensures we'll only use new timestamp.
- // Calling clearZslQueueLocked is a guaranteed deadlock because this callback
- // holds the Camera3Stream internal lock (mLock), and clearZslQueueLocked requires
- // to hold the same lock.
- // TODO: need figure out a way to clear the Zsl buffer queue properly. Right now
- // it is safe not to do so, as back to back ZSL capture requires stop and start
- // preview, which will flush ZSL queue automatically.
- ALOGV("%s: Memory optimization, clearing ZSL queue",
- __FUNCTION__);
- clearZslResultQueueLocked();
-
- // Required so we accept more ZSL requests
- mState = RUNNING;
-}
-
-}; // namespace camera2
-}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
deleted file mode 100644
index 2960478..0000000
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
-
-#include <utils/Thread.h>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-#include <utils/Mutex.h>
-#include <utils/Condition.h>
-#include <gui/BufferItem.h>
-#include <gui/BufferItemConsumer.h>
-#include <camera/CameraMetadata.h>
-
-#include "api1/client2/FrameProcessor.h"
-#include "api1/client2/ZslProcessorInterface.h"
-#include "device3/Camera3ZslStream.h"
-
-namespace android {
-
-class Camera2Client;
-
-namespace camera2 {
-
-class CaptureSequencer;
-class Parameters;
-
-/***
- * ZSL queue processing
- */
-class ZslProcessor3 :
- public ZslProcessorInterface,
- public camera3::Camera3StreamBufferListener,
- virtual public Thread,
- virtual public FrameProcessor::FilteredListener {
- public:
- ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
- ~ZslProcessor3();
-
- // From FrameProcessor::FilteredListener
- virtual void onResultAvailable(const CaptureResult &result);
-
- /**
- ****************************************
- * ZslProcessorInterface implementation *
- ****************************************
- */
-
- virtual status_t updateStream(const Parameters ¶ms);
- virtual status_t deleteStream();
- virtual int getStreamId() const;
-
- virtual status_t pushToReprocess(int32_t requestId);
- virtual status_t clearZslQueue();
-
- void dump(int fd, const Vector<String16>& args) const;
-
- protected:
- /**
- **********************************************
- * Camera3StreamBufferListener implementation *
- **********************************************
- */
- typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo;
- // Buffer was acquired by the HAL
- virtual void onBufferAcquired(const BufferInfo& bufferInfo);
- // Buffer was released by the HAL
- virtual void onBufferReleased(const BufferInfo& bufferInfo);
-
- private:
- static const nsecs_t kWaitDuration = 10000000; // 10 ms
- nsecs_t mLatestClearedBufferTimestamp;
-
- enum {
- RUNNING,
- LOCKED
- } mState;
-
- wp<Camera2Client> mClient;
- wp<CaptureSequencer> mSequencer;
-
- const int mId;
-
- mutable Mutex mInputMutex;
-
- enum {
- NO_STREAM = -1
- };
-
- int mZslStreamId;
- sp<camera3::Camera3ZslStream> mZslStream;
-
- struct ZslPair {
- BufferItem buffer;
- CameraMetadata frame;
- };
-
- static const int32_t kDefaultMaxPipelineDepth = 4;
- size_t mBufferQueueDepth;
- size_t mFrameListDepth;
- Vector<CameraMetadata> mFrameList;
- size_t mFrameListHead;
-
- ZslPair mNextPair;
-
- Vector<ZslPair> mZslQueue;
- size_t mZslQueueHead;
- size_t mZslQueueTail;
-
- CameraMetadata mLatestCapturedRequest;
-
- bool mHasFocuser;
-
- virtual bool threadLoop();
-
- status_t clearZslQueueLocked();
-
- void clearZslResultQueueLocked();
-
- void dumpZslQueue(int id) const;
-
- nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
-
- bool isFixedFocusMode(uint8_t afMode) const;
-
- // Update the post-processing metadata with the default still capture request template
- status_t updateRequestWithDefaultStillRequest(CameraMetadata &request) const;
-};
-
-
-}; //namespace camera2
-}; //namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
deleted file mode 100644
index 9efeaba..0000000
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ZslProcessorInterface.h"
-
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
- return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
deleted file mode 100644
index 9e266e7..0000000
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
-
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-
-namespace android {
-namespace camera2 {
-
-class Parameters;
-
-class ZslProcessorInterface : virtual public RefBase {
-public:
-
- // Get ID for use with android.request.outputStreams / inputStreams
- virtual int getStreamId() const = 0;
-
- // Update the streams by recreating them if the size/format has changed
- virtual status_t updateStream(const Parameters& params) = 0;
-
- // Delete the underlying CameraDevice streams
- virtual status_t deleteStream() = 0;
-
- // Clear any additional state necessary before the CameraDevice is disconnected
- virtual status_t disconnect();
-
- /**
- * Submits a ZSL capture request (id = requestId)
- *
- * An appropriate ZSL buffer is selected by the closest timestamp,
- * then we push that buffer to be reprocessed by the HAL.
- * A capture request is created and submitted on behalf of the client.
- */
- virtual status_t pushToReprocess(int32_t requestId) = 0;
-
- // Flush the ZSL buffer queue, freeing up all the buffers
- virtual status_t clearZslQueue() = 0;
-
- // (Debugging only) Dump the current state to the specified file descriptor
- virtual void dump(int fd, const Vector<String16>& args) const = 0;
-};
-
-}; //namespace camera2
-}; //namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 5732f80..82c8fe9 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -31,7 +31,7 @@
#include "api2/CameraDeviceClient.h"
-#include "CameraDeviceFactory.h"
+#include "device3/Camera3Device.h"
namespace android {
using namespace camera2;
@@ -62,7 +62,7 @@
String8(clientPackageName).string(), clientPid, clientUid);
mInitialClientPid = clientPid;
- mDevice = CameraDeviceFactory::createDevice(cameraId);
+ mDevice = new Camera3Device(cameraId);
LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
}
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 220c5ad..53122dc 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -125,7 +125,7 @@
// that mBinderSerializationLock is locked when they're called
mutable Mutex mBinderSerializationLock;
- /** CameraDeviceBase instance wrapping HAL2+ entry */
+ /** CameraDeviceBase instance wrapping HAL3+ entry */
const int mDeviceVersion;
sp<CameraDeviceBase> mDevice;
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
index 16b8aba..d7a1568 100644
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -27,11 +27,6 @@
void CameraModule::deriveCameraCharacteristicsKeys(
uint32_t deviceVersion, CameraMetadata &chars) {
ATRACE_CALL();
- // HAL1 devices should not reach here
- if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
- ALOGV("%s: Cannot derive keys for HAL version < 2.0");
- return;
- }
// Keys added in HAL3.3
if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
@@ -211,7 +206,7 @@
return ret;
}
int deviceVersion = rawInfo.device_version;
- if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
+ if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_0) {
// static_camera_characteristics is invalid
*info = rawInfo;
return ret;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
deleted file mode 100644
index d74f976..0000000
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ /dev/null
@@ -1,1618 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera2-Device"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-//#define LOG_NNDEBUG 0 // Per-frame verbose logging
-
-#ifdef LOG_NNDEBUG
-#define ALOGVV(...) ALOGV(__VA_ARGS__)
-#else
-#define ALOGVV(...) ((void)0)
-#endif
-
-#include <inttypes.h>
-#include <utils/Log.h>
-#include <utils/Trace.h>
-#include <utils/Timers.h>
-#include "Camera2Device.h"
-#include "CameraService.h"
-
-namespace android {
-
-Camera2Device::Camera2Device(int id):
- mId(id),
- mHal2Device(NULL)
-{
- ATRACE_CALL();
- ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
-}
-
-Camera2Device::~Camera2Device()
-{
- ATRACE_CALL();
- ALOGV("%s: Tearing down for camera id %d", __FUNCTION__, mId);
- disconnect();
-}
-
-int Camera2Device::getId() const {
- return mId;
-}
-
-status_t Camera2Device::initialize(CameraModule *module)
-{
- ATRACE_CALL();
- ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
- if (mHal2Device != NULL) {
- ALOGE("%s: Already initialized!", __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- status_t res;
- char name[10];
- snprintf(name, sizeof(name), "%d", mId);
-
- camera2_device_t *device;
-
- res = module->open(name, reinterpret_cast<hw_device_t**>(&device));
-
- if (res != OK) {
- ALOGE("%s: Could not open camera %d: %s (%d)", __FUNCTION__,
- mId, strerror(-res), res);
- return res;
- }
-
- if (device->common.version != CAMERA_DEVICE_API_VERSION_2_0) {
- ALOGE("%s: Could not open camera %d: "
- "Camera device is not version %x, reports %x instead",
- __FUNCTION__, mId, CAMERA_DEVICE_API_VERSION_2_0,
- device->common.version);
- device->common.close(&device->common);
- return BAD_VALUE;
- }
-
- camera_info info;
- res = module->getCameraInfo(mId, &info);
- if (res != OK ) return res;
-
- if (info.device_version != device->common.version) {
- ALOGE("%s: HAL reporting mismatched camera_info version (%x)"
- " and device version (%x).", __FUNCTION__,
- device->common.version, info.device_version);
- device->common.close(&device->common);
- return BAD_VALUE;
- }
-
- res = mRequestQueue.setConsumerDevice(device);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to connect request queue to device: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- device->common.close(&device->common);
- return res;
- }
- res = mFrameQueue.setProducerDevice(device);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to connect frame queue to device: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- device->common.close(&device->common);
- return res;
- }
-
- res = device->ops->set_notify_callback(device, notificationCallback,
- NULL);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to initialize notification callback!",
- __FUNCTION__, mId);
- device->common.close(&device->common);
- return res;
- }
-
- mDeviceInfo = info.static_camera_characteristics;
- mHal2Device = device;
- mDeviceVersion = device->common.version;
-
- return OK;
-}
-
-status_t Camera2Device::disconnect() {
- ATRACE_CALL();
- status_t res = OK;
- if (mHal2Device) {
- ALOGV("%s: Closing device for camera %d", __FUNCTION__, mId);
-
- int inProgressCount = mHal2Device->ops->get_in_progress_count(mHal2Device);
- if (inProgressCount > 0) {
- ALOGW("%s: Closing camera device %d with %d requests in flight!",
- __FUNCTION__, mId, inProgressCount);
- }
- mReprocessStreams.clear();
- mStreams.clear();
- res = mHal2Device->common.close(&mHal2Device->common);
- if (res != OK) {
- ALOGE("%s: Could not close camera %d: %s (%d)",
- __FUNCTION__,
- mId, strerror(-res), res);
- }
- mHal2Device = NULL;
- ALOGV("%s: Shutdown complete", __FUNCTION__);
- }
- return res;
-}
-
-status_t Camera2Device::dump(int fd, const Vector<String16>& args) {
- ATRACE_CALL();
- String8 result;
- int detailLevel = 0;
- int n = args.size();
- String16 detailOption("-d");
- for (int i = 0; i + 1 < n; i++) {
- if (args[i] == detailOption) {
- String8 levelStr(args[i+1]);
- detailLevel = atoi(levelStr.string());
- }
- }
-
- result.appendFormat(" Camera2Device[%d] dump (detail level %d):\n",
- mId, detailLevel);
-
- if (detailLevel > 0) {
- result = " Request queue contents:\n";
- write(fd, result.string(), result.size());
- mRequestQueue.dump(fd, args);
-
- result = " Frame queue contents:\n";
- write(fd, result.string(), result.size());
- mFrameQueue.dump(fd, args);
- }
-
- result = " Active streams:\n";
- write(fd, result.string(), result.size());
- for (StreamList::iterator s = mStreams.begin(); s != mStreams.end(); s++) {
- (*s)->dump(fd, args);
- }
-
- result = " HAL device dump:\n";
- write(fd, result.string(), result.size());
-
- status_t res;
- res = mHal2Device->ops->dump(mHal2Device, fd);
-
- return res;
-}
-
-const CameraMetadata& Camera2Device::info() const {
- ALOGVV("%s: E", __FUNCTION__);
-
- return mDeviceInfo;
-}
-
-status_t Camera2Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
-
- mRequestQueue.enqueue(request.release());
- return OK;
-}
-
-status_t Camera2Device::captureList(const List<const CameraMetadata> &requests,
- int64_t* /*lastFrameNumber*/) {
- ATRACE_CALL();
- ALOGE("%s: Camera2Device burst capture not implemented", __FUNCTION__);
- return INVALID_OPERATION;
-}
-
-status_t Camera2Device::setStreamingRequest(const CameraMetadata &request,
- int64_t* /*lastFrameNumber*/) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- CameraMetadata streamRequest(request);
- return mRequestQueue.setStreamSlot(streamRequest.release());
-}
-
-status_t Camera2Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
- int64_t* /*lastFrameNumber*/) {
- ATRACE_CALL();
- ALOGE("%s, Camera2Device streaming burst not implemented", __FUNCTION__);
- return INVALID_OPERATION;
-}
-
-status_t Camera2Device::clearStreamingRequest(int64_t* /*lastFrameNumber*/) {
- ATRACE_CALL();
- return mRequestQueue.setStreamSlot(NULL);
-}
-
-status_t Camera2Device::waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) {
- ATRACE_CALL();
- return mRequestQueue.waitForDequeue(requestId, timeout);
-}
-
-status_t Camera2Device::createStream(sp<Surface> consumer,
- uint32_t width, uint32_t height, int format,
- android_dataspace /*dataSpace*/, camera3_stream_rotation_t rotation, int *id) {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: E", __FUNCTION__);
-
- sp<StreamAdapter> stream = new StreamAdapter(mHal2Device);
- size_t size = 0;
- if (format == HAL_PIXEL_FORMAT_BLOB) {
- size = getJpegBufferSize(width, height);
- }
- res = stream->connectToDevice(consumer, width, height, format, size);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create stream (%d x %d, format %x):"
- "%s (%d)",
- __FUNCTION__, mId, width, height, format, strerror(-res), res);
- return res;
- }
-
- *id = stream->getId();
-
- mStreams.push_back(stream);
- return OK;
-}
-
-ssize_t Camera2Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
- // Always give the max jpeg buffer size regardless of the actual jpeg resolution.
- camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
- if (jpegBufMaxSize.count == 0) {
- ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId);
- return BAD_VALUE;
- }
-
- return jpegBufMaxSize.data.i32[0];
-}
-
-status_t Camera2Device::createReprocessStreamFromStream(int outputId, int *id) {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: E", __FUNCTION__);
-
- bool found = false;
- StreamList::iterator streamI;
- for (streamI = mStreams.begin();
- streamI != mStreams.end(); streamI++) {
- if ((*streamI)->getId() == outputId) {
- found = true;
- break;
- }
- }
- if (!found) {
- ALOGE("%s: Camera %d: Output stream %d doesn't exist; can't create "
- "reprocess stream from it!", __FUNCTION__, mId, outputId);
- return BAD_VALUE;
- }
-
- sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mHal2Device);
-
- res = stream->connectToDevice((*streamI));
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create reprocessing stream from "\
- "stream %d: %s (%d)", __FUNCTION__, mId, outputId,
- strerror(-res), res);
- return res;
- }
-
- *id = stream->getId();
-
- mReprocessStreams.push_back(stream);
- return OK;
-}
-
-
-status_t Camera2Device::getStreamInfo(int id,
- uint32_t *width, uint32_t *height,
- uint32_t *format, android_dataspace *dataSpace) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- bool found = false;
- StreamList::iterator streamI;
- for (streamI = mStreams.begin();
- streamI != mStreams.end(); streamI++) {
- if ((*streamI)->getId() == id) {
- found = true;
- break;
- }
- }
- if (!found) {
- ALOGE("%s: Camera %d: Stream %d does not exist",
- __FUNCTION__, mId, id);
- return BAD_VALUE;
- }
-
- if (width) *width = (*streamI)->getWidth();
- if (height) *height = (*streamI)->getHeight();
- if (format) *format = (*streamI)->getFormat();
- if (dataSpace) *dataSpace = HAL_DATASPACE_UNKNOWN;
-
- return OK;
-}
-
-status_t Camera2Device::setStreamTransform(int id,
- int transform) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- bool found = false;
- StreamList::iterator streamI;
- for (streamI = mStreams.begin();
- streamI != mStreams.end(); streamI++) {
- if ((*streamI)->getId() == id) {
- found = true;
- break;
- }
- }
- if (!found) {
- ALOGE("%s: Camera %d: Stream %d does not exist",
- __FUNCTION__, mId, id);
- return BAD_VALUE;
- }
-
- return (*streamI)->setTransform(transform);
-}
-
-status_t Camera2Device::deleteStream(int id) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- bool found = false;
- for (StreamList::iterator streamI = mStreams.begin();
- streamI != mStreams.end(); streamI++) {
- if ((*streamI)->getId() == id) {
- status_t res = (*streamI)->release();
- if (res != OK) {
- ALOGE("%s: Unable to release stream %d from HAL device: "
- "%s (%d)", __FUNCTION__, id, strerror(-res), res);
- return res;
- }
- mStreams.erase(streamI);
- found = true;
- break;
- }
- }
- if (!found) {
- ALOGE("%s: Camera %d: Unable to find stream %d to delete",
- __FUNCTION__, mId, id);
- return BAD_VALUE;
- }
- return OK;
-}
-
-status_t Camera2Device::deleteReprocessStream(int id) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- bool found = false;
- for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
- streamI != mReprocessStreams.end(); streamI++) {
- if ((*streamI)->getId() == id) {
- status_t res = (*streamI)->release();
- if (res != OK) {
- ALOGE("%s: Unable to release reprocess stream %d from "
- "HAL device: %s (%d)", __FUNCTION__, id,
- strerror(-res), res);
- return res;
- }
- mReprocessStreams.erase(streamI);
- found = true;
- break;
- }
- }
- if (!found) {
- ALOGE("%s: Camera %d: Unable to find stream %d to delete",
- __FUNCTION__, mId, id);
- return BAD_VALUE;
- }
- return OK;
-}
-
-status_t Camera2Device::configureStreams(bool isConstrainedHighSpeed) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
-
- /**
- * HAL2 devices do not need to configure streams;
- * streams are created on the fly.
- */
- ALOGW("%s: No-op for HAL2 devices", __FUNCTION__);
-
- return OK;
-}
-
-
-status_t Camera2Device::createDefaultRequest(int templateId,
- CameraMetadata *request) {
- ATRACE_CALL();
- status_t err;
- ALOGV("%s: E", __FUNCTION__);
- camera_metadata_t *rawRequest;
- err = mHal2Device->ops->construct_default_request(
- mHal2Device, templateId, &rawRequest);
- request->acquire(rawRequest);
- return err;
-}
-
-status_t Camera2Device::waitUntilDrained() {
- ATRACE_CALL();
- static const uint32_t kSleepTime = 50000; // 50 ms
- static const uint32_t kMaxSleepTime = 10000000; // 10 s
- ALOGV("%s: Camera %d: Starting wait", __FUNCTION__, mId);
- if (mRequestQueue.getBufferCount() ==
- CAMERA2_REQUEST_QUEUE_IS_BOTTOMLESS) return INVALID_OPERATION;
-
- // TODO: Set up notifications from HAL, instead of sleeping here
- uint32_t totalTime = 0;
- while (mHal2Device->ops->get_in_progress_count(mHal2Device) > 0) {
- usleep(kSleepTime);
- totalTime += kSleepTime;
- if (totalTime > kMaxSleepTime) {
- ALOGE("%s: Waited %d us, %d requests still in flight", __FUNCTION__,
- totalTime, mHal2Device->ops->get_in_progress_count(mHal2Device));
- return TIMED_OUT;
- }
- }
- ALOGV("%s: Camera %d: HAL is idle", __FUNCTION__, mId);
- return OK;
-}
-
-status_t Camera2Device::setNotifyCallback(NotificationListener *listener) {
- ATRACE_CALL();
- status_t res;
- res = mHal2Device->ops->set_notify_callback(mHal2Device, notificationCallback,
- reinterpret_cast<void*>(listener) );
- if (res != OK) {
- ALOGE("%s: Unable to set notification callback!", __FUNCTION__);
- }
- return res;
-}
-
-bool Camera2Device::willNotify3A() {
- return true;
-}
-
-void Camera2Device::notificationCallback(int32_t msg_type,
- int32_t ext1,
- int32_t ext2,
- int32_t ext3,
- void *user) {
- ATRACE_CALL();
- NotificationListener *listener = reinterpret_cast<NotificationListener*>(user);
- ALOGV("%s: Notification %d, arguments %d, %d, %d", __FUNCTION__, msg_type,
- ext1, ext2, ext3);
- if (listener != NULL) {
- switch (msg_type) {
- case CAMERA2_MSG_ERROR:
- // TODO: This needs to be fixed. ext2 and ext3 need to be considered.
- listener->notifyError(
- ((ext1 == CAMERA2_MSG_ERROR_DEVICE)
- || (ext1 == CAMERA2_MSG_ERROR_HARDWARE)) ?
- ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE :
- ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE,
- CaptureResultExtras());
- break;
- case CAMERA2_MSG_SHUTTER: {
- // TODO: Only needed for camera2 API, which is unsupported
- // by HAL2 directly.
- // nsecs_t timestamp = (nsecs_t)ext2 | ((nsecs_t)(ext3) << 32 );
- // listener->notifyShutter(requestId, timestamp);
- break;
- }
- case CAMERA2_MSG_AUTOFOCUS:
- listener->notifyAutoFocus(ext1, ext2);
- break;
- case CAMERA2_MSG_AUTOEXPOSURE:
- listener->notifyAutoExposure(ext1, ext2);
- break;
- case CAMERA2_MSG_AUTOWB:
- listener->notifyAutoWhitebalance(ext1, ext2);
- break;
- default:
- ALOGE("%s: Unknown notification %d (arguments %d, %d, %d)!",
- __FUNCTION__, msg_type, ext1, ext2, ext3);
- }
- }
-}
-
-status_t Camera2Device::waitForNextFrame(nsecs_t timeout) {
- return mFrameQueue.waitForBuffer(timeout);
-}
-
-status_t Camera2Device::getNextResult(CaptureResult *result) {
- ATRACE_CALL();
- ALOGV("%s: get CaptureResult", __FUNCTION__);
- if (result == NULL) {
- ALOGE("%s: result pointer is NULL", __FUNCTION__);
- return BAD_VALUE;
- }
- status_t res;
- camera_metadata_t *rawFrame;
- res = mFrameQueue.dequeue(&rawFrame);
- if (rawFrame == NULL) {
- return NOT_ENOUGH_DATA;
- } else if (res == OK) {
- result->mMetadata.acquire(rawFrame);
- }
-
- return res;
-}
-
-status_t Camera2Device::triggerAutofocus(uint32_t id) {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: Triggering autofocus, id %d", __FUNCTION__, id);
- res = mHal2Device->ops->trigger_action(mHal2Device,
- CAMERA2_TRIGGER_AUTOFOCUS, id, 0);
- if (res != OK) {
- ALOGE("%s: Error triggering autofocus (id %d)",
- __FUNCTION__, id);
- }
- return res;
-}
-
-status_t Camera2Device::triggerCancelAutofocus(uint32_t id) {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: Canceling autofocus, id %d", __FUNCTION__, id);
- res = mHal2Device->ops->trigger_action(mHal2Device,
- CAMERA2_TRIGGER_CANCEL_AUTOFOCUS, id, 0);
- if (res != OK) {
- ALOGE("%s: Error canceling autofocus (id %d)",
- __FUNCTION__, id);
- }
- return res;
-}
-
-status_t Camera2Device::triggerPrecaptureMetering(uint32_t id) {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: Triggering precapture metering, id %d", __FUNCTION__, id);
- res = mHal2Device->ops->trigger_action(mHal2Device,
- CAMERA2_TRIGGER_PRECAPTURE_METERING, id, 0);
- if (res != OK) {
- ALOGE("%s: Error triggering precapture metering (id %d)",
- __FUNCTION__, id);
- }
- return res;
-}
-
-status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- bool found = false;
- status_t res = OK;
- for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
- streamI != mReprocessStreams.end(); streamI++) {
- if ((*streamI)->getId() == reprocessStreamId) {
- res = (*streamI)->pushIntoStream(buffer, listener);
- if (res != OK) {
- ALOGE("%s: Unable to push buffer to reprocess stream %d: %s (%d)",
- __FUNCTION__, reprocessStreamId, strerror(-res), res);
- return res;
- }
- found = true;
- break;
- }
- }
- if (!found) {
- ALOGE("%s: Camera %d: Unable to find reprocess stream %d",
- __FUNCTION__, mId, reprocessStreamId);
- res = BAD_VALUE;
- }
- return res;
-}
-
-status_t Camera2Device::flush(int64_t* /*lastFrameNumber*/) {
- ATRACE_CALL();
-
- mRequestQueue.clear();
- return waitUntilDrained();
-}
-
-status_t Camera2Device::prepare(int streamId) {
- ATRACE_CALL();
- ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
- return NO_INIT;
-}
-
-status_t Camera2Device::tearDown(int streamId) {
- ATRACE_CALL();
- ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
- return NO_INIT;
-}
-
-status_t Camera2Device::prepare(int maxCount, int streamId) {
- ATRACE_CALL();
- ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
- return NO_INIT;
-}
-
-uint32_t Camera2Device::getDeviceVersion() {
- ATRACE_CALL();
- return mDeviceVersion;
-}
-
-/**
- * Camera2Device::MetadataQueue
- */
-
-Camera2Device::MetadataQueue::MetadataQueue():
- mHal2Device(NULL),
- mFrameCount(0),
- mLatestRequestId(0),
- mCount(0),
- mStreamSlotCount(0),
- mSignalConsumer(true)
-{
- ATRACE_CALL();
- camera2_request_queue_src_ops::dequeue_request = consumer_dequeue;
- camera2_request_queue_src_ops::request_count = consumer_buffer_count;
- camera2_request_queue_src_ops::free_request = consumer_free;
-
- camera2_frame_queue_dst_ops::dequeue_frame = producer_dequeue;
- camera2_frame_queue_dst_ops::cancel_frame = producer_cancel;
- camera2_frame_queue_dst_ops::enqueue_frame = producer_enqueue;
-}
-
-Camera2Device::MetadataQueue::~MetadataQueue() {
- ATRACE_CALL();
- clear();
-}
-
-// Connect to camera2 HAL as consumer (input requests/reprocessing)
-status_t Camera2Device::MetadataQueue::setConsumerDevice(camera2_device_t *d) {
- ATRACE_CALL();
- status_t res;
- res = d->ops->set_request_queue_src_ops(d,
- this);
- if (res != OK) return res;
- mHal2Device = d;
- return OK;
-}
-
-status_t Camera2Device::MetadataQueue::setProducerDevice(camera2_device_t *d) {
- ATRACE_CALL();
- status_t res;
- res = d->ops->set_frame_queue_dst_ops(d,
- this);
- return res;
-}
-
-// Real interfaces
-status_t Camera2Device::MetadataQueue::enqueue(camera_metadata_t *buf) {
- ATRACE_CALL();
- ALOGVV("%s: E", __FUNCTION__);
- Mutex::Autolock l(mMutex);
-
- mCount++;
- mEntries.push_back(buf);
-
- return signalConsumerLocked();
-}
-
-int Camera2Device::MetadataQueue::getBufferCount() {
- ATRACE_CALL();
- Mutex::Autolock l(mMutex);
- if (mStreamSlotCount > 0) {
- return CAMERA2_REQUEST_QUEUE_IS_BOTTOMLESS;
- }
- return mCount;
-}
-
-status_t Camera2Device::MetadataQueue::dequeue(camera_metadata_t **buf,
- bool incrementCount)
-{
- ATRACE_CALL();
- ALOGVV("%s: E", __FUNCTION__);
- status_t res;
- Mutex::Autolock l(mMutex);
-
- if (mCount == 0) {
- if (mStreamSlotCount == 0) {
- ALOGVV("%s: Empty", __FUNCTION__);
- *buf = NULL;
- mSignalConsumer = true;
- return OK;
- }
- ALOGVV("%s: Streaming %d frames to queue", __FUNCTION__,
- mStreamSlotCount);
-
- for (List<camera_metadata_t*>::iterator slotEntry = mStreamSlot.begin();
- slotEntry != mStreamSlot.end();
- slotEntry++ ) {
- size_t entries = get_camera_metadata_entry_count(*slotEntry);
- size_t dataBytes = get_camera_metadata_data_count(*slotEntry);
-
- camera_metadata_t *copy =
- allocate_camera_metadata(entries, dataBytes);
- append_camera_metadata(copy, *slotEntry);
- mEntries.push_back(copy);
- }
- mCount = mStreamSlotCount;
- }
- ALOGVV("MetadataQueue: deque (%d buffers)", mCount);
- camera_metadata_t *b = *(mEntries.begin());
- mEntries.erase(mEntries.begin());
-
- if (incrementCount) {
- ATRACE_INT("cam2_request", mFrameCount);
- camera_metadata_entry_t frameCount;
- res = find_camera_metadata_entry(b,
- ANDROID_REQUEST_FRAME_COUNT,
- &frameCount);
- if (res != OK) {
- ALOGE("%s: Unable to add frame count: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- } else {
- *frameCount.data.i32 = mFrameCount;
- }
- mFrameCount++;
- }
-
- // Check for request ID, and if present, signal waiters.
- camera_metadata_entry_t requestId;
- res = find_camera_metadata_entry(b,
- ANDROID_REQUEST_ID,
- &requestId);
- if (res == OK) {
- mLatestRequestId = requestId.data.i32[0];
- mNewRequestId.signal();
- }
-
- *buf = b;
- mCount--;
-
- return OK;
-}
-
-status_t Camera2Device::MetadataQueue::waitForBuffer(nsecs_t timeout)
-{
- Mutex::Autolock l(mMutex);
- status_t res;
- while (mCount == 0) {
- res = notEmpty.waitRelative(mMutex,timeout);
- if (res != OK) return res;
- }
- return OK;
-}
-
-status_t Camera2Device::MetadataQueue::waitForDequeue(int32_t id,
- nsecs_t timeout) {
- Mutex::Autolock l(mMutex);
- status_t res;
- while (mLatestRequestId != id) {
- nsecs_t startTime = systemTime();
-
- res = mNewRequestId.waitRelative(mMutex, timeout);
- if (res != OK) return res;
-
- timeout -= (systemTime() - startTime);
- }
-
- return OK;
-}
-
-status_t Camera2Device::MetadataQueue::setStreamSlot(camera_metadata_t *buf)
-{
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock l(mMutex);
- if (buf == NULL) {
- freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
- mStreamSlotCount = 0;
- return OK;
- }
-
- if (mStreamSlotCount > 1) {
- List<camera_metadata_t*>::iterator deleter = ++mStreamSlot.begin();
- freeBuffers(++mStreamSlot.begin(), mStreamSlot.end());
- mStreamSlotCount = 1;
- }
- if (mStreamSlotCount == 1) {
- free_camera_metadata( *(mStreamSlot.begin()) );
- *(mStreamSlot.begin()) = buf;
- } else {
- mStreamSlot.push_front(buf);
- mStreamSlotCount = 1;
- }
- return signalConsumerLocked();
-}
-
-status_t Camera2Device::MetadataQueue::setStreamSlot(
- const List<camera_metadata_t*> &bufs)
-{
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
- Mutex::Autolock l(mMutex);
-
- if (mStreamSlotCount > 0) {
- freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
- }
- mStreamSlotCount = 0;
- for (List<camera_metadata_t*>::const_iterator r = bufs.begin();
- r != bufs.end(); r++) {
- mStreamSlot.push_back(*r);
- mStreamSlotCount++;
- }
- return signalConsumerLocked();
-}
-
-status_t Camera2Device::MetadataQueue::clear()
-{
- ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
-
- Mutex::Autolock l(mMutex);
-
- // Clear streaming slot
- freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
- mStreamSlotCount = 0;
-
- // Clear request queue
- freeBuffers(mEntries.begin(), mEntries.end());
- mCount = 0;
- return OK;
-}
-
-status_t Camera2Device::MetadataQueue::dump(int fd,
- const Vector<String16>& /*args*/) {
- ATRACE_CALL();
- String8 result;
- status_t notLocked;
- notLocked = mMutex.tryLock();
- if (notLocked) {
- result.append(" (Unable to lock queue mutex)\n");
- }
- result.appendFormat(" Current frame number: %d\n", mFrameCount);
- if (mStreamSlotCount == 0) {
- result.append(" Stream slot: Empty\n");
- write(fd, result.string(), result.size());
- } else {
- result.appendFormat(" Stream slot: %zu entries\n",
- mStreamSlot.size());
- int i = 0;
- for (List<camera_metadata_t*>::iterator r = mStreamSlot.begin();
- r != mStreamSlot.end(); r++) {
- result = String8::format(" Stream slot buffer %d:\n", i);
- write(fd, result.string(), result.size());
- dump_indented_camera_metadata(*r, fd, 2, 10);
- i++;
- }
- }
- if (mEntries.size() == 0) {
- result = " Main queue is empty\n";
- write(fd, result.string(), result.size());
- } else {
- result = String8::format(" Main queue has %zu entries:\n",
- mEntries.size());
- int i = 0;
- for (List<camera_metadata_t*>::iterator r = mEntries.begin();
- r != mEntries.end(); r++) {
- result = String8::format(" Queue entry %d:\n", i);
- write(fd, result.string(), result.size());
- dump_indented_camera_metadata(*r, fd, 2, 10);
- i++;
- }
- }
-
- if (notLocked == 0) {
- mMutex.unlock();
- }
-
- return OK;
-}
-
-status_t Camera2Device::MetadataQueue::signalConsumerLocked() {
- ATRACE_CALL();
- status_t res = OK;
- notEmpty.signal();
- if (mSignalConsumer && mHal2Device != NULL) {
- mSignalConsumer = false;
-
- mMutex.unlock();
- ALOGV("%s: Signaling consumer", __FUNCTION__);
- res = mHal2Device->ops->notify_request_queue_not_empty(mHal2Device);
- mMutex.lock();
- }
- return res;
-}
-
-status_t Camera2Device::MetadataQueue::freeBuffers(
- List<camera_metadata_t*>::iterator start,
- List<camera_metadata_t*>::iterator end)
-{
- ATRACE_CALL();
- while (start != end) {
- free_camera_metadata(*start);
- start = mStreamSlot.erase(start);
- }
- return OK;
-}
-
-Camera2Device::MetadataQueue* Camera2Device::MetadataQueue::getInstance(
- const camera2_request_queue_src_ops_t *q)
-{
- const MetadataQueue* cmq = static_cast<const MetadataQueue*>(q);
- return const_cast<MetadataQueue*>(cmq);
-}
-
-Camera2Device::MetadataQueue* Camera2Device::MetadataQueue::getInstance(
- const camera2_frame_queue_dst_ops_t *q)
-{
- const MetadataQueue* cmq = static_cast<const MetadataQueue*>(q);
- return const_cast<MetadataQueue*>(cmq);
-}
-
-int Camera2Device::MetadataQueue::consumer_buffer_count(
- const camera2_request_queue_src_ops_t *q)
-{
- MetadataQueue *queue = getInstance(q);
- return queue->getBufferCount();
-}
-
-int Camera2Device::MetadataQueue::consumer_dequeue(
- const camera2_request_queue_src_ops_t *q,
- camera_metadata_t **buffer)
-{
- MetadataQueue *queue = getInstance(q);
- return queue->dequeue(buffer, true);
-}
-
-int Camera2Device::MetadataQueue::consumer_free(
- const camera2_request_queue_src_ops_t *q,
- camera_metadata_t *old_buffer)
-{
- ATRACE_CALL();
- MetadataQueue *queue = getInstance(q);
- (void)queue;
- free_camera_metadata(old_buffer);
- return OK;
-}
-
-int Camera2Device::MetadataQueue::producer_dequeue(
- const camera2_frame_queue_dst_ops_t * /*q*/,
- size_t entries, size_t bytes,
- camera_metadata_t **buffer)
-{
- ATRACE_CALL();
- camera_metadata_t *new_buffer =
- allocate_camera_metadata(entries, bytes);
- if (new_buffer == NULL) return NO_MEMORY;
- *buffer = new_buffer;
- return OK;
-}
-
-int Camera2Device::MetadataQueue::producer_cancel(
- const camera2_frame_queue_dst_ops_t * /*q*/,
- camera_metadata_t *old_buffer)
-{
- ATRACE_CALL();
- free_camera_metadata(old_buffer);
- return OK;
-}
-
-int Camera2Device::MetadataQueue::producer_enqueue(
- const camera2_frame_queue_dst_ops_t *q,
- camera_metadata_t *filled_buffer)
-{
- MetadataQueue *queue = getInstance(q);
- return queue->enqueue(filled_buffer);
-}
-
-/**
- * Camera2Device::StreamAdapter
- */
-
-#ifndef container_of
-#define container_of(ptr, type, member) \
- (type *)((char*)(ptr) - offsetof(type, member))
-#endif
-
-Camera2Device::StreamAdapter::StreamAdapter(camera2_device_t *d):
- mState(RELEASED),
- mHal2Device(d),
- mId(-1),
- mWidth(0), mHeight(0), mFormat(0), mSize(0), mUsage(0),
- mMaxProducerBuffers(0), mMaxConsumerBuffers(0),
- mTotalBuffers(0),
- mFormatRequested(0),
- mActiveBuffers(0),
- mFrameCount(0),
- mLastTimestamp(0)
-{
- camera2_stream_ops::dequeue_buffer = dequeue_buffer;
- camera2_stream_ops::enqueue_buffer = enqueue_buffer;
- camera2_stream_ops::cancel_buffer = cancel_buffer;
- camera2_stream_ops::set_crop = set_crop;
-}
-
-Camera2Device::StreamAdapter::~StreamAdapter() {
- ATRACE_CALL();
- if (mState != RELEASED) {
- release();
- }
-}
-
-status_t Camera2Device::StreamAdapter::connectToDevice(
- sp<ANativeWindow> consumer,
- uint32_t width, uint32_t height, int format, size_t size) {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: E", __FUNCTION__);
-
- if (mState != RELEASED) return INVALID_OPERATION;
- if (consumer == NULL) {
- ALOGE("%s: Null consumer passed to stream adapter", __FUNCTION__);
- return BAD_VALUE;
- }
-
- ALOGV("%s: New stream parameters %d x %d, format 0x%x, size %zu",
- __FUNCTION__, width, height, format, size);
-
- mConsumerInterface = consumer;
- mWidth = width;
- mHeight = height;
- mSize = (format == HAL_PIXEL_FORMAT_BLOB) ? size : 0;
- mFormatRequested = format;
-
- // Allocate device-side stream interface
-
- uint32_t id;
- uint32_t formatActual;
- uint32_t usage;
- uint32_t maxBuffers = 2;
- res = mHal2Device->ops->allocate_stream(mHal2Device,
- mWidth, mHeight, mFormatRequested, getStreamOps(),
- &id, &formatActual, &usage, &maxBuffers);
- if (res != OK) {
- ALOGE("%s: Device stream allocation failed: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
-
- ALOGV("%s: Allocated stream id %d, actual format 0x%x, "
- "usage 0x%x, producer wants %d buffers", __FUNCTION__,
- id, formatActual, usage, maxBuffers);
-
- mId = id;
- mFormat = formatActual;
- mUsage = usage;
- mMaxProducerBuffers = maxBuffers;
-
- mState = ALLOCATED;
-
- // Configure consumer-side ANativeWindow interface
- res = native_window_api_connect(mConsumerInterface.get(),
- NATIVE_WINDOW_API_CAMERA);
- if (res != OK) {
- ALOGE("%s: Unable to connect to native window for stream %d",
- __FUNCTION__, mId);
-
- return res;
- }
-
- mState = CONNECTED;
-
- res = native_window_set_usage(mConsumerInterface.get(), mUsage);
- if (res != OK) {
- ALOGE("%s: Unable to configure usage %08x for stream %d",
- __FUNCTION__, mUsage, mId);
- return res;
- }
-
- res = native_window_set_scaling_mode(mConsumerInterface.get(),
- NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- if (res != OK) {
- ALOGE("%s: Unable to configure stream scaling: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
-
- res = setTransform(0);
- if (res != OK) {
- return res;
- }
-
- if (mFormat == HAL_PIXEL_FORMAT_BLOB) {
- res = native_window_set_buffers_dimensions(mConsumerInterface.get(),
- mSize, 1);
- if (res != OK) {
- ALOGE("%s: Unable to configure compressed stream buffer dimensions"
- " %d x %d, size %zu for stream %d",
- __FUNCTION__, mWidth, mHeight, mSize, mId);
- return res;
- }
- } else {
- res = native_window_set_buffers_dimensions(mConsumerInterface.get(),
- mWidth, mHeight);
- if (res != OK) {
- ALOGE("%s: Unable to configure stream buffer dimensions"
- " %d x %d for stream %d",
- __FUNCTION__, mWidth, mHeight, mId);
- return res;
- }
- }
-
- res = native_window_set_buffers_format(mConsumerInterface.get(), mFormat);
- if (res != OK) {
- ALOGE("%s: Unable to configure stream buffer format"
- " %#x for stream %d",
- __FUNCTION__, mFormat, mId);
- return res;
- }
-
- int maxConsumerBuffers;
- res = mConsumerInterface->query(mConsumerInterface.get(),
- NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
- if (res != OK) {
- ALOGE("%s: Unable to query consumer undequeued"
- " buffer count for stream %d", __FUNCTION__, mId);
- return res;
- }
- mMaxConsumerBuffers = maxConsumerBuffers;
-
- ALOGV("%s: Consumer wants %d buffers", __FUNCTION__,
- mMaxConsumerBuffers);
-
- mTotalBuffers = mMaxConsumerBuffers + mMaxProducerBuffers;
- mActiveBuffers = 0;
- mFrameCount = 0;
- mLastTimestamp = 0;
-
- res = native_window_set_buffer_count(mConsumerInterface.get(),
- mTotalBuffers);
- if (res != OK) {
- ALOGE("%s: Unable to set buffer count for stream %d",
- __FUNCTION__, mId);
- return res;
- }
-
- // Register allocated buffers with HAL device
- buffer_handle_t *buffers = new buffer_handle_t[mTotalBuffers];
- ANativeWindowBuffer **anwBuffers = new ANativeWindowBuffer*[mTotalBuffers];
- uint32_t bufferIdx = 0;
- for (; bufferIdx < mTotalBuffers; bufferIdx++) {
- res = native_window_dequeue_buffer_and_wait(mConsumerInterface.get(),
- &anwBuffers[bufferIdx]);
- if (res != OK) {
- ALOGE("%s: Unable to dequeue buffer %d for initial registration for "
- "stream %d", __FUNCTION__, bufferIdx, mId);
- goto cleanUpBuffers;
- }
-
- buffers[bufferIdx] = anwBuffers[bufferIdx]->handle;
- ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)buffers[bufferIdx]);
- }
-
- ALOGV("%s: Registering %d buffers with camera HAL", __FUNCTION__, mTotalBuffers);
- res = mHal2Device->ops->register_stream_buffers(mHal2Device,
- mId,
- mTotalBuffers,
- buffers);
- if (res != OK) {
- ALOGE("%s: Unable to register buffers with HAL device for stream %d",
- __FUNCTION__, mId);
- } else {
- mState = ACTIVE;
- }
-
-cleanUpBuffers:
- ALOGV("%s: Cleaning up %d buffers", __FUNCTION__, bufferIdx);
- for (uint32_t i = 0; i < bufferIdx; i++) {
- res = mConsumerInterface->cancelBuffer(mConsumerInterface.get(),
- anwBuffers[i], -1);
- if (res != OK) {
- ALOGE("%s: Unable to cancel buffer %d after registration",
- __FUNCTION__, i);
- }
- }
- delete[] anwBuffers;
- delete[] buffers;
-
- return res;
-}
-
-status_t Camera2Device::StreamAdapter::release() {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: Releasing stream %d (%d x %d, format %d)", __FUNCTION__, mId,
- mWidth, mHeight, mFormat);
- if (mState >= ALLOCATED) {
- res = mHal2Device->ops->release_stream(mHal2Device, mId);
- if (res != OK) {
- ALOGE("%s: Unable to release stream %d",
- __FUNCTION__, mId);
- return res;
- }
- }
- if (mState >= CONNECTED) {
- res = native_window_api_disconnect(mConsumerInterface.get(),
- NATIVE_WINDOW_API_CAMERA);
-
- /* this is not an error. if client calling process dies,
- the window will also die and all calls to it will return
- DEAD_OBJECT, thus it's already "disconnected" */
- if (res == DEAD_OBJECT) {
- ALOGW("%s: While disconnecting stream %d from native window, the"
- " native window died from under us", __FUNCTION__, mId);
- }
- else if (res != OK) {
- ALOGE("%s: Unable to disconnect stream %d from native window (error %d %s)",
- __FUNCTION__, mId, res, strerror(-res));
- return res;
- }
- }
- mId = -1;
- mState = RELEASED;
- return OK;
-}
-
-status_t Camera2Device::StreamAdapter::setTransform(int transform) {
- ATRACE_CALL();
- status_t res;
- if (mState < CONNECTED) {
- ALOGE("%s: Cannot set transform on unconnected stream", __FUNCTION__);
- return INVALID_OPERATION;
- }
- res = native_window_set_buffers_transform(mConsumerInterface.get(),
- transform);
- if (res != OK) {
- ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
- __FUNCTION__, transform, strerror(-res), res);
- }
- return res;
-}
-
-status_t Camera2Device::StreamAdapter::dump(int fd,
- const Vector<String16>& /*args*/) {
- ATRACE_CALL();
- String8 result = String8::format(" Stream %d: %d x %d, format 0x%x\n",
- mId, mWidth, mHeight, mFormat);
- result.appendFormat(" size %zu, usage 0x%x, requested format 0x%x\n",
- mSize, mUsage, mFormatRequested);
- result.appendFormat(" total buffers: %d, dequeued buffers: %d\n",
- mTotalBuffers, mActiveBuffers);
- result.appendFormat(" frame count: %d, last timestamp %" PRId64 "\n",
- mFrameCount, mLastTimestamp);
- write(fd, result.string(), result.size());
- return OK;
-}
-
-const camera2_stream_ops *Camera2Device::StreamAdapter::getStreamOps() {
- return static_cast<camera2_stream_ops *>(this);
-}
-
-ANativeWindow* Camera2Device::StreamAdapter::toANW(
- const camera2_stream_ops_t *w) {
- return static_cast<const StreamAdapter*>(w)->mConsumerInterface.get();
-}
-
-int Camera2Device::StreamAdapter::dequeue_buffer(const camera2_stream_ops_t *w,
- buffer_handle_t** buffer) {
- ATRACE_CALL();
- int res;
- StreamAdapter* stream =
- const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
- if (stream->mState != ACTIVE) {
- ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
- return INVALID_OPERATION;
- }
-
- ANativeWindow *a = toANW(w);
- ANativeWindowBuffer* anb;
- res = native_window_dequeue_buffer_and_wait(a, &anb);
- if (res != OK) {
- ALOGE("Stream %d dequeue: Error from native_window: %s (%d)", stream->mId,
- strerror(-res), res);
- return res;
- }
-
- *buffer = &(anb->handle);
- stream->mActiveBuffers++;
-
- ALOGVV("Stream %d dequeue: Buffer %p dequeued", stream->mId, (void*)(**buffer));
- return res;
-}
-
-int Camera2Device::StreamAdapter::enqueue_buffer(const camera2_stream_ops_t* w,
- int64_t timestamp,
- buffer_handle_t* buffer) {
- ATRACE_CALL();
- StreamAdapter *stream =
- const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
- stream->mFrameCount++;
- ALOGVV("Stream %d enqueue: Frame %d (%p) captured at %lld ns",
- stream->mId, stream->mFrameCount, (void*)(*buffer), timestamp);
- int state = stream->mState;
- if (state != ACTIVE) {
- ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
- return INVALID_OPERATION;
- }
- ANativeWindow *a = toANW(w);
- status_t err;
-
- err = native_window_set_buffers_timestamp(a, timestamp);
- if (err != OK) {
- ALOGE("%s: Error setting timestamp on native window: %s (%d)",
- __FUNCTION__, strerror(-err), err);
- return err;
- }
- err = a->queueBuffer(a,
- container_of(buffer, ANativeWindowBuffer, handle), -1);
- if (err != OK) {
- ALOGE("%s: Error queueing buffer to native window: %s (%d)",
- __FUNCTION__, strerror(-err), err);
- return err;
- }
-
- stream->mActiveBuffers--;
- stream->mLastTimestamp = timestamp;
- return OK;
-}
-
-int Camera2Device::StreamAdapter::cancel_buffer(const camera2_stream_ops_t* w,
- buffer_handle_t* buffer) {
- ATRACE_CALL();
- StreamAdapter *stream =
- const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
- ALOGVV("Stream %d cancel: Buffer %p",
- stream->mId, (void*)(*buffer));
- if (stream->mState != ACTIVE) {
- ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
- return INVALID_OPERATION;
- }
-
- ANativeWindow *a = toANW(w);
- int err = a->cancelBuffer(a,
- container_of(buffer, ANativeWindowBuffer, handle), -1);
- if (err != OK) {
- ALOGE("%s: Error canceling buffer to native window: %s (%d)",
- __FUNCTION__, strerror(-err), err);
- return err;
- }
-
- stream->mActiveBuffers--;
- return OK;
-}
-
-int Camera2Device::StreamAdapter::set_crop(const camera2_stream_ops_t* w,
- int left, int top, int right, int bottom) {
- ATRACE_CALL();
- int state = static_cast<const StreamAdapter*>(w)->mState;
- if (state != ACTIVE) {
- ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
- return INVALID_OPERATION;
- }
- ANativeWindow *a = toANW(w);
- android_native_rect_t crop = { left, top, right, bottom };
- return native_window_set_crop(a, &crop);
-}
-
-/**
- * Camera2Device::ReprocessStreamAdapter
- */
-
-#ifndef container_of
-#define container_of(ptr, type, member) \
- (type *)((char*)(ptr) - offsetof(type, member))
-#endif
-
-Camera2Device::ReprocessStreamAdapter::ReprocessStreamAdapter(camera2_device_t *d):
- mState(RELEASED),
- mHal2Device(d),
- mId(-1),
- mWidth(0), mHeight(0), mFormat(0),
- mActiveBuffers(0),
- mFrameCount(0)
-{
- ATRACE_CALL();
- camera2_stream_in_ops::acquire_buffer = acquire_buffer;
- camera2_stream_in_ops::release_buffer = release_buffer;
-}
-
-Camera2Device::ReprocessStreamAdapter::~ReprocessStreamAdapter() {
- ATRACE_CALL();
- if (mState != RELEASED) {
- release();
- }
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::connectToDevice(
- const sp<StreamAdapter> &outputStream) {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: E", __FUNCTION__);
-
- if (mState != RELEASED) return INVALID_OPERATION;
- if (outputStream == NULL) {
- ALOGE("%s: Null base stream passed to reprocess stream adapter",
- __FUNCTION__);
- return BAD_VALUE;
- }
-
- mBaseStream = outputStream;
- mWidth = outputStream->getWidth();
- mHeight = outputStream->getHeight();
- mFormat = outputStream->getFormat();
-
- ALOGV("%s: New reprocess stream parameters %d x %d, format 0x%x",
- __FUNCTION__, mWidth, mHeight, mFormat);
-
- // Allocate device-side stream interface
-
- uint32_t id;
- res = mHal2Device->ops->allocate_reprocess_stream_from_stream(mHal2Device,
- outputStream->getId(), getStreamOps(),
- &id);
- if (res != OK) {
- ALOGE("%s: Device reprocess stream allocation failed: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
-
- ALOGV("%s: Allocated reprocess stream id %d based on stream %d",
- __FUNCTION__, id, outputStream->getId());
-
- mId = id;
-
- mState = ACTIVE;
-
- return OK;
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::release() {
- ATRACE_CALL();
- status_t res;
- ALOGV("%s: Releasing stream %d", __FUNCTION__, mId);
- if (mState >= ACTIVE) {
- res = mHal2Device->ops->release_reprocess_stream(mHal2Device, mId);
- if (res != OK) {
- ALOGE("%s: Unable to release stream %d",
- __FUNCTION__, mId);
- return res;
- }
- }
-
- List<QueueEntry>::iterator s;
- for (s = mQueue.begin(); s != mQueue.end(); s++) {
- sp<BufferReleasedListener> listener = s->releaseListener.promote();
- if (listener != 0) listener->onBufferReleased(s->handle);
- }
- for (s = mInFlightQueue.begin(); s != mInFlightQueue.end(); s++) {
- sp<BufferReleasedListener> listener = s->releaseListener.promote();
- if (listener != 0) listener->onBufferReleased(s->handle);
- }
- mQueue.clear();
- mInFlightQueue.clear();
-
- mState = RELEASED;
- return OK;
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::pushIntoStream(
- buffer_handle_t *handle, const wp<BufferReleasedListener> &releaseListener) {
- ATRACE_CALL();
- // TODO: Some error checking here would be nice
- ALOGV("%s: Pushing buffer %p to stream", __FUNCTION__, (void*)(*handle));
-
- QueueEntry entry;
- entry.handle = handle;
- entry.releaseListener = releaseListener;
- mQueue.push_back(entry);
- return OK;
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::dump(int fd,
- const Vector<String16>& /*args*/) {
- ATRACE_CALL();
- String8 result =
- String8::format(" Reprocess stream %d: %d x %d, fmt 0x%x\n",
- mId, mWidth, mHeight, mFormat);
- result.appendFormat(" acquired buffers: %d\n",
- mActiveBuffers);
- result.appendFormat(" frame count: %d\n",
- mFrameCount);
- write(fd, result.string(), result.size());
- return OK;
-}
-
-const camera2_stream_in_ops *Camera2Device::ReprocessStreamAdapter::getStreamOps() {
- return static_cast<camera2_stream_in_ops *>(this);
-}
-
-int Camera2Device::ReprocessStreamAdapter::acquire_buffer(
- const camera2_stream_in_ops_t *w,
- buffer_handle_t** buffer) {
- ATRACE_CALL();
-
- ReprocessStreamAdapter* stream =
- const_cast<ReprocessStreamAdapter*>(
- static_cast<const ReprocessStreamAdapter*>(w));
- if (stream->mState != ACTIVE) {
- ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
- return INVALID_OPERATION;
- }
-
- if (stream->mQueue.empty()) {
- *buffer = NULL;
- return OK;
- }
-
- QueueEntry &entry = *(stream->mQueue.begin());
-
- *buffer = entry.handle;
-
- stream->mInFlightQueue.push_back(entry);
- stream->mQueue.erase(stream->mQueue.begin());
-
- stream->mActiveBuffers++;
-
- ALOGV("Stream %d acquire: Buffer %p acquired", stream->mId,
- (void*)(**buffer));
- return OK;
-}
-
-int Camera2Device::ReprocessStreamAdapter::release_buffer(
- const camera2_stream_in_ops_t* w,
- buffer_handle_t* buffer) {
- ATRACE_CALL();
- ReprocessStreamAdapter *stream =
- const_cast<ReprocessStreamAdapter*>(
- static_cast<const ReprocessStreamAdapter*>(w) );
- stream->mFrameCount++;
- ALOGV("Reprocess stream %d release: Frame %d (%p)",
- stream->mId, stream->mFrameCount, (void*)*buffer);
- int state = stream->mState;
- if (state != ACTIVE) {
- ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
- return INVALID_OPERATION;
- }
- stream->mActiveBuffers--;
-
- List<QueueEntry>::iterator s;
- for (s = stream->mInFlightQueue.begin(); s != stream->mInFlightQueue.end(); s++) {
- if ( s->handle == buffer ) break;
- }
- if (s == stream->mInFlightQueue.end()) {
- ALOGE("%s: Can't find buffer %p in in-flight list!", __FUNCTION__,
- buffer);
- return INVALID_OPERATION;
- }
-
- sp<BufferReleasedListener> listener = s->releaseListener.promote();
- if (listener != 0) {
- listener->onBufferReleased(s->handle);
- } else {
- ALOGE("%s: Can't free buffer - missing listener", __FUNCTION__);
- }
- stream->mInFlightQueue.erase(s);
-
- return OK;
-}
-
-// camera 2 devices don't support reprocessing
-status_t Camera2Device::createInputStream(
- uint32_t width, uint32_t height, int format, int *id) {
- ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
- return INVALID_OPERATION;
-}
-
-// camera 2 devices don't support reprocessing
-status_t Camera2Device::getInputBufferProducer(
- sp<IGraphicBufferProducer> *producer) {
- ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
- return INVALID_OPERATION;
-}
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
deleted file mode 100644
index b4d343c..0000000
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2DEVICE_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2DEVICE_H
-
-#include <utils/Condition.h>
-#include <utils/Errors.h>
-#include <utils/List.h>
-#include <utils/Mutex.h>
-
-#include "common/CameraDeviceBase.h"
-
-namespace android {
-
-/**
- * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_2_0
- *
- * TODO for camera2 API implementation:
- * Does not produce notifyShutter / notifyIdle callbacks to NotificationListener
- * Use waitUntilDrained for idle.
- */
-class Camera2Device: public CameraDeviceBase {
- public:
- Camera2Device(int id);
-
- virtual ~Camera2Device();
-
- /**
- * CameraDevice interface
- */
- virtual int getId() const;
- virtual status_t initialize(CameraModule *module);
- virtual status_t disconnect();
- virtual status_t dump(int fd, const Vector<String16>& args);
- virtual const CameraMetadata& info() const;
- virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL);
- virtual status_t captureList(const List<const CameraMetadata> &requests,
- int64_t *lastFrameNumber = NULL);
- virtual status_t setStreamingRequest(const CameraMetadata &request,
- int64_t *lastFrameNumber = NULL);
- virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
- int64_t *lastFrameNumber = NULL);
- virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
- virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
- virtual status_t createStream(sp<Surface> consumer,
- uint32_t width, uint32_t height, int format,
- android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
- virtual status_t createInputStream(
- uint32_t width, uint32_t height, int format, int *id);
- virtual status_t createReprocessStreamFromStream(int outputId, int *id);
- virtual status_t getStreamInfo(int id,
- uint32_t *width, uint32_t *height,
- uint32_t *format, android_dataspace *dataSpace);
- virtual status_t setStreamTransform(int id, int transform);
- virtual status_t deleteStream(int id);
- virtual status_t deleteReprocessStream(int id);
- // No-op on HAL2 devices
- virtual status_t configureStreams(bool isConstrainedHighSpeed = false);
- virtual status_t getInputBufferProducer(
- sp<IGraphicBufferProducer> *producer);
- virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
- virtual status_t waitUntilDrained();
- virtual status_t setNotifyCallback(NotificationListener *listener);
- virtual bool willNotify3A();
- virtual status_t waitForNextFrame(nsecs_t timeout);
- virtual status_t getNextResult(CaptureResult *frame);
- virtual status_t triggerAutofocus(uint32_t id);
- virtual status_t triggerCancelAutofocus(uint32_t id);
- virtual status_t triggerPrecaptureMetering(uint32_t id);
- virtual status_t pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
- // Flush implemented as just a wait
- virtual status_t flush(int64_t *lastFrameNumber = NULL);
- // Prepare and tearDown are no-ops
- virtual status_t prepare(int streamId);
- virtual status_t tearDown(int streamId);
- virtual status_t prepare(int maxCount, int streamId);
-
- virtual uint32_t getDeviceVersion();
- virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
-
- private:
- const int mId;
- camera2_device_t *mHal2Device;
-
- CameraMetadata mDeviceInfo;
-
- uint32_t mDeviceVersion;
-
- /**
- * Queue class for both sending requests to a camera2 device, and for
- * receiving frames from a camera2 device.
- */
- class MetadataQueue: public camera2_request_queue_src_ops_t,
- public camera2_frame_queue_dst_ops_t {
- public:
- MetadataQueue();
- ~MetadataQueue();
-
- // Interface to camera2 HAL device, either for requests (device is
- // consumer) or for frames (device is producer)
- const camera2_request_queue_src_ops_t* getToConsumerInterface();
- void setFromConsumerInterface(camera2_device_t *d);
-
- // Connect queue consumer endpoint to a camera2 device
- status_t setConsumerDevice(camera2_device_t *d);
- // Connect queue producer endpoint to a camera2 device
- status_t setProducerDevice(camera2_device_t *d);
-
- const camera2_frame_queue_dst_ops_t* getToProducerInterface();
-
- // Real interfaces. On enqueue, queue takes ownership of buffer pointer
- // On dequeue, user takes ownership of buffer pointer.
- status_t enqueue(camera_metadata_t *buf);
- status_t dequeue(camera_metadata_t **buf, bool incrementCount = false);
- int getBufferCount();
- status_t waitForBuffer(nsecs_t timeout);
- // Wait until a buffer with the given ID is dequeued. Will return
- // immediately if the latest buffer dequeued has that ID.
- status_t waitForDequeue(int32_t id, nsecs_t timeout);
-
- // Set repeating buffer(s); if the queue is empty on a dequeue call, the
- // queue copies the contents of the stream slot into the queue, and then
- // dequeues the first new entry. The methods take the ownership of the
- // metadata buffers passed in.
- status_t setStreamSlot(camera_metadata_t *buf);
- status_t setStreamSlot(const List<camera_metadata_t*> &bufs);
-
- // Clear the request queue and the streaming slot
- status_t clear();
-
- status_t dump(int fd, const Vector<String16>& args);
-
- private:
- status_t signalConsumerLocked();
- status_t freeBuffers(List<camera_metadata_t*>::iterator start,
- List<camera_metadata_t*>::iterator end);
-
- camera2_device_t *mHal2Device;
-
- Mutex mMutex;
- Condition notEmpty;
-
- int mFrameCount;
- int32_t mLatestRequestId;
- Condition mNewRequestId;
-
- int mCount;
- List<camera_metadata_t*> mEntries;
- int mStreamSlotCount;
- List<camera_metadata_t*> mStreamSlot;
-
- bool mSignalConsumer;
-
- static MetadataQueue* getInstance(
- const camera2_frame_queue_dst_ops_t *q);
- static MetadataQueue* getInstance(
- const camera2_request_queue_src_ops_t *q);
-
- static int consumer_buffer_count(
- const camera2_request_queue_src_ops_t *q);
-
- static int consumer_dequeue(const camera2_request_queue_src_ops_t *q,
- camera_metadata_t **buffer);
-
- static int consumer_free(const camera2_request_queue_src_ops_t *q,
- camera_metadata_t *old_buffer);
-
- static int producer_dequeue(const camera2_frame_queue_dst_ops_t *q,
- size_t entries, size_t bytes,
- camera_metadata_t **buffer);
-
- static int producer_cancel(const camera2_frame_queue_dst_ops_t *q,
- camera_metadata_t *old_buffer);
-
- static int producer_enqueue(const camera2_frame_queue_dst_ops_t *q,
- camera_metadata_t *filled_buffer);
-
- }; // class MetadataQueue
-
- MetadataQueue mRequestQueue;
- MetadataQueue mFrameQueue;
-
- /**
- * Adapter from an ANativeWindow interface to camera2 device stream ops.
- * Also takes care of allocating/deallocating stream in device interface
- */
- class StreamAdapter: public camera2_stream_ops, public virtual RefBase {
- public:
- StreamAdapter(camera2_device_t *d);
-
- ~StreamAdapter();
-
- /**
- * Create a HAL device stream of the requested size and format.
- *
- * If format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, then the HAL device
- * selects an appropriate format; it can be queried with getFormat.
- *
- * If format is HAL_PIXEL_FORMAT_COMPRESSED, the size parameter must
- * be equal to the size in bytes of the buffers to allocate for the
- * stream. For other formats, the size parameter is ignored.
- */
- status_t connectToDevice(sp<ANativeWindow> consumer,
- uint32_t width, uint32_t height, int format, size_t size);
-
- status_t release();
-
- status_t setTransform(int transform);
-
- // Get stream parameters.
- // Only valid after a successful connectToDevice call.
- int getId() const { return mId; }
- uint32_t getWidth() const { return mWidth; }
- uint32_t getHeight() const { return mHeight; }
- uint32_t getFormat() const { return mFormat; }
-
- // Dump stream information
- status_t dump(int fd, const Vector<String16>& args);
-
- private:
- enum {
- ERROR = -1,
- RELEASED = 0,
- ALLOCATED,
- CONNECTED,
- ACTIVE
- } mState;
-
- sp<ANativeWindow> mConsumerInterface;
- camera2_device_t *mHal2Device;
-
- uint32_t mId;
- uint32_t mWidth;
- uint32_t mHeight;
- uint32_t mFormat;
- size_t mSize;
- uint32_t mUsage;
- uint32_t mMaxProducerBuffers;
- uint32_t mMaxConsumerBuffers;
- uint32_t mTotalBuffers;
- int mFormatRequested;
-
- /** Debugging information */
- uint32_t mActiveBuffers;
- uint32_t mFrameCount;
- int64_t mLastTimestamp;
-
- const camera2_stream_ops *getStreamOps();
-
- static ANativeWindow* toANW(const camera2_stream_ops_t *w);
-
- static int dequeue_buffer(const camera2_stream_ops_t *w,
- buffer_handle_t** buffer);
-
- static int enqueue_buffer(const camera2_stream_ops_t* w,
- int64_t timestamp,
- buffer_handle_t* buffer);
-
- static int cancel_buffer(const camera2_stream_ops_t* w,
- buffer_handle_t* buffer);
-
- static int set_crop(const camera2_stream_ops_t* w,
- int left, int top, int right, int bottom);
- }; // class StreamAdapter
-
- typedef List<sp<StreamAdapter> > StreamList;
- StreamList mStreams;
-
- /**
- * Adapter from an ANativeWindow interface to camera2 device stream ops.
- * Also takes care of allocating/deallocating stream in device interface
- */
- class ReprocessStreamAdapter: public camera2_stream_in_ops, public virtual RefBase {
- public:
- ReprocessStreamAdapter(camera2_device_t *d);
-
- ~ReprocessStreamAdapter();
-
- /**
- * Create a HAL device reprocess stream based on an existing output stream.
- */
- status_t connectToDevice(const sp<StreamAdapter> &outputStream);
-
- status_t release();
-
- /**
- * Push buffer into stream for reprocessing. Takes ownership until it notifies
- * that the buffer has been released
- */
- status_t pushIntoStream(buffer_handle_t *handle,
- const wp<BufferReleasedListener> &releaseListener);
-
- /**
- * Get stream parameters.
- * Only valid after a successful connectToDevice call.
- */
- int getId() const { return mId; }
- uint32_t getWidth() const { return mWidth; }
- uint32_t getHeight() const { return mHeight; }
- uint32_t getFormat() const { return mFormat; }
-
- // Dump stream information
- status_t dump(int fd, const Vector<String16>& args);
-
- private:
- enum {
- ERROR = -1,
- RELEASED = 0,
- ACTIVE
- } mState;
-
- sp<ANativeWindow> mConsumerInterface;
- wp<StreamAdapter> mBaseStream;
-
- struct QueueEntry {
- buffer_handle_t *handle;
- wp<BufferReleasedListener> releaseListener;
- };
-
- List<QueueEntry> mQueue;
-
- List<QueueEntry> mInFlightQueue;
-
- camera2_device_t *mHal2Device;
-
- uint32_t mId;
- uint32_t mWidth;
- uint32_t mHeight;
- uint32_t mFormat;
-
- /** Debugging information */
- uint32_t mActiveBuffers;
- uint32_t mFrameCount;
- int64_t mLastTimestamp;
-
- const camera2_stream_in_ops *getStreamOps();
-
- static int acquire_buffer(const camera2_stream_in_ops_t *w,
- buffer_handle_t** buffer);
-
- static int release_buffer(const camera2_stream_in_ops_t* w,
- buffer_handle_t* buffer);
-
- }; // class ReprocessStreamAdapter
-
- typedef List<sp<ReprocessStreamAdapter> > ReprocessStreamList;
- ReprocessStreamList mReprocessStreams;
-
- // Receives HAL notifications and routes them to the NotificationListener
- static void notificationCallback(int32_t msg_type,
- int32_t ext1,
- int32_t ext2,
- int32_t ext3,
- void *user);
-
-}; // class Camera2Device
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6220349..7acd150 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -3302,7 +3302,7 @@
}
if (mNextRequests.size() < batchSize) {
- ALOGE("RequestThread: only get %d out of %d requests. Skipping requests.",
+ ALOGE("RequestThread: only get %zu out of %zu requests. Skipping requests.",
mNextRequests.size(), batchSize);
cleanUpFailedRequests(/*sendRequestError*/true);
}
@@ -3622,7 +3622,7 @@
status_t Camera3Device::RequestThread::addDummyTriggerIds(
const sp<CaptureRequest> &request) {
- // Trigger ID 0 has special meaning in the HAL2 spec, so avoid it here
+ // Trigger ID 0 had special meaning in the HAL2 spec, so avoid it here
static const int32_t dummyTriggerId = 1;
status_t res;
@@ -3716,8 +3716,6 @@
}
status_t Camera3Device::PreparerThread::clear() {
- status_t res;
-
Mutex::Autolock l(mLock);
for (const auto& stream : mPendingStreams) {