Merge "Plumbing to recognize AV1 content"
diff --git a/apex/Android.bp b/apex/Android.bp
index eee26ae..05cc2c5 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -21,7 +21,11 @@
"libamrextractor",
"libflacextractor",
"libmidiextractor",
+ "libmkvextractor",
"libmp3extractor",
+ "libmp4extractor",
+ "libmpeg2extractor",
+ "liboggextractor",
"libwavextractor",
],
key: "com.android.media.key",
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 321eb08..4e9b27d 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -167,19 +167,23 @@
}
OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
+ const String16& physicalId,
int surfaceSetID, bool isShared) {
mGbps.push_back(gbp);
mRotation = rotation;
mSurfaceSetID = surfaceSetID;
mIsDeferred = false;
mIsShared = isShared;
+ mPhysicalCameraId = physicalId;
}
OutputConfiguration::OutputConfiguration(
const std::vector<sp<IGraphicBufferProducer>>& gbps,
- int rotation, int surfaceSetID, int surfaceType, int width, int height, bool isShared)
+ int rotation, const String16& physicalCameraId, int surfaceSetID, int surfaceType,
+ int width, int height, bool isShared)
: mGbps(gbps), mRotation(rotation), mSurfaceSetID(surfaceSetID), mSurfaceType(surfaceType),
- mWidth(width), mHeight(height), mIsDeferred(false), mIsShared(isShared) { }
+ mWidth(width), mHeight(height), mIsDeferred(false), mIsShared(isShared),
+ mPhysicalCameraId(physicalCameraId) { }
status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index 5b117fb..95c4f39 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -65,10 +65,12 @@
OutputConfiguration(const android::Parcel& parcel);
OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
+ const String16& physicalCameraId,
int surfaceSetID = INVALID_SET_ID, bool isShared = false);
OutputConfiguration(const std::vector<sp<IGraphicBufferProducer>>& gbps,
- int rotation, int surfaceSetID = INVALID_SET_ID,
+ int rotation, const String16& physicalCameraId,
+ int surfaceSetID = INVALID_SET_ID,
int surfaceType = OutputConfiguration::SURFACE_TYPE_UNKNOWN, int width = 0,
int height = 0, bool isShared = false);
diff --git a/camera/ndk/NdkCameraCaptureSession.cpp b/camera/ndk/NdkCameraCaptureSession.cpp
index fd95296..540d84e 100644
--- a/camera/ndk/NdkCameraCaptureSession.cpp
+++ b/camera/ndk/NdkCameraCaptureSession.cpp
@@ -28,6 +28,8 @@
#include <camera/NdkCameraCaptureSession.h>
#include "impl/ACameraCaptureSession.h"
+#include "impl/ACameraCaptureSession.inc"
+
using namespace android;
EXPORT
@@ -82,7 +84,31 @@
return ACAMERA_ERROR_SESSION_CLOSED;
}
- return session->capture(cbs, numRequests, requests, captureSequenceId);
+ return session->capture(
+ cbs, numRequests, requests, captureSequenceId);
+}
+
+EXPORT
+camera_status_t ACameraCaptureSession_logicalCamera_capture(
+ ACameraCaptureSession* session,
+ /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ ATRACE_CALL();
+ if (session == nullptr || requests == nullptr || numRequests < 1) {
+ ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
+ __FUNCTION__, session, numRequests, requests);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (session->isClosed()) {
+ ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+ *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ return ACAMERA_ERROR_SESSION_CLOSED;
+ }
+
+ return session->capture(
+ lcbs, numRequests, requests, captureSequenceId);
}
EXPORT
@@ -107,6 +133,28 @@
}
EXPORT
+camera_status_t ACameraCaptureSession_logicalCamera_setRepeatingRequest(
+ ACameraCaptureSession* session,
+ /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ ATRACE_CALL();
+ if (session == nullptr || requests == nullptr || numRequests < 1) {
+ ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
+ __FUNCTION__, session, numRequests, requests);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (session->isClosed()) {
+ ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+ *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ return ACAMERA_ERROR_SESSION_CLOSED;
+ }
+
+ return session->setRepeatingRequest(lcbs, numRequests, requests, captureSequenceId);
+}
+
+EXPORT
camera_status_t ACameraCaptureSession_stopRepeating(ACameraCaptureSession* session) {
ATRACE_CALL();
if (session == nullptr) {
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index ef05e0b..98608da 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -129,6 +129,20 @@
}
EXPORT
+camera_status_t ACaptureSessionPhysicalOutput_create(
+ ACameraWindowType* window, const char* physicalId,
+ /*out*/ACaptureSessionOutput** out) {
+ ATRACE_CALL();
+ if (window == nullptr || physicalId == nullptr || out == nullptr) {
+ ALOGE("%s: Error: bad argument. window %p, physicalId %p, out %p",
+ __FUNCTION__, window, physicalId, out);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ *out = new ACaptureSessionOutput(window, false, physicalId);
+ return ACAMERA_OK;
+}
+
+EXPORT
camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *out,
ACameraWindowType* window) {
ATRACE_CALL();
diff --git a/camera/ndk/NdkCameraMetadata.cpp b/camera/ndk/NdkCameraMetadata.cpp
index 34ec2da..9a39ed8 100644
--- a/camera/ndk/NdkCameraMetadata.cpp
+++ b/camera/ndk/NdkCameraMetadata.cpp
@@ -69,3 +69,20 @@
metadata->decStrong((void*) ACameraMetadata_free);
}
}
+
+EXPORT
+bool ACameraMetadata_isLogicalMultiCamera(const ACameraMetadata* staticMetadata,
+ /*out*/size_t* numPhysicalCameras, /*out*/const char*const** physicalCameraIds) {
+ ATRACE_CALL();
+ if (numPhysicalCameras == nullptr || physicalCameraIds == nullptr) {
+ ALOGE("%s: Invalid input: numPhysicalCameras %p, physicalCameraIds %p",
+ __FUNCTION__, numPhysicalCameras, physicalCameraIds);
+ return false;
+ }
+ if (staticMetadata == nullptr) {
+ ALOGE("%s: Invalid input: staticMetadata is null.", __FUNCTION__);
+ return false;
+ }
+
+ return staticMetadata->isLogicalMultiCamera(numPhysicalCameras, physicalCameraIds);
+}
diff --git a/camera/ndk/impl/ACameraCaptureSession.cpp b/camera/ndk/impl/ACameraCaptureSession.cpp
index fb72bdb..d6f1412 100644
--- a/camera/ndk/impl/ACameraCaptureSession.cpp
+++ b/camera/ndk/impl/ACameraCaptureSession.cpp
@@ -107,47 +107,6 @@
return ret;
}
-camera_status_t
-ACameraCaptureSession::setRepeatingRequest(
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId) {
- sp<acam::CameraDevice> dev = getDeviceSp();
- if (dev == nullptr) {
- ALOGE("Error: Device associated with session %p has been closed!", this);
- return ACAMERA_ERROR_SESSION_CLOSED;
- }
-
- camera_status_t ret;
- dev->lockDeviceForSessionOps();
- {
- Mutex::Autolock _l(mSessionLock);
- ret = dev->setRepeatingRequestsLocked(
- this, cbs, numRequests, requests, captureSequenceId);
- }
- dev->unlockDevice();
- return ret;
-}
-
-camera_status_t ACameraCaptureSession::capture(
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId) {
- sp<acam::CameraDevice> dev = getDeviceSp();
- if (dev == nullptr) {
- ALOGE("Error: Device associated with session %p has been closed!", this);
- return ACAMERA_ERROR_SESSION_CLOSED;
- }
- camera_status_t ret;
- dev->lockDeviceForSessionOps();
- {
- Mutex::Autolock _l(mSessionLock);
- ret = dev->captureLocked(this, cbs, numRequests, requests, captureSequenceId);
- }
- dev->unlockDevice();
- return ret;
-}
-
camera_status_t ACameraCaptureSession::updateOutputConfiguration(ACaptureSessionOutput *output) {
sp<acam::CameraDevice> dev = getDeviceSp();
if (dev == nullptr) {
diff --git a/camera/ndk/impl/ACameraCaptureSession.h b/camera/ndk/impl/ACameraCaptureSession.h
index 133c2c8..08a9226 100644
--- a/camera/ndk/impl/ACameraCaptureSession.h
+++ b/camera/ndk/impl/ACameraCaptureSession.h
@@ -17,6 +17,7 @@
#define _ACAMERA_CAPTURE_SESSION_H
#include <set>
+#include <string>
#include <hardware/camera3.h>
#include <camera/NdkCameraDevice.h>
@@ -29,8 +30,9 @@
using namespace android;
struct ACaptureSessionOutput {
- explicit ACaptureSessionOutput(ACameraWindowType* window, bool isShared = false) :
- mWindow(window), mIsShared(isShared) {};
+ explicit ACaptureSessionOutput(ACameraWindowType* window, bool isShared = false,
+ const char* physicalCameraId = "") :
+ mWindow(window), mIsShared(isShared), mPhysicalCameraId(physicalCameraId) {};
bool operator == (const ACaptureSessionOutput& other) const {
return mWindow == other.mWindow;
@@ -49,6 +51,7 @@
std::set<ACameraWindowType *> mSharedWindows;
bool mIsShared;
int mRotation = CAMERA3_STREAM_ROTATION_0;
+ std::string mPhysicalCameraId;
};
#endif
@@ -88,13 +91,15 @@
camera_status_t abortCaptures();
+ template<class T>
camera_status_t setRepeatingRequest(
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
+ template<class T>
camera_status_t capture(
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
diff --git a/camera/ndk/impl/ACameraCaptureSession.inc b/camera/ndk/impl/ACameraCaptureSession.inc
new file mode 100644
index 0000000..86bf8a5
--- /dev/null
+++ b/camera/ndk/impl/ACameraCaptureSession.inc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ACameraCaptureSession.h"
+
+#ifdef __ANDROID_VNDK__
+#include "ndk_vendor/impl/ACameraDeviceVendor.inc"
+#else
+#include "ACameraDevice.inc"
+#endif
+
+using namespace android;
+
+template <class T>
+camera_status_t
+ACameraCaptureSession::setRepeatingRequest(
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ sp<acam::CameraDevice> dev = getDeviceSp();
+ if (dev == nullptr) {
+ ALOGE("Error: Device associated with session %p has been closed!", this);
+ return ACAMERA_ERROR_SESSION_CLOSED;
+ }
+
+ camera_status_t ret;
+ dev->lockDeviceForSessionOps();
+ {
+ Mutex::Autolock _l(mSessionLock);
+ ret = dev->setRepeatingRequestsLocked(
+ this, cbs, numRequests, requests, captureSequenceId);
+ }
+ dev->unlockDevice();
+ return ret;
+}
+
+template <class T>
+camera_status_t ACameraCaptureSession::capture(
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ sp<acam::CameraDevice> dev = getDeviceSp();
+ if (dev == nullptr) {
+ ALOGE("Error: Device associated with session %p has been closed!", this);
+ return ACAMERA_ERROR_SESSION_CLOSED;
+ }
+ camera_status_t ret;
+ dev->lockDeviceForSessionOps();
+ {
+ Mutex::Autolock _l(mSessionLock);
+ ret = dev->captureLocked(this, cbs, numRequests, requests, captureSequenceId);
+ }
+ dev->unlockDevice();
+ return ret;
+}
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 00da54e..d8a5765 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -20,13 +20,14 @@
#include <vector>
#include <inttypes.h>
#include <android/hardware/ICameraService.h>
-#include <camera2/SubmitInfo.h>
#include <gui/Surface.h>
#include "ACameraDevice.h"
#include "ACameraMetadata.h"
#include "ACaptureRequest.h"
#include "ACameraCaptureSession.h"
+#include "ACameraCaptureSession.inc"
+
namespace android {
namespace acam {
@@ -39,6 +40,7 @@
const char* CameraDevice::kCaptureRequestKey = "CaptureRequest";
const char* CameraDevice::kTimeStampKey = "TimeStamp";
const char* CameraDevice::kCaptureResultKey = "CaptureResult";
+const char* CameraDevice::kPhysicalCaptureResultKey = "PhysicalCaptureResult";
const char* CameraDevice::kCaptureFailureKey = "CaptureFailure";
const char* CameraDevice::kSequenceIdKey = "SequenceId";
const char* CameraDevice::kFrameNumberKey = "FrameNumber";
@@ -190,106 +192,6 @@
return ACAMERA_OK;
}
-camera_status_t
-CameraDevice::captureLocked(
- sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId) {
- return submitRequestsLocked(
- session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/false);
-}
-
-camera_status_t
-CameraDevice::setRepeatingRequestsLocked(
- sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId) {
- return submitRequestsLocked(
- session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/true);
-}
-
-camera_status_t
-CameraDevice::submitRequestsLocked(
- sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId,
- bool isRepeating) {
- camera_status_t ret = checkCameraClosedOrErrorLocked();
- if (ret != ACAMERA_OK) {
- ALOGE("Camera %s submit capture request failed! ret %d", getId(), ret);
- return ret;
- }
-
- // Form two vectors of capture request, one for internal tracking
- std::vector<hardware::camera2::CaptureRequest> requestList;
- Vector<sp<CaptureRequest> > requestsV;
- requestsV.setCapacity(numRequests);
- for (int i = 0; i < numRequests; i++) {
- sp<CaptureRequest> req;
- ret = allocateCaptureRequest(requests[i], req);
- if (ret != ACAMERA_OK) {
- ALOGE("Convert capture request to internal format failure! ret %d", ret);
- return ret;
- }
- if (req->mSurfaceList.empty()) {
- ALOGE("Capture request without output target cannot be submitted!");
- return ACAMERA_ERROR_INVALID_PARAMETER;
- }
- requestList.push_back(*(req.get()));
- requestsV.push_back(req);
- }
-
- if (isRepeating) {
- ret = stopRepeatingLocked();
- if (ret != ACAMERA_OK) {
- ALOGE("Camera %s stop repeating failed! ret %d", getId(), ret);
- return ret;
- }
- }
-
- binder::Status remoteRet;
- hardware::camera2::utils::SubmitInfo info;
- remoteRet = mRemote->submitRequestList(requestList, isRepeating, &info);
- int sequenceId = info.mRequestId;
- int64_t lastFrameNumber = info.mLastFrameNumber;
- if (sequenceId < 0) {
- ALOGE("Camera %s submit request remote failure: ret %d", getId(), sequenceId);
- return ACAMERA_ERROR_UNKNOWN;
- }
-
- CallbackHolder cbHolder(session, requestsV, isRepeating, cbs);
- mSequenceCallbackMap.insert(std::make_pair(sequenceId, cbHolder));
-
- if (isRepeating) {
- // stopRepeating above should have cleanup repeating sequence id
- if (mRepeatingSequenceId != REQUEST_ID_NONE) {
- setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
- return ACAMERA_ERROR_CAMERA_DEVICE;
- }
- mRepeatingSequenceId = sequenceId;
- } else {
- mSequenceLastFrameNumberMap.insert(std::make_pair(sequenceId, lastFrameNumber));
- }
-
- if (mIdle) {
- sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
- msg->setPointer(kContextKey, session->mUserSessionCallback.context);
- msg->setObject(kSessionSpKey, session);
- msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
- postSessionMsgAndCleanup(msg);
- }
- mIdle = false;
- mBusySession = session;
-
- if (captureSequenceId) {
- *captureSequenceId = sequenceId;
- }
- return ACAMERA_OK;
-}
-
camera_status_t CameraDevice::updateOutputConfigurationLocked(ACaptureSessionOutput *output) {
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
@@ -325,8 +227,9 @@
return ret;
}
- OutputConfiguration outConfig(iGBP, output->mRotation, OutputConfiguration::INVALID_SET_ID,
- true);
+ String16 physicalId16(output->mPhysicalCameraId.c_str());
+ OutputConfiguration outConfig(iGBP, output->mRotation, physicalId16,
+ OutputConfiguration::INVALID_SET_ID, true);
for (auto& anw : output->mSharedWindows) {
ret = getIGBPfromAnw(anw, iGBP);
@@ -633,15 +536,16 @@
}
std::set<std::pair<ANativeWindow*, OutputConfiguration>> outputSet;
- for (auto outConfig : outputs->mOutputs) {
+ for (const auto& outConfig : outputs->mOutputs) {
ANativeWindow* anw = outConfig.mWindow;
sp<IGraphicBufferProducer> iGBP(nullptr);
ret = getIGBPfromAnw(anw, iGBP);
if (ret != ACAMERA_OK) {
return ret;
}
+ String16 physicalId16(outConfig.mPhysicalCameraId.c_str());
outputSet.insert(std::make_pair(
- anw, OutputConfiguration(iGBP, outConfig.mRotation,
+ anw, OutputConfiguration(iGBP, outConfig.mRotation, physicalId16,
OutputConfiguration::INVALID_SET_ID, outConfig.mIsShared)));
}
auto addSet = outputSet;
@@ -706,7 +610,7 @@
}
// add new streams
- for (auto outputPair : addSet) {
+ for (const auto& outputPair : addSet) {
int streamId;
remoteRet = mRemote->createStream(outputPair.second, &streamId);
if (!remoteRet.isOk()) {
@@ -829,7 +733,7 @@
if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER) {
int32_t streamId = resultExtras.errorStreamId;
ACameraCaptureSession_captureCallback_bufferLost onBufferLost =
- cbh.mCallbacks.onCaptureBufferLost;
+ cbh.mOnCaptureBufferLost;
auto outputPairIt = mConfiguredOutputs.find(streamId);
if (outputPairIt == mConfiguredOutputs.end()) {
ALOGE("%s: Error: stream id %d does not exist", __FUNCTION__, streamId);
@@ -839,14 +743,14 @@
const auto& gbps = outputPairIt->second.second.getGraphicBufferProducers();
for (const auto& outGbp : gbps) {
- for (auto surface : request->mSurfaceList) {
+ for (const auto& surface : request->mSurfaceList) {
if (surface->getIGraphicBufferProducer() == outGbp) {
ANativeWindow* anw = static_cast<ANativeWindow*>(surface.get());
ALOGV("Camera %s Lost output buffer for ANW %p frame %" PRId64,
getId(), anw, frameNumber);
sp<AMessage> msg = new AMessage(kWhatCaptureBufferLost, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) onBufferLost);
msg->setObject(kCaptureRequestKey, request);
@@ -858,7 +762,7 @@
}
} else { // Handle other capture failures
// Fire capture failure callback if there is one registered
- ACameraCaptureSession_captureCallback_failed onError = cbh.mCallbacks.onCaptureFailed;
+ ACameraCaptureSession_captureCallback_failed onError = cbh.mOnCaptureFailed;
sp<CameraCaptureFailure> failure(new CameraCaptureFailure());
failure->frameNumber = frameNumber;
// TODO: refine this when implementing flush
@@ -868,7 +772,7 @@
hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT);
sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) onError);
msg->setObject(kCaptureRequestKey, request);
@@ -890,6 +794,7 @@
case kWhatSessionStateCb:
case kWhatCaptureStart:
case kWhatCaptureResult:
+ case kWhatLogicalCaptureResult:
case kWhatCaptureFail:
case kWhatCaptureSeqEnd:
case kWhatCaptureSeqAbort:
@@ -960,6 +865,7 @@
case kWhatSessionStateCb:
case kWhatCaptureStart:
case kWhatCaptureResult:
+ case kWhatLogicalCaptureResult:
case kWhatCaptureFail:
case kWhatCaptureSeqEnd:
case kWhatCaptureSeqAbort:
@@ -977,6 +883,7 @@
switch (msg->what()) {
case kWhatCaptureStart:
case kWhatCaptureResult:
+ case kWhatLogicalCaptureResult:
case kWhatCaptureFail:
case kWhatCaptureBufferLost:
found = msg->findObject(kCaptureRequestKey, &obj);
@@ -1048,6 +955,64 @@
freeACaptureRequest(request);
break;
}
+ case kWhatLogicalCaptureResult:
+ {
+ ACameraCaptureSession_logicalCamera_captureCallback_result onResult;
+ found = msg->findPointer(kCallbackFpKey, (void**) &onResult);
+ if (!found) {
+ ALOGE("%s: Cannot find logicalCamera capture result callback!",
+ __FUNCTION__);
+ return;
+ }
+ if (onResult == nullptr) {
+ return;
+ }
+
+ found = msg->findObject(kCaptureResultKey, &obj);
+ if (!found) {
+ ALOGE("%s: Cannot find capture result!", __FUNCTION__);
+ return;
+ }
+ sp<ACameraMetadata> result(static_cast<ACameraMetadata*>(obj.get()));
+
+ found = msg->findObject(kPhysicalCaptureResultKey, &obj);
+ if (!found) {
+ ALOGE("%s: Cannot find physical capture result!", __FUNCTION__);
+ return;
+ }
+ sp<ACameraPhysicalCaptureResultInfo> physicalResult(
+ static_cast<ACameraPhysicalCaptureResultInfo*>(obj.get()));
+ std::vector<PhysicalCaptureResultInfo>& physicalResultInfo =
+ physicalResult->mPhysicalResultInfo;
+
+ std::vector<std::string> physicalCameraIds;
+ std::vector<sp<ACameraMetadata>> physicalMetadataCopy;
+ for (size_t i = 0; i < physicalResultInfo.size(); i++) {
+ String8 physicalId8(physicalResultInfo[i].mPhysicalCameraId);
+ physicalCameraIds.push_back(physicalId8.c_str());
+
+ CameraMetadata clone = physicalResultInfo[i].mPhysicalCameraMetadata;
+ clone.update(ANDROID_SYNC_FRAME_NUMBER,
+ &physicalResult->mFrameNumber, /*data_count*/1);
+ sp<ACameraMetadata> metadata =
+ new ACameraMetadata(clone.release(), ACameraMetadata::ACM_RESULT);
+ physicalMetadataCopy.push_back(metadata);
+ }
+
+ std::vector<const char*> physicalCameraIdPtrs;
+ std::vector<const ACameraMetadata*> physicalMetadataCopyPtrs;
+ for (size_t i = 0; i < physicalResultInfo.size(); i++) {
+ physicalCameraIdPtrs.push_back(physicalCameraIds[i].c_str());
+ physicalMetadataCopyPtrs.push_back(physicalMetadataCopy[i].get());
+ }
+
+ ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ (*onResult)(context, session.get(), request, result.get(),
+ physicalResultInfo.size(), physicalCameraIdPtrs.data(),
+ physicalMetadataCopyPtrs.data());
+ freeACaptureRequest(request);
+ break;
+ }
case kWhatCaptureFail:
{
ACameraCaptureSession_captureCallback_failed onFail;
@@ -1158,12 +1123,34 @@
}
CameraDevice::CallbackHolder::CallbackHolder(
- sp<ACameraCaptureSession> session,
- const Vector<sp<CaptureRequest> >& requests,
- bool isRepeating,
- ACameraCaptureSession_captureCallbacks* cbs) :
- mSession(session), mRequests(requests),
- mIsRepeating(isRepeating), mCallbacks(fillCb(cbs)) {}
+ sp<ACameraCaptureSession> session,
+ const Vector<sp<CaptureRequest> >& requests,
+ bool isRepeating,
+ ACameraCaptureSession_captureCallbacks* cbs) :
+ mSession(session), mRequests(requests),
+ mIsRepeating(isRepeating),
+ mIsLogicalCameraCallback(false) {
+ initCaptureCallbacks(cbs);
+
+ if (cbs != nullptr) {
+ mOnCaptureCompleted = cbs->onCaptureCompleted;
+ }
+}
+
+CameraDevice::CallbackHolder::CallbackHolder(
+ sp<ACameraCaptureSession> session,
+ const Vector<sp<CaptureRequest> >& requests,
+ bool isRepeating,
+ ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs) :
+ mSession(session), mRequests(requests),
+ mIsRepeating(isRepeating),
+ mIsLogicalCameraCallback(true) {
+ initCaptureCallbacks(lcbs);
+
+ if (lcbs != nullptr) {
+ mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+ }
+}
void
CameraDevice::checkRepeatingSequenceCompleteLocked(
@@ -1180,9 +1167,9 @@
mSequenceCallbackMap.erase(cbIt);
// send seq aborted callback
sp<AMessage> msg = new AMessage(kWhatCaptureSeqAbort, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, cbh.mSession);
- msg->setPointer(kCallbackFpKey, (void*) cbh.mCallbacks.onCaptureSequenceAborted);
+ msg->setPointer(kCallbackFpKey, (void*) cbh.mOnCaptureSequenceAborted);
msg->setInt32(kSequenceIdKey, sequenceId);
postSessionMsgAndCleanup(msg);
} else {
@@ -1230,9 +1217,9 @@
mSequenceCallbackMap.erase(cbIt);
// send seq complete callback
sp<AMessage> msg = new AMessage(kWhatCaptureSeqEnd, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, cbh.mSession);
- msg->setPointer(kCallbackFpKey, (void*) cbh.mCallbacks.onCaptureSequenceCompleted);
+ msg->setPointer(kCallbackFpKey, (void*) cbh.mOnCaptureSequenceCompleted);
msg->setInt32(kSequenceIdKey, sequenceId);
msg->setInt64(kFrameNumberKey, lastFrameNumber);
@@ -1389,7 +1376,7 @@
auto it = dev->mSequenceCallbackMap.find(sequenceId);
if (it != dev->mSequenceCallbackMap.end()) {
CallbackHolder cbh = (*it).second;
- ACameraCaptureSession_captureCallback_start onStart = cbh.mCallbacks.onCaptureStarted;
+ ACameraCaptureSession_captureCallback_start onStart = cbh.mOnCaptureStarted;
sp<ACameraCaptureSession> session = cbh.mSession;
if ((size_t) burstId >= cbh.mRequests.size()) {
ALOGE("%s: Error: request index %d out of bound (size %zu)",
@@ -1398,7 +1385,7 @@
}
sp<CaptureRequest> request = cbh.mRequests[burstId];
sp<AMessage> msg = new AMessage(kWhatCaptureStart, dev->mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) onStart);
msg->setObject(kCaptureRequestKey, request);
@@ -1413,7 +1400,6 @@
const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras,
const std::vector<PhysicalCaptureResultInfo>& physicalResultInfos) {
- (void) physicalResultInfos;
binder::Status ret = binder::Status::ok();
sp<CameraDevice> dev = mDevice.promote();
@@ -1449,9 +1435,6 @@
auto it = dev->mSequenceCallbackMap.find(sequenceId);
if (it != dev->mSequenceCallbackMap.end()) {
CallbackHolder cbh = (*it).second;
- ACameraCaptureSession_captureCallback_result onResult = isPartialResult ?
- cbh.mCallbacks.onCaptureProgressed :
- cbh.mCallbacks.onCaptureCompleted;
sp<ACameraCaptureSession> session = cbh.mSession;
if ((size_t) burstId >= cbh.mRequests.size()) {
ALOGE("%s: Error: request index %d out of bound (size %zu)",
@@ -1461,13 +1444,27 @@
sp<CaptureRequest> request = cbh.mRequests[burstId];
sp<ACameraMetadata> result(new ACameraMetadata(
metadataCopy.release(), ACameraMetadata::ACM_RESULT));
+ sp<ACameraPhysicalCaptureResultInfo> physicalResult(
+ new ACameraPhysicalCaptureResultInfo(physicalResultInfos, frameNumber));
- sp<AMessage> msg = new AMessage(kWhatCaptureResult, dev->mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ sp<AMessage> msg = new AMessage(
+ cbh.mIsLogicalCameraCallback ? kWhatLogicalCaptureResult : kWhatCaptureResult,
+ dev->mHandler);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
- msg->setPointer(kCallbackFpKey, (void*) onResult);
msg->setObject(kCaptureRequestKey, request);
msg->setObject(kCaptureResultKey, result);
+ if (isPartialResult) {
+ msg->setPointer(kCallbackFpKey,
+ (void *)cbh.mOnCaptureProgressed);
+ } else if (cbh.mIsLogicalCameraCallback) {
+ msg->setPointer(kCallbackFpKey,
+ (void *)cbh.mOnLogicalCameraCaptureCompleted);
+ msg->setObject(kPhysicalCaptureResultKey, physicalResult);
+ } else {
+ msg->setPointer(kCallbackFpKey,
+ (void *)cbh.mOnCaptureCompleted);
+ }
dev->postSessionMsgAndCleanup(msg);
}
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 8f56d3f..d0f363b 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -21,6 +21,7 @@
#include <set>
#include <atomic>
#include <utility>
+#include <vector>
#include <utils/StrongPointer.h>
#include <utils/Mutex.h>
#include <utils/String8.h>
@@ -46,6 +47,16 @@
// Wrap ACameraCaptureFailure so it can be ref-counted
struct CameraCaptureFailure : public RefBase, public ACameraCaptureFailure {};
+// Wrap PhysicalCaptureResultInfo so that it can be ref-counted
+struct ACameraPhysicalCaptureResultInfo: public RefBase {
+ ACameraPhysicalCaptureResultInfo(const std::vector<PhysicalCaptureResultInfo>& info,
+ int64_t frameNumber) :
+ mPhysicalResultInfo(info), mFrameNumber(frameNumber) {}
+
+ std::vector<PhysicalCaptureResultInfo> mPhysicalResultInfo;
+ int64_t mFrameNumber;
+};
+
class CameraDevice final : public RefBase {
public:
CameraDevice(const char* id, ACameraDevice_StateCallbacks* cb,
@@ -109,19 +120,22 @@
camera_status_t waitUntilIdleLocked();
+ template<class T>
camera_status_t captureLocked(sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
+ template<class T>
camera_status_t setRepeatingRequestsLocked(sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
+ template<class T>
camera_status_t submitRequestsLocked(
sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*out*/int* captureSequenceId,
bool isRepeating);
@@ -192,6 +206,7 @@
// Capture callbacks
kWhatCaptureStart, // onCaptureStarted
kWhatCaptureResult, // onCaptureProgressed, onCaptureCompleted
+ kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
kWhatCaptureFail, // onCaptureFailed
kWhatCaptureSeqEnd, // onCaptureSequenceCompleted
kWhatCaptureSeqAbort, // onCaptureSequenceAborted
@@ -207,6 +222,7 @@
static const char* kCaptureRequestKey;
static const char* kTimeStampKey;
static const char* kCaptureResultKey;
+ static const char* kPhysicalCaptureResultKey;
static const char* kCaptureFailureKey;
static const char* kSequenceIdKey;
static const char* kFrameNumberKey;
@@ -245,19 +261,46 @@
const Vector<sp<CaptureRequest> >& requests,
bool isRepeating,
ACameraCaptureSession_captureCallbacks* cbs);
+ CallbackHolder(sp<ACameraCaptureSession> session,
+ const Vector<sp<CaptureRequest> >& requests,
+ bool isRepeating,
+ ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs);
- static ACameraCaptureSession_captureCallbacks fillCb(
- ACameraCaptureSession_captureCallbacks* cbs) {
+ template <class T>
+ void initCaptureCallbacks(T* cbs) {
+ mContext = nullptr;
+ mOnCaptureStarted = nullptr;
+ mOnCaptureProgressed = nullptr;
+ mOnCaptureCompleted = nullptr;
+ mOnLogicalCameraCaptureCompleted = nullptr;
+ mOnCaptureFailed = nullptr;
+ mOnCaptureSequenceCompleted = nullptr;
+ mOnCaptureSequenceAborted = nullptr;
+ mOnCaptureBufferLost = nullptr;
if (cbs != nullptr) {
- return *cbs;
+ mContext = cbs->context;
+ mOnCaptureStarted = cbs->onCaptureStarted;
+ mOnCaptureProgressed = cbs->onCaptureProgressed;
+ mOnCaptureFailed = cbs->onCaptureFailed;
+ mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
+ mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
+ mOnCaptureBufferLost = cbs->onCaptureBufferLost;
}
- return { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
}
-
sp<ACameraCaptureSession> mSession;
Vector<sp<CaptureRequest> > mRequests;
const bool mIsRepeating;
- ACameraCaptureSession_captureCallbacks mCallbacks;
+ const bool mIsLogicalCameraCallback;
+
+ void* mContext;
+ ACameraCaptureSession_captureCallback_start mOnCaptureStarted;
+ ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
+ ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
+ ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
+ ACameraCaptureSession_captureCallback_failed mOnCaptureFailed;
+ ACameraCaptureSession_captureCallback_sequenceEnd mOnCaptureSequenceCompleted;
+ ACameraCaptureSession_captureCallback_sequenceAbort mOnCaptureSequenceAborted;
+ ACameraCaptureSession_captureCallback_bufferLost mOnCaptureBufferLost;
};
// sequence id -> callbacks map
std::map<int, CallbackHolder> mSequenceCallbackMap;
diff --git a/camera/ndk/impl/ACameraDevice.inc b/camera/ndk/impl/ACameraDevice.inc
new file mode 100644
index 0000000..1fc5352
--- /dev/null
+++ b/camera/ndk/impl/ACameraDevice.inc
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <inttypes.h>
+#include "ACameraDevice.h"
+#include "ACameraMetadata.h"
+#include "ACaptureRequest.h"
+#include "ACameraCaptureSession.h"
+
+namespace android {
+namespace acam {
+
+template<class T>
+camera_status_t
+CameraDevice::captureLocked(
+ sp<ACameraCaptureSession> session,
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ return submitRequestsLocked(
+ session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/false);
+}
+
+template<class T>
+camera_status_t
+CameraDevice::setRepeatingRequestsLocked(
+ sp<ACameraCaptureSession> session,
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ return submitRequestsLocked(
+ session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/true);
+}
+
+template<class T>
+camera_status_t CameraDevice::submitRequestsLocked(
+ sp<ACameraCaptureSession> session,
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId,
+ bool isRepeating) {
+ camera_status_t ret = checkCameraClosedOrErrorLocked();
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera %s submit capture request failed! ret %d", getId(), ret);
+ return ret;
+ }
+
+    // Form two vectors of capture requests, one for internal tracking
+ std::vector<hardware::camera2::CaptureRequest> requestList;
+ Vector<sp<CaptureRequest> > requestsV;
+ requestsV.setCapacity(numRequests);
+ for (int i = 0; i < numRequests; i++) {
+ sp<CaptureRequest> req;
+ ret = allocateCaptureRequest(requests[i], req);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Convert capture request to internal format failure! ret %d", ret);
+ return ret;
+ }
+ if (req->mSurfaceList.empty()) {
+ ALOGE("Capture request without output target cannot be submitted!");
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ requestList.push_back(*(req.get()));
+ requestsV.push_back(req);
+ }
+
+ if (isRepeating) {
+ ret = stopRepeatingLocked();
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera %s stop repeating failed! ret %d", getId(), ret);
+ return ret;
+ }
+ }
+
+ binder::Status remoteRet;
+ hardware::camera2::utils::SubmitInfo info;
+ remoteRet = mRemote->submitRequestList(requestList, isRepeating, &info);
+ int sequenceId = info.mRequestId;
+ int64_t lastFrameNumber = info.mLastFrameNumber;
+ if (sequenceId < 0) {
+ ALOGE("Camera %s submit request remote failure: ret %d", getId(), sequenceId);
+ return ACAMERA_ERROR_UNKNOWN;
+ }
+
+ CallbackHolder cbHolder(session, requestsV, isRepeating, cbs);
+ mSequenceCallbackMap.insert(std::make_pair(sequenceId, cbHolder));
+
+ if (isRepeating) {
+        // stopRepeating above should have cleaned up the repeating sequence id
+ if (mRepeatingSequenceId != REQUEST_ID_NONE) {
+ setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+ return ACAMERA_ERROR_CAMERA_DEVICE;
+ }
+ mRepeatingSequenceId = sequenceId;
+ } else {
+ mSequenceLastFrameNumberMap.insert(std::make_pair(sequenceId, lastFrameNumber));
+ }
+
+ if (mIdle) {
+ sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
+ msg->setPointer(kContextKey, session->mUserSessionCallback.context);
+ msg->setObject(kSessionSpKey, session);
+ msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
+ postSessionMsgAndCleanup(msg);
+ }
+ mIdle = false;
+ mBusySession = session;
+
+ if (captureSequenceId) {
+ *captureSequenceId = sequenceId;
+ }
+ return ACAMERA_OK;
+}
+
+} // namespace acam
+} // namespace android
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index 94b5713..c661233 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -50,6 +50,7 @@
case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS:
case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE:
case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT:
+ case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA:
return true;
case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING:
case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING:
@@ -79,11 +80,41 @@
uint8_t capability = entry.data.u8[i];
if (isNdkSupportedCapability(capability)) {
capabilities.push(capability);
+
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+ derivePhysicalCameraIds();
+ }
}
}
mData.update(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, capabilities);
}
+void
+ACameraMetadata::derivePhysicalCameraIds() {
+ ACameraMetadata_const_entry entry;
+ auto ret = getConstEntry(ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS, &entry);
+ if (ret != ACAMERA_OK) {
+ ALOGE("%s: Get ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS key failed. ret %d",
+ __FUNCTION__, ret);
+ return;
+ }
+
+ const uint8_t* ids = entry.data.u8;
+ size_t start = 0;
+ for (size_t i = 0; i < entry.count; ++i) {
+ if (ids[i] == '\0') {
+ if (start != i) {
+ mStaticPhysicalCameraIds.push_back((const char*)ids+start);
+ }
+ start = i+1;
+ }
+ }
+
+ if (mStaticPhysicalCameraIds.size() < 2) {
+ ALOGW("%s: Logical multi-camera device only has %zu physical cameras",
+ __FUNCTION__, mStaticPhysicalCameraIds.size());
+ }
+}
void
ACameraMetadata::filterDurations(uint32_t tag) {
@@ -309,6 +340,27 @@
return mData;
}
+bool
+ACameraMetadata::isLogicalMultiCamera(size_t* count, const char*const** physicalCameraIds) const {
+ if (mType != ACM_CHARACTERISTICS) {
+ ALOGE("%s must be called for a static metadata!", __FUNCTION__);
+ return false;
+ }
+ if (count == nullptr || physicalCameraIds == nullptr) {
+ ALOGE("%s: Invalid input count: %p, physicalCameraIds: %p", __FUNCTION__,
+ count, physicalCameraIds);
+ return false;
+ }
+
+ if (mStaticPhysicalCameraIds.size() >= 2) {
+ *count = mStaticPhysicalCameraIds.size();
+ *physicalCameraIds = mStaticPhysicalCameraIds.data();
+ return true;
+ }
+
+ return false;
+}
+
// TODO: some of key below should be hidden from user
// ex: ACAMERA_REQUEST_ID and ACAMERA_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR
/*@O~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~
diff --git a/camera/ndk/impl/ACameraMetadata.h b/camera/ndk/impl/ACameraMetadata.h
index f21dbaf..7049c4b 100644
--- a/camera/ndk/impl/ACameraMetadata.h
+++ b/camera/ndk/impl/ACameraMetadata.h
@@ -17,6 +17,7 @@
#define _ACAMERA_METADATA_H
#include <unordered_set>
+#include <vector>
#include <sys/types.h>
#include <utils/Mutex.h>
@@ -65,6 +66,7 @@
/*out*/const uint32_t** tags) const;
const CameraMetadata& getInternalData() const;
+ bool isLogicalMultiCamera(size_t* count, const char* const** physicalCameraIds) const;
private:
@@ -74,6 +76,7 @@
void filterUnsupportedFeatures(); // Hide features not yet supported by NDK
void filterStreamConfigurations(); // Hide input streams, translate hal format to NDK formats
void filterDurations(uint32_t tag); // translate hal format to NDK formats
+ void derivePhysicalCameraIds(); // Derive array of physical ids.
template<typename INTERNAL_T, typename NDK_T>
camera_status_t updateImpl(uint32_t tag, uint32_t count, const NDK_T* data) {
@@ -112,6 +115,8 @@
const ACAMERA_METADATA_TYPE mType;
static std::unordered_set<uint32_t> sSystemTags;
+
+ std::vector<const char*> mStaticPhysicalCameraIds;
};
#endif // _ACAMERA_METADATA_H
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 1244582..d13a818 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -643,6 +643,103 @@
ACaptureSessionOutput* output) __INTRODUCED_IN(28);
#endif /* __ANDROID_API__ >= 28 */
+#if __ANDROID_API__ >= 29
+/**
+ * The definition of the final capture result callback with logical multi-camera support.
+ *
+ * This has the same functionality as the final ACameraCaptureSession_captureCallback_result, with
+ * the added ability to return physical camera result metadata within a logical multi-camera.
+ *
+ * For a logical multi-camera, this function will be called with the IDs and result metadata
+ * of the underlying physical cameras for which the corresponding capture request contains targets.
+ * If the capture request doesn't contain targets specific to any physical camera, or the current
+ * camera device isn't a logical multi-camera, physicalResultCount will be 0.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address is different from
+ *                what the application sent, but the content will match. This request will be
+ *                freed by the framework immediately after this callback returns.
+ * @param result The capture result metadata reported by camera device. The memory is managed by
+ * camera framework. Do not access this pointer after this callback returns.
+ * @param physicalResultCount The number of physical camera result metadata
+ * @param physicalCameraIds The array of physical camera IDs on which the
+ * physical result metadata are reported.
+ * @param physicalResults The array of capture result metadata reported by the
+ * physical camera devices.
+ */
+typedef void (*ACameraCaptureSession_logicalCamera_captureCallback_result)(
+ void* context, ACameraCaptureSession* session,
+ ACaptureRequest* request, const ACameraMetadata* result,
+ size_t physicalResultCount, const char** physicalCameraIds,
+ const ACameraMetadata** physicalResults);
+
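+/*
+ * A minimal callback sketch, illustrative only: the names onLogicalResult and the
+ * ACAMERA_SENSOR_TIMESTAMP lookup are assumptions for the example, not part of this API.
+ * It iterates the per-physical-camera results delivered alongside the logical result.
+ *
+ *   static void onLogicalResult(void* context, ACameraCaptureSession* session,
+ *           ACaptureRequest* request, const ACameraMetadata* result,
+ *           size_t physicalResultCount, const char** physicalCameraIds,
+ *           const ACameraMetadata** physicalResults) {
+ *       for (size_t i = 0; i < physicalResultCount; i++) {
+ *           // physicalCameraIds[i] and physicalResults[i] are managed by the
+ *           // framework; do not keep them past this callback.
+ *           ACameraMetadata_const_entry entry;
+ *           ACameraMetadata_getConstEntry(physicalResults[i],
+ *                   ACAMERA_SENSOR_TIMESTAMP, &entry);
+ *       }
+ *   }
+ */
+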
+/**
+ * This has the same functionality as ACameraCaptureSession_captureCallbacks,
+ * with the exception that an onLogicalCameraCaptureCompleted callback is
+ * used, instead of onCaptureCompleted, to support logical multi-camera.
+ */
+typedef struct ACameraCaptureSession_logicalCamera_captureCallbacks {
+ /**
+ * Same as ACameraCaptureSession_captureCallbacks
+ */
+ void* context;
+ ACameraCaptureSession_captureCallback_start onCaptureStarted;
+ ACameraCaptureSession_captureCallback_result onCaptureProgressed;
+
+ /**
+ * This callback is called when an image capture has fully completed and all the
+ * result metadata is available. For a logical multi-camera, this callback
+     * also returns the result metadata for all physical cameras that the
+     * request explicitly targets.
+ *
+ * <p>This callback will always fire after the last {@link onCaptureProgressed};
+ * in other words, no more partial results will be delivered once the completed result
+ * is available.</p>
+ *
+ * <p>For performance-intensive use-cases where latency is a factor, consider
+ * using {@link onCaptureProgressed} instead.</p>
+ *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the application
+     * has submitted, but the contents of the ACaptureRequest will match what the application
+     * submitted.</p>
+ */
+ ACameraCaptureSession_logicalCamera_captureCallback_result onLogicalCameraCaptureCompleted;
+
+ /**
+ * Same as ACameraCaptureSession_captureCallbacks
+ */
+ ACameraCaptureSession_captureCallback_failed onCaptureFailed;
+ ACameraCaptureSession_captureCallback_sequenceEnd onCaptureSequenceCompleted;
+ ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
+ ACameraCaptureSession_captureCallback_bufferLost onCaptureBufferLost;
+} ACameraCaptureSession_logicalCamera_captureCallbacks;
+
+/**
+ * This has the same functionality as ACameraCaptureSession_capture, with added
+ * support for logical multi-camera, where the capture callbacks support result metadata for
+ * physical cameras.
+ */
+camera_status_t ACameraCaptureSession_logicalCamera_capture(
+ ACameraCaptureSession* session,
+ /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacks* callbacks,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) __INTRODUCED_IN(29);
+
+/**
+ * This has the same functionality as ACameraCaptureSession_setRepeatingRequest, with added
+ * support for logical multi-camera, where the capture callbacks support result metadata for
+ * physical cameras.
+ */
+camera_status_t ACameraCaptureSession_logicalCamera_setRepeatingRequest(
+ ACameraCaptureSession* session,
+ /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacks* callbacks,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) __INTRODUCED_IN(29);
+
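+/*
+ * A minimal usage sketch, illustrative only: session, request and onLogicalResult are
+ * assumed to exist already. Compared to ACameraCaptureSession_captureCallbacks, only the
+ * completed-result callback slot differs.
+ *
+ *   ACameraCaptureSession_logicalCamera_captureCallbacks cbs = {
+ *       .context = nullptr,
+ *       .onCaptureStarted = nullptr,
+ *       .onCaptureProgressed = nullptr,
+ *       .onLogicalCameraCaptureCompleted = onLogicalResult,
+ *       .onCaptureFailed = nullptr,
+ *       .onCaptureSequenceCompleted = nullptr,
+ *       .onCaptureSequenceAborted = nullptr,
+ *       .onCaptureBufferLost = nullptr,
+ *   };
+ *   int sequenceId;
+ *   camera_status_t status = ACameraCaptureSession_logicalCamera_capture(
+ *           session, &cbs, 1, &request, &sequenceId);
+ */
+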
+#endif /* __ANDROID_API__ >= 29 */
+
__END_DECLS
#endif /* _NDK_CAMERA_CAPTURE_SESSION_H */
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index 4fe43d5..26af4f8 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -765,6 +765,36 @@
#endif /* __ANDROID_API__ >= 28 */
+#if __ANDROID_API__ >= 29
+
+/**
+ * Create an ACaptureSessionOutput object used for streaming from a physical
+ * camera as part of a logical camera device.
+ *
+ * <p>The ACaptureSessionOutput is used in the {@link ACaptureSessionOutputContainer_add} method to
+ * add an output {@link ANativeWindow} to ACaptureSessionOutputContainer. Use
+ * {@link ACaptureSessionOutput_free} to free the object and its memory after the application no
+ * longer needs the {@link ACaptureSessionOutput}.</p>
+ *
+ * @param anw the {@link ANativeWindow} to be associated with the {@link ACaptureSessionOutput}
+ * @param physicalId the Id of the physical camera this output is associated
+ * with.
+ * @param output the output {@link ACaptureSessionOutput} will be stored here if the
+ * method call succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds. The created container will be
+ * filled in the output argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if anw, physicalId or output is NULL.</li></ul>
+ *
+ * @see ACaptureSessionOutputContainer_add
+ */
+camera_status_t ACaptureSessionPhysicalOutput_create(
+ ACameraWindowType* anw, const char* physicalId,
+ /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(29);
+
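+/*
+ * A minimal usage sketch, illustrative only: window, container and the physical camera
+ * id "2" are assumptions for the example. It creates a physical-camera output and adds
+ * it to the container used to configure the session.
+ *
+ *   ACaptureSessionOutput* physicalOutput = nullptr;
+ *   camera_status_t status = ACaptureSessionPhysicalOutput_create(
+ *           window, "2", &physicalOutput);
+ *   if (status == ACAMERA_OK) {
+ *       ACaptureSessionOutputContainer_add(container, physicalOutput);
+ *   }
+ */
+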
+#endif /* __ANDROID_API__ >= 29 */
+
__END_DECLS
#endif /* _NDK_CAMERA_DEVICE_H */
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index 611e270..9bbfb83 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -233,6 +233,28 @@
#endif /* __ANDROID_API__ >= 24 */
+#if __ANDROID_API__ >= 29
+
+/**
+ * Helper function to check whether a camera is a logical multi-camera.
+ *
+ * <p>Checks whether a camera device is a logical multi-camera based on its
+ * static metadata. If it is, its physical sub-camera IDs are also returned.</p>
+ *
+ * @param staticMetadata the static metadata of the camera being checked.
+ * @param numPhysicalCameras returns the number of physical cameras.
+ * @param physicalCameraIds returns the array of physical camera Ids backing this logical
+ * camera device. Note that this pointer is only valid
+ * during the lifetime of the staticMetadata object.
+ *
+ * @return true if this is a logical multi-camera, false otherwise.
+ */
+bool ACameraMetadata_isLogicalMultiCamera(const ACameraMetadata* staticMetadata,
+ /*out*/size_t* numPhysicalCameras, /*out*/const char* const** physicalCameraIds)
+ __INTRODUCED_IN(29);
+
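+/*
+ * A minimal usage sketch, illustrative only: chars is assumed to be the ACameraMetadata
+ * returned by ACameraManager_getCameraCharacteristics for the camera being checked.
+ *
+ *   size_t numPhysicalCameras = 0;
+ *   const char* const* physicalCameraIds = nullptr;
+ *   if (ACameraMetadata_isLogicalMultiCamera(chars,
+ *           &numPhysicalCameras, &physicalCameraIds)) {
+ *       for (size_t i = 0; i < numPhysicalCameras; i++) {
+ *           // physicalCameraIds[i] stays valid only while chars is alive.
+ *       }
+ *   }
+ */
+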
+#endif /* __ANDROID_API__ >= 29 */
+
__END_DECLS
#endif /* _NDK_CAMERA_METADATA_H */
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index cb474f4..4bb74cb 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5552,6 +5552,25 @@
ACAMERA_DEPTH_END,
/**
+ * <p>String containing the ids of the underlying physical cameras.</p>
+ *
+ * <p>Type: byte[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>For a logical camera, this is the concatenation of all underlying physical camera ids.
+ * The null terminator for each physical camera id must be preserved so that the whole string
+ * can be tokenized using '\0' to generate the list of physical camera ids.</p>
+ * <p>For example, if the physical camera ids of the logical camera are "2" and "3", the
+ * value of this tag will be ['2', '\0', '3', '\0'].</p>
+ * <p>The number of physical camera ids must be no less than 2.</p>
+ */
+ ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS = // byte[n]
+ ACAMERA_LOGICAL_MULTI_CAMERA_START,
+ /**
* <p>The accuracy of frame timestamp synchronization between physical cameras</p>
*
* <p>Type: byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)</p>
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index a29e96d..5a00022 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -2,9 +2,11 @@
global:
ACameraCaptureSession_abortCaptures;
ACameraCaptureSession_capture;
+ ACameraCaptureSession_logicalCamera_capture; # introduced=29
ACameraCaptureSession_close;
ACameraCaptureSession_getDevice;
ACameraCaptureSession_setRepeatingRequest;
+ ACameraCaptureSession_logicalCamera_setRepeatingRequest; # introduced=29
ACameraCaptureSession_stopRepeating;
ACameraCaptureSession_updateSharedOutput; # introduced=28
ACameraDevice_close;
@@ -24,6 +26,7 @@
ACameraMetadata_free;
ACameraMetadata_getAllTags;
ACameraMetadata_getConstEntry;
+ ACameraMetadata_isLogicalMultiCamera; # introduced=29
ACameraOutputTarget_create;
ACameraOutputTarget_free;
ACaptureRequest_addTarget;
@@ -48,6 +51,7 @@
ACaptureSessionSharedOutput_create; # introduced=28
ACaptureSessionSharedOutput_add; # introduced=28
ACaptureSessionSharedOutput_remove; # introduced=28
+ ACaptureSessionPhysicalOutput_create; # introduced=29
ACaptureSessionOutput_free;
local:
*;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h b/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
index 8d9e90c..e1af8c1 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
@@ -14,11 +14,13 @@
* limitations under the License.
*/
+#include <string>
#include "utils.h"
struct ACaptureSessionOutput {
- explicit ACaptureSessionOutput(native_handle_t* window, bool isShared = false) :
- mWindow(window), mIsShared(isShared) {};
+ explicit ACaptureSessionOutput(native_handle_t* window, bool isShared = false,
+ const char* physicalCameraId = "") :
+ mWindow(window), mIsShared(isShared), mPhysicalCameraId(physicalCameraId) {};
bool operator == (const ACaptureSessionOutput& other) const {
return (mWindow == other.mWindow);
@@ -40,6 +42,7 @@
std::set<android::acam::utils::native_handle_ptr_wrapper> mSharedWindows;
bool mIsShared;
int mRotation = CAMERA3_STREAM_ROTATION_0;
+ std::string mPhysicalCameraId;
};
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index 26e6b3c..f7863a5 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -29,6 +29,8 @@
#include "ACaptureRequest.h"
#include "utils.h"
+#include "ACameraCaptureSession.inc"
+
using namespace android;
namespace android {
@@ -47,6 +49,7 @@
const char* CameraDevice::kCaptureRequestKey = "CaptureRequest";
const char* CameraDevice::kTimeStampKey = "TimeStamp";
const char* CameraDevice::kCaptureResultKey = "CaptureResult";
+const char* CameraDevice::kPhysicalCaptureResultKey = "PhysicalCaptureResult";
const char* CameraDevice::kCaptureFailureKey = "CaptureFailure";
const char* CameraDevice::kSequenceIdKey = "SequenceId";
const char* CameraDevice::kFrameNumberKey = "FrameNumber";
@@ -206,28 +209,8 @@
return ACAMERA_OK;
}
-camera_status_t
-CameraDevice::captureLocked(
- sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId) {
- return submitRequestsLocked(
- session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/false);
-}
-
-camera_status_t
-CameraDevice::setRepeatingRequestsLocked(
- sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId) {
- return submitRequestsLocked(
- session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/true);
-}
-
-void addRequestSettingsMetadata(ACaptureRequest *aCaptureRequest,
- sp<CaptureRequest> &req) {
+void CameraDevice::addRequestSettingsMetadata(ACaptureRequest *aCaptureRequest,
+ sp<CaptureRequest> &req) {
CameraMetadata metadataCopy = aCaptureRequest->settings->getInternalData();
const camera_metadata_t *camera_metadata = metadataCopy.getAndLock();
HCameraMetadata hCameraMetadata;
@@ -237,101 +220,6 @@
req->mPhysicalCameraSettings[0].settings.metadata(std::move(hCameraMetadata));
}
-camera_status_t
-CameraDevice::submitRequestsLocked(
- sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
- int numRequests, ACaptureRequest** requests,
- /*optional*/int* captureSequenceId,
- bool isRepeating) {
- camera_status_t ret = checkCameraClosedOrErrorLocked();
- if (ret != ACAMERA_OK) {
- ALOGE("Camera %s submit capture request failed! ret %d", getId(), ret);
- return ret;
- }
-
- // Form two vectors of capture request, one for internal tracking
- std::vector<frameworks::cameraservice::device::V2_0::CaptureRequest> requestList;
- Vector<sp<CaptureRequest>> requestsV;
- requestsV.setCapacity(numRequests);
- for (int i = 0; i < numRequests; i++) {
- sp<CaptureRequest> req;
- ret = allocateCaptureRequest(requests[i], req);
- // We need to call this method since after submitRequestList is called,
- // the request metadata queue might have removed the capture request
- // metadata. Therefore we simply add the metadata to its wrapper class,
- // so that it can be retrived later.
- addRequestSettingsMetadata(requests[i], req);
- if (ret != ACAMERA_OK) {
- ALOGE("Convert capture request to internal format failure! ret %d", ret);
- return ret;
- }
- if (req->mCaptureRequest.streamAndWindowIds.size() == 0) {
- ALOGE("Capture request without output target cannot be submitted!");
- return ACAMERA_ERROR_INVALID_PARAMETER;
- }
- requestList.push_back(utils::convertToHidl(req.get()));
- requestsV.push_back(req);
- }
- if (isRepeating) {
- ret = stopRepeatingLocked();
- if (ret != ACAMERA_OK) {
- ALOGE("Camera %s stop repeating failed! ret %d", getId(), ret);
- return ret;
- }
- }
-
- SubmitInfo info;
- Status status;
- auto remoteRet = mRemote->submitRequestList(requestList, isRepeating,
- [&status, &info](auto s, auto &submitInfo) {
- status = s;
- info = submitInfo;
- });
- if (!remoteRet.isOk()) {
- ALOGE("%s: Transaction error for submitRequestList call: %s", __FUNCTION__,
- remoteRet.description().c_str());
- }
- if (status != Status::NO_ERROR) {
- return utils::convertFromHidl(status);
- }
- int32_t sequenceId = info.requestId;
- int64_t lastFrameNumber = info.lastFrameNumber;
- if (sequenceId < 0) {
- ALOGE("Camera %s submit request remote failure: ret %d", getId(), sequenceId);
- return ACAMERA_ERROR_UNKNOWN;
- }
-
- CallbackHolder cbHolder(session, requestsV, isRepeating, cbs);
- mSequenceCallbackMap.insert(std::make_pair(sequenceId, cbHolder));
-
- if (isRepeating) {
- // stopRepeating above should have cleanup repeating sequence id
- if (mRepeatingSequenceId != REQUEST_ID_NONE) {
- setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
- return ACAMERA_ERROR_CAMERA_DEVICE;
- }
- mRepeatingSequenceId = sequenceId;
- } else {
- mSequenceLastFrameNumberMap.insert(std::make_pair(sequenceId, lastFrameNumber));
- }
-
- if (mIdle) {
- sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
- msg->setPointer(kContextKey, session->mUserSessionCallback.context);
- msg->setObject(kSessionSpKey, session);
- msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
- postSessionMsgAndCleanup(msg);
- }
- mIdle = false;
- mBusySession = session;
-
- if (captureSequenceId) {
- *captureSequenceId = sequenceId;
- }
- return ACAMERA_OK;
-}
-
camera_status_t CameraDevice::updateOutputConfigurationLocked(ACaptureSessionOutput *output) {
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
@@ -365,6 +253,7 @@
outConfig.windowGroupId = -1; // ndk doesn't support inter OutputConfiguration buffer sharing.
outConfig.windowHandles.resize(output->mSharedWindows.size() + 1);
outConfig.windowHandles[0] = output->mWindow;
+ outConfig.physicalCameraId = output->mPhysicalCameraId;
int i = 1;
for (auto& anw : output->mSharedWindows) {
outConfig.windowHandles[i++] = anw;
@@ -668,6 +557,7 @@
outConfigInsert.windowGroupId = -1;
outConfigInsert.windowHandles.resize(outConfig.mSharedWindows.size() + 1);
outConfigInsert.windowHandles[0] = anw;
+ outConfigInsert.physicalCameraId = outConfig.mPhysicalCameraId;
native_handle_ptr_wrapper wrap(anw);
outputSet.insert(std::make_pair(anw, outConfigInsertW));
}
@@ -894,7 +784,7 @@
if (errorCode == ErrorCode::CAMERA_BUFFER) {
int32_t streamId = resultExtras.errorStreamId;
ACameraCaptureSession_captureCallback_bufferLost onBufferLost =
- cbh.mCallbacks.onCaptureBufferLost;
+ cbh.mOnCaptureBufferLost;
auto outputPairIt = mConfiguredOutputs.find(streamId);
if (outputPairIt == mConfiguredOutputs.end()) {
ALOGE("%s: Error: stream id %d does not exist", __FUNCTION__, streamId);
@@ -913,7 +803,7 @@
getId(), anw, frameNumber);
sp<AMessage> msg = new AMessage(kWhatCaptureBufferLost, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) onBufferLost);
msg->setObject(kCaptureRequestKey, request);
@@ -925,7 +815,7 @@
}
} else { // Handle other capture failures
// Fire capture failure callback if there is one registered
- ACameraCaptureSession_captureCallback_failed onError = cbh.mCallbacks.onCaptureFailed;
+ ACameraCaptureSession_captureCallback_failed onError = cbh.mOnCaptureFailed;
sp<CameraCaptureFailure> failure(new CameraCaptureFailure());
failure->frameNumber = frameNumber;
// TODO: refine this when implementing flush
@@ -934,7 +824,7 @@
failure->wasImageCaptured = (errorCode == ErrorCode::CAMERA_RESULT);
sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) onError);
msg->setObject(kCaptureRequestKey, request);
@@ -956,6 +846,7 @@
case kWhatSessionStateCb:
case kWhatCaptureStart:
case kWhatCaptureResult:
+ case kWhatLogicalCaptureResult:
case kWhatCaptureFail:
case kWhatCaptureSeqEnd:
case kWhatCaptureSeqAbort:
@@ -1026,6 +917,7 @@
case kWhatSessionStateCb:
case kWhatCaptureStart:
case kWhatCaptureResult:
+ case kWhatLogicalCaptureResult:
case kWhatCaptureFail:
case kWhatCaptureSeqEnd:
case kWhatCaptureSeqAbort:
@@ -1043,6 +935,7 @@
switch (msg->what()) {
case kWhatCaptureStart:
case kWhatCaptureResult:
+ case kWhatLogicalCaptureResult:
case kWhatCaptureFail:
case kWhatCaptureBufferLost:
found = msg->findObject(kCaptureRequestKey, &obj);
@@ -1114,6 +1007,62 @@
freeACaptureRequest(request);
break;
}
+ case kWhatLogicalCaptureResult:
+ {
+ ACameraCaptureSession_logicalCamera_captureCallback_result onResult;
+ found = msg->findPointer(kCallbackFpKey, (void**) &onResult);
+ if (!found) {
+ ALOGE("%s: Cannot find capture result callback!", __FUNCTION__);
+ return;
+ }
+ if (onResult == nullptr) {
+ return;
+ }
+
+ found = msg->findObject(kCaptureResultKey, &obj);
+ if (!found) {
+ ALOGE("%s: Cannot find capture result!", __FUNCTION__);
+ return;
+ }
+ sp<ACameraMetadata> result(static_cast<ACameraMetadata*>(obj.get()));
+
+ found = msg->findObject(kPhysicalCaptureResultKey, &obj);
+ if (!found) {
+ ALOGE("%s: Cannot find physical capture result!", __FUNCTION__);
+ return;
+ }
+ sp<ACameraPhysicalCaptureResultInfo> physicalResult(
+ static_cast<ACameraPhysicalCaptureResultInfo*>(obj.get()));
+ std::vector<PhysicalCaptureResultInfoLocal>& physicalResultInfo =
+ physicalResult->mPhysicalResultInfo;
+
+ std::vector<std::string> physicalCameraIds;
+ std::vector<sp<ACameraMetadata>> physicalMetadataCopy;
+ for (size_t i = 0; i < physicalResultInfo.size(); i++) {
+ physicalCameraIds.push_back(physicalResultInfo[i].physicalCameraId);
+
+ CameraMetadata clone = physicalResultInfo[i].physicalMetadata;
+ clone.update(ANDROID_SYNC_FRAME_NUMBER,
+ &physicalResult->mFrameNumber, /*data_count*/1);
+ sp<ACameraMetadata> metadata =
+ new ACameraMetadata(clone.release(), ACameraMetadata::ACM_RESULT);
+ physicalMetadataCopy.push_back(metadata);
+ }
+ std::vector<const char*> physicalCameraIdPtrs;
+ std::vector<const ACameraMetadata*> physicalMetadataCopyPtrs;
+ for (size_t i = 0; i < physicalResultInfo.size(); i++) {
+ physicalCameraIdPtrs.push_back(physicalCameraIds[i].c_str());
+ physicalMetadataCopyPtrs.push_back(physicalMetadataCopy[i].get());
+ }
+
+ ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ (*onResult)(context, session.get(), request, result.get(),
+ physicalResultInfo.size(), physicalCameraIdPtrs.data(),
+ physicalMetadataCopyPtrs.data());
+ freeACaptureRequest(request);
+ break;
+ }
+
case kWhatCaptureFail:
{
ACameraCaptureSession_captureCallback_failed onFail;
@@ -1224,12 +1173,34 @@
}
CameraDevice::CallbackHolder::CallbackHolder(
- sp<ACameraCaptureSession> session,
- const Vector<sp<CaptureRequest> >& requests,
- bool isRepeating,
- ACameraCaptureSession_captureCallbacks* cbs) :
- mSession(session), mRequests(requests),
- mIsRepeating(isRepeating), mCallbacks(fillCb(cbs)) {}
+ sp<ACameraCaptureSession> session,
+ const Vector<sp<CaptureRequest> >& requests,
+ bool isRepeating,
+ ACameraCaptureSession_captureCallbacks* cbs) :
+ mSession(session), mRequests(requests),
+ mIsRepeating(isRepeating),
+ mIsLogicalCameraCallback(false) {
+ initCaptureCallbacks(cbs);
+
+ if (cbs != nullptr) {
+ mOnCaptureCompleted = cbs->onCaptureCompleted;
+ }
+}
+
+CameraDevice::CallbackHolder::CallbackHolder(
+ sp<ACameraCaptureSession> session,
+ const Vector<sp<CaptureRequest> >& requests,
+ bool isRepeating,
+ ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs) :
+ mSession(session), mRequests(requests),
+ mIsRepeating(isRepeating),
+ mIsLogicalCameraCallback(true) {
+ initCaptureCallbacks(lcbs);
+
+ if (lcbs != nullptr) {
+ mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+ }
+}
void
CameraDevice::checkRepeatingSequenceCompleteLocked(
@@ -1246,9 +1217,9 @@
mSequenceCallbackMap.erase(cbIt);
// send seq aborted callback
sp<AMessage> msg = new AMessage(kWhatCaptureSeqAbort, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, cbh.mSession);
- msg->setPointer(kCallbackFpKey, (void*) cbh.mCallbacks.onCaptureSequenceAborted);
+ msg->setPointer(kCallbackFpKey, (void*) cbh.mOnCaptureSequenceAborted);
msg->setInt32(kSequenceIdKey, sequenceId);
postSessionMsgAndCleanup(msg);
} else {
@@ -1295,9 +1266,9 @@
mSequenceCallbackMap.erase(cbIt);
// send seq complete callback
sp<AMessage> msg = new AMessage(kWhatCaptureSeqEnd, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, cbh.mSession);
- msg->setPointer(kCallbackFpKey, (void*) cbh.mCallbacks.onCaptureSequenceCompleted);
+ msg->setPointer(kCallbackFpKey, (void*) cbh.mOnCaptureSequenceCompleted);
msg->setInt32(kSequenceIdKey, sequenceId);
msg->setInt64(kFrameNumberKey, lastFrameNumber);
@@ -1454,7 +1425,7 @@
auto it = dev->mSequenceCallbackMap.find(sequenceId);
if (it != dev->mSequenceCallbackMap.end()) {
CallbackHolder cbh = (*it).second;
- ACameraCaptureSession_captureCallback_start onStart = cbh.mCallbacks.onCaptureStarted;
+ ACameraCaptureSession_captureCallback_start onStart = cbh.mOnCaptureStarted;
sp<ACameraCaptureSession> session = cbh.mSession;
if ((size_t) burstId >= cbh.mRequests.size()) {
ALOGE("%s: Error: request index %d out of bound (size %zu)",
@@ -1463,7 +1434,7 @@
}
sp<CaptureRequest> request = cbh.mRequests[burstId];
sp<AMessage> msg = new AMessage(kWhatCaptureStart, dev->mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) onStart);
msg->setObject(kCaptureRequestKey, request);
@@ -1478,7 +1449,6 @@
const FmqSizeOrMetadata& resultMetadata,
const CaptureResultExtras& resultExtras,
const hidl_vec<PhysicalCaptureResultInfo>& physicalResultInfos) {
- (void) physicalResultInfos;
auto ret = Void();
sp<CameraDevice> dev = mDevice.promote();
@@ -1508,27 +1478,10 @@
}
CameraMetadata metadataCopy;
- HCameraMetadata hCameraMetadata;
- bool converted = false;
- if (resultMetadata.getDiscriminator() ==
- FmqSizeOrMetadata::hidl_discriminator::fmqMetadataSize) {
- hCameraMetadata.resize(resultMetadata.fmqMetadataSize());
- bool read = dev->mCaptureResultMetadataQueue->read(hCameraMetadata.data(),
- resultMetadata.fmqMetadataSize());
- if (!read) {
- ALOGE("%s capture request settings could't be read from fmq",
- __FUNCTION__);
- return ret;
- }
- // TODO: Do we actually need to clone here ?
- converted = utils::convertFromHidlCloned(hCameraMetadata, &metadataCopy);
-
- } else {
- converted = utils::convertFromHidlCloned(resultMetadata.metadata(), &metadataCopy);
- }
-
- if (!converted) {
- ALOGE("%s result metadata couldn't be converted", __FUNCTION__);
+ camera_status_t status = readOneResultMetadata(resultMetadata,
+ dev->mCaptureResultMetadataQueue.get(), &metadataCopy);
+ if (status != ACAMERA_OK) {
+ ALOGE("%s: result metadata couldn't be converted", __FUNCTION__);
return ret;
}
@@ -1538,9 +1491,6 @@
auto it = dev->mSequenceCallbackMap.find(sequenceId);
if (it != dev->mSequenceCallbackMap.end()) {
CallbackHolder cbh = (*it).second;
- ACameraCaptureSession_captureCallback_result onResult = isPartialResult ?
- cbh.mCallbacks.onCaptureProgressed :
- cbh.mCallbacks.onCaptureCompleted;
sp<ACameraCaptureSession> session = cbh.mSession;
if ((size_t) burstId >= cbh.mRequests.size()) {
ALOGE("%s: Error: request index %d out of bound (size %zu)",
@@ -1551,12 +1501,39 @@
sp<ACameraMetadata> result(new ACameraMetadata(
metadataCopy.release(), ACameraMetadata::ACM_RESULT));
- sp<AMessage> msg = new AMessage(kWhatCaptureResult, dev->mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ std::vector<PhysicalCaptureResultInfoLocal> localPhysicalResult;
+ localPhysicalResult.resize(physicalResultInfos.size());
+ for (size_t i = 0; i < physicalResultInfos.size(); i++) {
+ localPhysicalResult[i].physicalCameraId = physicalResultInfos[i].physicalCameraId;
+ status = readOneResultMetadata(physicalResultInfos[i].physicalCameraMetadata,
+ dev->mCaptureResultMetadataQueue.get(),
+ &localPhysicalResult[i].physicalMetadata);
+ if (status != ACAMERA_OK) {
+ ALOGE("%s: physical camera result metadata couldn't be converted", __FUNCTION__);
+ return ret;
+ }
+ }
+ sp<ACameraPhysicalCaptureResultInfo> physicalResult(
+ new ACameraPhysicalCaptureResultInfo(localPhysicalResult, frameNumber));
+
+ sp<AMessage> msg = new AMessage(
+ cbh.mIsLogicalCameraCallback ? kWhatLogicalCaptureResult : kWhatCaptureResult,
+ dev->mHandler);
+ msg->setPointer(kContextKey, cbh.mContext);
msg->setObject(kSessionSpKey, session);
- msg->setPointer(kCallbackFpKey, (void*) onResult);
msg->setObject(kCaptureRequestKey, request);
msg->setObject(kCaptureResultKey, result);
+ if (isPartialResult) {
+ msg->setPointer(kCallbackFpKey,
+ (void *)cbh.mOnCaptureProgressed);
+ } else if (cbh.mIsLogicalCameraCallback) {
+ msg->setPointer(kCallbackFpKey,
+ (void *)cbh.mOnLogicalCameraCaptureCompleted);
+ msg->setObject(kPhysicalCaptureResultKey, physicalResult);
+ } else {
+ msg->setPointer(kCallbackFpKey,
+ (void *)cbh.mOnCaptureCompleted);
+ }
dev->postSessionMsgAndCleanup(msg);
}
@@ -1590,5 +1567,31 @@
return ret;
}
+camera_status_t CameraDevice::ServiceCallback::readOneResultMetadata(
+ const FmqSizeOrMetadata& fmqSizeOrMetadata, ResultMetadataQueue* metadataQueue,
+ CameraMetadata* metadata) {
+ if (metadataQueue == nullptr || metadata == nullptr) {
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ bool converted;
+ HCameraMetadata hCameraMetadata;
+ if (fmqSizeOrMetadata.getDiscriminator() ==
+ FmqSizeOrMetadata::hidl_discriminator::fmqMetadataSize) {
+ hCameraMetadata.resize(fmqSizeOrMetadata.fmqMetadataSize());
+ bool read = metadataQueue->read(
+ hCameraMetadata.data(), fmqSizeOrMetadata.fmqMetadataSize());
+ if (!read) {
+ ALOGE("%s capture request settings could't be read from fmq", __FUNCTION__);
+ return ACAMERA_ERROR_UNKNOWN;
+ }
+        // TODO: Do we actually need to clone here?
+ converted = utils::convertFromHidlCloned(hCameraMetadata, metadata);
+ } else {
+ converted = utils::convertFromHidlCloned(fmqSizeOrMetadata.metadata(), metadata);
+ }
+
+ return converted ? ACAMERA_OK : ACAMERA_ERROR_UNKNOWN;
+}
+
} // namespace acam
} // namespace android
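The changes above, together with the header diff that follows, flatten the stored ACameraCaptureSession_captureCallbacks struct into individual function-pointer members so a single CallbackHolder can be built from either the regular or the logical-camera callback struct, and later dispatch only needs a flag plus the relevant pointer. As a hedged illustration of that pattern, here is a self-contained sketch with simplified, hypothetical types (RegularCallbacks/LogicalCallbacks stand in for the NDK structs; this is not the actual framework code):

#include <cstdio>

// Hypothetical stand-ins for the two NDK callback structs; only the fields
// needed to show the pattern are modeled here.
struct RegularCallbacks {
    void* context;
    void (*onCaptureCompleted)(void* ctx, int frameNumber);
};

struct LogicalCallbacks {
    void* context;
    void (*onLogicalCameraCaptureCompleted)(void* ctx, int frameNumber,
                                            const char* physicalCameraId);
};

struct CallbackHolderSketch {
    CallbackHolderSketch(RegularCallbacks* cbs) : mIsLogicalCameraCallback(false) {
        initCommon(cbs);
        if (cbs != nullptr) mOnCaptureCompleted = cbs->onCaptureCompleted;
    }
    CallbackHolderSketch(LogicalCallbacks* lcbs) : mIsLogicalCameraCallback(true) {
        initCommon(lcbs);
        if (lcbs != nullptr) {
            mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
        }
    }

    // One templated initializer covers the fields both structs share.
    template <class T>
    void initCommon(T* cbs) {
        mContext = (cbs != nullptr) ? cbs->context : nullptr;
    }

    void fireCompleted(int frameNumber, const char* physicalCameraId) const {
        // Dispatch on the flag instead of on which struct was stored.
        if (mIsLogicalCameraCallback && mOnLogicalCameraCaptureCompleted != nullptr) {
            mOnLogicalCameraCaptureCompleted(mContext, frameNumber, physicalCameraId);
        } else if (mOnCaptureCompleted != nullptr) {
            mOnCaptureCompleted(mContext, frameNumber);
        }
    }

    const bool mIsLogicalCameraCallback;
    void* mContext = nullptr;
    void (*mOnCaptureCompleted)(void*, int) = nullptr;
    void (*mOnLogicalCameraCaptureCompleted)(void*, int, const char*) = nullptr;
};

int main() {
    LogicalCallbacks lcbs = {nullptr,
            [](void*, int frame, const char* id) {
                std::printf("frame %d completed on physical camera %s\n", frame, id);
            }};
    CallbackHolderSketch holder(&lcbs);
    holder.fireCompleted(42, "2");
    return 0;
}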
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 01a219f..c63b97f 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -21,6 +21,7 @@
#include <set>
#include <atomic>
#include <utility>
+#include <vector>
#include <utils/StrongPointer.h>
#include <utils/Mutex.h>
#include <utils/List.h>
@@ -65,6 +66,21 @@
// Wrap ACameraCaptureFailure so it can be ref-counted
struct CameraCaptureFailure : public RefBase, public ACameraCaptureFailure { };
+// Wrap PhysicalCaptureResultInfo so that it can be ref-counted
+struct PhysicalCaptureResultInfoLocal {
+ std::string physicalCameraId;
+ CameraMetadata physicalMetadata;
+};
+
+struct ACameraPhysicalCaptureResultInfo: public RefBase {
+ ACameraPhysicalCaptureResultInfo(const std::vector<PhysicalCaptureResultInfoLocal>& info,
+ int64_t frameNumber) :
+ mPhysicalResultInfo(info), mFrameNumber(frameNumber) {}
+
+ std::vector<PhysicalCaptureResultInfoLocal> mPhysicalResultInfo;
+ int64_t mFrameNumber;
+};
+
class CameraDevice final : public RefBase {
public:
CameraDevice(const char* id, ACameraDevice_StateCallbacks* cb,
@@ -99,6 +115,8 @@
android::hardware::Return<void> onRepeatingRequestError(uint64_t lastFrameNumber,
int32_t stoppedSequenceId) override;
private:
+ camera_status_t readOneResultMetadata(const FmqSizeOrMetadata& fmqSizeOrMetadata,
+ ResultMetadataQueue* metadataQueue, CameraMetadata* metadata);
const wp<CameraDevice> mDevice;
};
inline sp<ICameraDeviceCallback> getServiceCallback() {
@@ -127,24 +145,28 @@
camera_status_t waitUntilIdleLocked();
-
+ template<class T>
camera_status_t captureLocked(sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
+ template<class T>
camera_status_t setRepeatingRequestsLocked(sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
+ template<class T>
camera_status_t submitRequestsLocked(
sp<ACameraCaptureSession> session,
- /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+ /*optional*/T* cbs,
int numRequests, ACaptureRequest** requests,
/*out*/int* captureSequenceId,
bool isRepeating);
+ void addRequestSettingsMetadata(ACaptureRequest *aCaptureRequest, sp<CaptureRequest> &req);
+
camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
camera_status_t allocateCaptureRequest(
@@ -206,6 +228,7 @@
// Capture callbacks
kWhatCaptureStart, // onCaptureStarted
kWhatCaptureResult, // onCaptureProgressed, onCaptureCompleted
+ kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
kWhatCaptureFail, // onCaptureFailed
kWhatCaptureSeqEnd, // onCaptureSequenceCompleted
kWhatCaptureSeqAbort, // onCaptureSequenceAborted
@@ -221,6 +244,7 @@
static const char* kCaptureRequestKey;
static const char* kTimeStampKey;
static const char* kCaptureResultKey;
+ static const char* kPhysicalCaptureResultKey;
static const char* kCaptureFailureKey;
static const char* kSequenceIdKey;
static const char* kFrameNumberKey;
@@ -259,19 +283,47 @@
const Vector<sp<CaptureRequest>>& requests,
bool isRepeating,
ACameraCaptureSession_captureCallbacks* cbs);
+ CallbackHolder(sp<ACameraCaptureSession> session,
+ const Vector<sp<CaptureRequest>>& requests,
+ bool isRepeating,
+ ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs);
- static ACameraCaptureSession_captureCallbacks fillCb(
- ACameraCaptureSession_captureCallbacks* cbs) {
+ template <class T>
+ void initCaptureCallbacks(T* cbs) {
+ mContext = nullptr;
+ mOnCaptureStarted = nullptr;
+ mOnCaptureProgressed = nullptr;
+ mOnCaptureCompleted = nullptr;
+ mOnLogicalCameraCaptureCompleted = nullptr;
+ mOnCaptureFailed = nullptr;
+ mOnCaptureSequenceCompleted = nullptr;
+ mOnCaptureSequenceAborted = nullptr;
+ mOnCaptureBufferLost = nullptr;
if (cbs != nullptr) {
- return *cbs;
+ mContext = cbs->context;
+ mOnCaptureStarted = cbs->onCaptureStarted;
+ mOnCaptureProgressed = cbs->onCaptureProgressed;
+ mOnCaptureFailed = cbs->onCaptureFailed;
+ mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
+ mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
+ mOnCaptureBufferLost = cbs->onCaptureBufferLost;
}
- return { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
}
sp<ACameraCaptureSession> mSession;
- Vector<sp<CaptureRequest>> mRequests;
+ Vector<sp<CaptureRequest>> mRequests;
const bool mIsRepeating;
- ACameraCaptureSession_captureCallbacks mCallbacks;
+ const bool mIsLogicalCameraCallback;
+
+ void* mContext;
+ ACameraCaptureSession_captureCallback_start mOnCaptureStarted;
+ ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
+ ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
+ ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
+ ACameraCaptureSession_captureCallback_failed mOnCaptureFailed;
+ ACameraCaptureSession_captureCallback_sequenceEnd mOnCaptureSequenceCompleted;
+ ACameraCaptureSession_captureCallback_sequenceAbort mOnCaptureSequenceAborted;
+ ACameraCaptureSession_captureCallback_bufferLost mOnCaptureBufferLost;
};
// sequence id -> callbacks map
std::map<int, CallbackHolder> mSequenceCallbackMap;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
new file mode 100644
index 0000000..7d2304e
--- /dev/null
+++ b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <inttypes.h>
+#include <android/frameworks/cameraservice/service/2.0/ICameraService.h>
+#include <android/frameworks/cameraservice/device/2.0/types.h>
+#include <CameraMetadata.h>
+
+#include "ndk_vendor/impl/ACameraDevice.h"
+#include "ACameraCaptureSession.h"
+#include "ACameraMetadata.h"
+#include "ACaptureRequest.h"
+#include "utils.h"
+
+using namespace android;
+
+namespace android {
+namespace acam {
+
+template<class T>
+camera_status_t
+CameraDevice::captureLocked(
+ sp<ACameraCaptureSession> session,
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ return submitRequestsLocked(
+ session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/false);
+}
+
+template<class T>
+camera_status_t
+CameraDevice::setRepeatingRequestsLocked(
+ sp<ACameraCaptureSession> session,
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*optional*/int* captureSequenceId) {
+ return submitRequestsLocked(
+ session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/true);
+}
+
+template<class T>
+camera_status_t CameraDevice::submitRequestsLocked(
+ sp<ACameraCaptureSession> session,
+ /*optional*/T* cbs,
+ int numRequests, ACaptureRequest** requests,
+ /*out*/int* captureSequenceId,
+ bool isRepeating)
+{
+ camera_status_t ret = checkCameraClosedOrErrorLocked();
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera %s submit capture request failed! ret %d", getId(), ret);
+ return ret;
+ }
+
+ // Form two vectors of capture request, one for internal tracking
+ std::vector<frameworks::cameraservice::device::V2_0::CaptureRequest> requestList;
+ Vector<sp<CaptureRequest>> requestsV;
+ requestsV.setCapacity(numRequests);
+ for (int i = 0; i < numRequests; i++) {
+ sp<CaptureRequest> req;
+        ret = allocateCaptureRequest(requests[i], req);
+        if (ret != ACAMERA_OK) {
+            ALOGE("Convert capture request to internal format failure! ret %d", ret);
+            return ret;
+        }
+        // Cache the request settings in the wrapper class now: once
+        // submitRequestList is called, the request metadata queue may already
+        // have consumed the capture request metadata, so it must be saved here
+        // to be retrievable later.
+        addRequestSettingsMetadata(requests[i], req);
+ if (req->mCaptureRequest.streamAndWindowIds.size() == 0) {
+ ALOGE("Capture request without output target cannot be submitted!");
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ requestList.push_back(utils::convertToHidl(req.get()));
+ requestsV.push_back(req);
+ }
+ if (isRepeating) {
+ ret = stopRepeatingLocked();
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera %s stop repeating failed! ret %d", getId(), ret);
+ return ret;
+ }
+ }
+
+ SubmitInfo info;
+ Status status;
+ auto remoteRet = mRemote->submitRequestList(requestList, isRepeating,
+ [&status, &info](auto s, auto &submitInfo) {
+ status = s;
+ info = submitInfo;
+ });
+ if (!remoteRet.isOk()) {
+ ALOGE("%s: Transaction error for submitRequestList call: %s", __FUNCTION__,
+ remoteRet.description().c_str());
+ }
+ if (status != Status::NO_ERROR) {
+ return utils::convertFromHidl(status);
+ }
+ int32_t sequenceId = info.requestId;
+ int64_t lastFrameNumber = info.lastFrameNumber;
+ if (sequenceId < 0) {
+ ALOGE("Camera %s submit request remote failure: ret %d", getId(), sequenceId);
+ return ACAMERA_ERROR_UNKNOWN;
+ }
+
+ CallbackHolder cbHolder(session, requestsV, isRepeating, cbs);
+ mSequenceCallbackMap.insert(std::make_pair(sequenceId, cbHolder));
+ if (isRepeating) {
+        // stopRepeating above should have cleaned up the repeating sequence id
+ if (mRepeatingSequenceId != REQUEST_ID_NONE) {
+ setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+ return ACAMERA_ERROR_CAMERA_DEVICE;
+ }
+ mRepeatingSequenceId = sequenceId;
+ } else {
+ mSequenceLastFrameNumberMap.insert(std::make_pair(sequenceId, lastFrameNumber));
+ }
+
+ if (mIdle) {
+ sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
+ msg->setPointer(kContextKey, session->mUserSessionCallback.context);
+ msg->setObject(kSessionSpKey, session);
+ msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
+ postSessionMsgAndCleanup(msg);
+ }
+ mIdle = false;
+ mBusySession = session;
+
+ if (captureSequenceId) {
+ *captureSequenceId = sequenceId;
+ }
+ return ACAMERA_OK;
+}
+
+} // namespace acam
+} // namespace android
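ACameraDeviceVendor.inc above holds the template member-function definitions (captureLocked, setRepeatingRequestsLocked, submitRequestsLocked) that cannot live in the .cpp alone: their bodies must be visible in every translation unit that instantiates them with a concrete callback type. A minimal sketch of this .inc pattern, with hypothetical names and all three "files" condensed into one compilable listing:

// ---- Device.h: declares the class and its template member --------------
struct Device {
    template <class T>
    int submit(T* cbs, int numRequests);
    int mLastCount = 0;
};

// ---- Device.inc: template definitions, included by every user ----------
// The body must be visible in each translation unit that instantiates
// submit<T>() with its own callback type, which is why it is not in Device.cpp.
template <class T>
int Device::submit(T* cbs, int numRequests) {
    (void)cbs;                    // a real implementation would copy cbs into a holder
    mLastCount = numRequests;
    return 0;
}

// ---- Client.cpp: includes Device.h and Device.inc, then instantiates ---
struct RegularCbs {};
struct LogicalCbs {};

int main() {
    Device device;
    RegularCbs regular;
    LogicalCbs logical;
    device.submit(&regular, 1);   // instantiates submit<RegularCbs>
    device.submit(&logical, 3);   // instantiates submit<LogicalCbs>
    return device.mLastCount == 3 ? 0 : 1;
}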
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 579412e..f9bb3ac 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -55,38 +55,27 @@
class CameraHelper {
public:
- CameraHelper(native_handle_t* imgReaderAnw) : mImgReaderAnw(imgReaderAnw) {}
+ CameraHelper(const char* id, ACameraManager *manager) :
+ mImgReaderAnw(nullptr), mCameraId(id), mCameraManager(manager) {}
~CameraHelper() { closeCamera(); }
- int initCamera() {
- if (mImgReaderAnw == nullptr) {
+ struct PhysicalImgReaderInfo {
+ const char* physicalCameraId;
+ native_handle_t* anw;
+ };
+ int initCamera(native_handle_t* imgReaderAnw,
+ const std::vector<PhysicalImgReaderInfo>& physicalImgReaders) {
+ if (imgReaderAnw == nullptr) {
ALOGE("Cannot initialize camera before image reader get initialized.");
return -1;
}
+ if (mIsCameraReady) {
+ ALOGE("initCamera should only be called once.");
+ return -1;
+ }
+
int ret;
-
- mCameraManager = ACameraManager_create();
- if (mCameraManager == nullptr) {
- ALOGE("Failed to create ACameraManager.");
- return -1;
- }
-
- ret = ACameraManager_getCameraIdList(mCameraManager, &mCameraIdList);
- if (ret != AMEDIA_OK) {
- ALOGE("Failed to get cameraIdList: ret=%d", ret);
- return ret;
- }
- if (mCameraIdList->numCameras < 1) {
- ALOGW("Device has no camera on board.");
- return 0;
- }
-
- // We always use the first camera.
- mCameraId = mCameraIdList->cameraIds[0];
- if (mCameraId == nullptr) {
- ALOGE("Failed to get cameraId.");
- return -1;
- }
+ mImgReaderAnw = imgReaderAnw;
ret = ACameraManager_openCamera(mCameraManager, mCameraId, &mDeviceCb, &mDevice);
if (ret != AMEDIA_OK || mDevice == nullptr) {
@@ -94,18 +83,6 @@
return -1;
}
- ret = ACameraManager_getCameraCharacteristics(mCameraManager, mCameraId, &mCameraMetadata);
- if (ret != ACAMERA_OK || mCameraMetadata == nullptr) {
- ALOGE("Get camera %s characteristics failure. ret %d, metadata %p", mCameraId, ret,
- mCameraMetadata);
- return -1;
- }
-
- if (!isCapabilitySupported(ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE)) {
- ALOGW("Camera does not support BACKWARD_COMPATIBLE.");
- return 0;
- }
-
// Create capture session
ret = ACaptureSessionOutputContainer_create(&mOutputs);
if (ret != AMEDIA_OK) {
@@ -122,6 +99,25 @@
ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
return ret;
}
+
+ for (auto& physicalStream : physicalImgReaders) {
+ ACaptureSessionOutput* sessionOutput = nullptr;
+ ret = ACaptureSessionPhysicalOutput_create(physicalStream.anw,
+ physicalStream.physicalCameraId, &sessionOutput);
+ if (ret != ACAMERA_OK) {
+ ALOGE("ACaptureSessionPhysicalOutput_create failed, ret=%d", ret);
+ return ret;
+ }
+ ret = ACaptureSessionOutputContainer_add(mOutputs, sessionOutput);
+ if (ret != AMEDIA_OK) {
+ ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
+ return ret;
+ }
+ mExtraOutputs.push_back(sessionOutput);
+            // Assume that there is at most one physical stream per physical camera.
+ mPhysicalCameraIds.push_back(physicalStream.physicalCameraId);
+ }
+
ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
if (ret != AMEDIA_OK) {
ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);
@@ -145,21 +141,25 @@
return ret;
}
+ for (auto& physicalStream : physicalImgReaders) {
+ ACameraOutputTarget* outputTarget = nullptr;
+ ret = ACameraOutputTarget_create(physicalStream.anw, &outputTarget);
+ if (ret != AMEDIA_OK) {
+ ALOGE("ACameraOutputTarget_create failed, ret=%d", ret);
+ return ret;
+ }
+ ret = ACaptureRequest_addTarget(mStillRequest, outputTarget);
+ if (ret != AMEDIA_OK) {
+ ALOGE("ACaptureRequest_addTarget failed, ret=%d", ret);
+ return ret;
+ }
+ mReqExtraOutputs.push_back(outputTarget);
+ }
+
mIsCameraReady = true;
return 0;
}
- bool isCapabilitySupported(acamera_metadata_enum_android_request_available_capabilities_t cap) {
- ACameraMetadata_const_entry entry;
- ACameraMetadata_getConstEntry(
- mCameraMetadata, ACAMERA_REQUEST_AVAILABLE_CAPABILITIES, &entry);
- for (uint32_t i = 0; i < entry.count; i++) {
- if (entry.data.u8[i] == cap) {
- return true;
- }
- }
- return false;
- }
bool isCameraReady() { return mIsCameraReady; }
@@ -169,6 +169,10 @@
ACameraOutputTarget_free(mReqImgReaderOutput);
mReqImgReaderOutput = nullptr;
}
+ for (auto& outputTarget : mReqExtraOutputs) {
+ ACameraOutputTarget_free(outputTarget);
+ }
+ mReqExtraOutputs.clear();
if (mStillRequest) {
ACaptureRequest_free(mStillRequest);
mStillRequest = nullptr;
@@ -182,6 +186,10 @@
ACaptureSessionOutput_free(mImgReaderOutput);
mImgReaderOutput = nullptr;
}
+ for (auto& extraOutput : mExtraOutputs) {
+ ACaptureSessionOutput_free(extraOutput);
+ }
+ mExtraOutputs.clear();
if (mOutputs) {
ACaptureSessionOutputContainer_free(mOutputs);
mOutputs = nullptr;
@@ -191,19 +199,6 @@
ACameraDevice_close(mDevice);
mDevice = nullptr;
}
- if (mCameraMetadata) {
- ACameraMetadata_free(mCameraMetadata);
- mCameraMetadata = nullptr;
- }
- // Destroy camera manager
- if (mCameraIdList) {
- ACameraManager_deleteCameraIdList(mCameraIdList);
- mCameraIdList = nullptr;
- }
- if (mCameraManager) {
- ACameraManager_delete(mCameraManager);
- mCameraManager = nullptr;
- }
mIsCameraReady = false;
}
@@ -213,6 +208,12 @@
&seqId);
}
+ int takeLogicalCameraPicture() {
+ int seqId;
+ return ACameraCaptureSession_logicalCamera_capture(mSession, &mLogicalCaptureCallbacks,
+ 1, &mStillRequest, &seqId);
+ }
+
bool checkCallbacks(int pictureCount) {
std::lock_guard<std::mutex> lock(mMutex);
if (mCompletedCaptureCallbackCount != pictureCount) {
@@ -241,22 +242,22 @@
native_handle_t* mImgReaderAnw = nullptr; // not owned by us.
- // Camera manager
- ACameraManager* mCameraManager = nullptr;
- ACameraIdList* mCameraIdList = nullptr;
// Camera device
- ACameraMetadata* mCameraMetadata = nullptr;
ACameraDevice* mDevice = nullptr;
// Capture session
ACaptureSessionOutputContainer* mOutputs = nullptr;
ACaptureSessionOutput* mImgReaderOutput = nullptr;
+ std::vector<ACaptureSessionOutput*> mExtraOutputs;
+
ACameraCaptureSession* mSession = nullptr;
// Capture request
ACaptureRequest* mStillRequest = nullptr;
ACameraOutputTarget* mReqImgReaderOutput = nullptr;
+ std::vector<ACameraOutputTarget*> mReqExtraOutputs;
bool mIsCameraReady = false;
const char* mCameraId;
+ ACameraManager* mCameraManager;
int mCompletedCaptureCallbackCount = 0;
std::mutex mMutex;
ACameraCaptureSession_captureCallbacks mCaptureCallbacks = {
@@ -264,7 +265,6 @@
this, // context
nullptr, // onCaptureStarted
nullptr, // onCaptureProgressed
- // onCaptureCompleted, called serially, so no lock needed.
[](void* ctx , ACameraCaptureSession *, ACaptureRequest *,
const ACameraMetadata *) {
CameraHelper *ch = static_cast<CameraHelper *>(ctx);
@@ -275,8 +275,44 @@
nullptr, // onCaptureSequenceCompleted
nullptr, // onCaptureSequenceAborted
nullptr, // onCaptureBufferLost
- };
+ };
+ std::vector<std::string> mPhysicalCameraIds;
+ ACameraCaptureSession_logicalCamera_captureCallbacks mLogicalCaptureCallbacks = {
+ // TODO: Add tests for other callbacks
+ this, // context
+ nullptr, // onCaptureStarted
+ nullptr, // onCaptureProgressed
+ [](void* ctx , ACameraCaptureSession *, ACaptureRequest *,
+ const ACameraMetadata *, size_t physicalResultCount,
+ const char** physicalCameraIds, const ACameraMetadata** physicalResults) {
+ CameraHelper *ch = static_cast<CameraHelper *>(ctx);
+ std::lock_guard<std::mutex> lock(ch->mMutex);
+ ASSERT_EQ(physicalResultCount, ch->mPhysicalCameraIds.size());
+ for (size_t i = 0; i < physicalResultCount; i++) {
+ ASSERT_TRUE(physicalCameraIds[i] != nullptr);
+ ASSERT_TRUE(physicalResults[i] != nullptr);
+ ASSERT_NE(std::find(ch->mPhysicalCameraIds.begin(),
+ ch->mPhysicalCameraIds.end(), physicalCameraIds[i]),
+ ch->mPhysicalCameraIds.end());
+
+ // Verify frameNumber and sensorTimestamp exist in physical
+ // result metadata
+ ACameraMetadata_const_entry entry;
+ ACameraMetadata_getConstEntry(
+ physicalResults[i], ACAMERA_SYNC_FRAME_NUMBER, &entry);
+ ASSERT_EQ(entry.count, 1);
+ ACameraMetadata_getConstEntry(
+ physicalResults[i], ACAMERA_SENSOR_TIMESTAMP, &entry);
+ ASSERT_EQ(entry.count, 1);
+ }
+ ch->mCompletedCaptureCallbackCount++;
+ },
+ nullptr, // onCaptureFailed
+ nullptr, // onCaptureSequenceCompleted
+ nullptr, // onCaptureSequenceAborted
+ nullptr, // onCaptureBufferLost
+ };
};
class ImageReaderTestCase {
@@ -476,84 +512,276 @@
AImageReader_BufferRemovedListener mReaderDetachedCb{this, onBufferRemoved};
};
-bool takePictures(uint64_t readerUsage, int readerMaxImages, bool readerAsync, int pictureCount) {
- int ret = 0;
- ImageReaderTestCase testCase(
- kTestImageWidth, kTestImageHeight, kTestImageFormat, readerUsage, readerMaxImages,
- readerAsync);
- ret = testCase.initImageReader();
- if (ret < 0) {
- ALOGE("Unable to initialize ImageReader");
+
+class AImageReaderVendorTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ mCameraManager = ACameraManager_create();
+ if (mCameraManager == nullptr) {
+ ALOGE("Failed to create ACameraManager.");
+ return;
+ }
+
+ camera_status_t ret = ACameraManager_getCameraIdList(mCameraManager, &mCameraIdList);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Failed to get cameraIdList: ret=%d", ret);
+ return;
+ }
+ if (mCameraIdList->numCameras < 1) {
+ ALOGW("Device has no camera on board.");
+ return;
+ }
+ }
+ void TearDown() override {
+ // Destroy camera manager
+ if (mCameraIdList) {
+ ACameraManager_deleteCameraIdList(mCameraIdList);
+ mCameraIdList = nullptr;
+ }
+ if (mCameraManager) {
+ ACameraManager_delete(mCameraManager);
+ mCameraManager = nullptr;
+ }
+ }
+
+ bool takePictures(const char* id, uint64_t readerUsage, int readerMaxImages,
+ bool readerAsync, int pictureCount) {
+ int ret = 0;
+
+ ImageReaderTestCase testCase(
+ kTestImageWidth, kTestImageHeight, kTestImageFormat, readerUsage, readerMaxImages,
+ readerAsync);
+ ret = testCase.initImageReader();
+ if (ret < 0) {
+ ALOGE("Unable to initialize ImageReader");
+ return false;
+ }
+
+ CameraHelper cameraHelper(id, mCameraManager);
+ ret = cameraHelper.initCamera(testCase.getNativeWindow(), {});
+ if (ret < 0) {
+ ALOGE("Unable to initialize camera helper");
+ return false;
+ }
+
+ if (!cameraHelper.isCameraReady()) {
+ ALOGW("Camera is not ready after successful initialization. It's either due to camera "
+ "on board lacks BACKWARDS_COMPATIBLE capability or the device does not have "
+ "camera on board.");
+ return true;
+ }
+
+ for (int i = 0; i < pictureCount; i++) {
+ ret = cameraHelper.takePicture();
+ if (ret < 0) {
+ ALOGE("Unable to take picture");
+ return false;
+ }
+ }
+
+ // Sleep until all capture finished
+ for (int i = 0; i < kCaptureWaitRetry * pictureCount; i++) {
+ usleep(kCaptureWaitUs);
+ if (testCase.getAcquiredImageCount() == pictureCount) {
+ ALOGI("Session take ~%d ms to capture %d images", i * kCaptureWaitUs / 1000,
+ pictureCount);
+ break;
+ }
+ }
+ return testCase.getAcquiredImageCount() == pictureCount &&
+ cameraHelper.checkCallbacks(pictureCount);
+ }
+
+ bool testTakePicturesNative(const char* id) {
+ for (auto& readerUsage :
+ {AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN}) {
+ for (auto& readerMaxImages : {1, 4, 8}) {
+ for (auto& readerAsync : {true, false}) {
+ for (auto& pictureCount : {1, 4, 8}) {
+ if (!takePictures(id, readerUsage, readerMaxImages,
+ readerAsync, pictureCount)) {
+ ALOGE("Test takePictures failed for test case usage=%" PRIu64
+ ", maxImages=%d, async=%d, pictureCount=%d",
+ readerUsage, readerMaxImages, readerAsync, pictureCount);
+ return false;
+ }
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ // Camera manager
+ ACameraManager* mCameraManager = nullptr;
+ ACameraIdList* mCameraIdList = nullptr;
+
+ bool isCapabilitySupported(ACameraMetadata* staticInfo,
+ acamera_metadata_enum_android_request_available_capabilities_t cap) {
+ ACameraMetadata_const_entry entry;
+ ACameraMetadata_getConstEntry(
+ staticInfo, ACAMERA_REQUEST_AVAILABLE_CAPABILITIES, &entry);
+ for (uint32_t i = 0; i < entry.count; i++) {
+ if (entry.data.u8[i] == cap) {
+ return true;
+ }
+ }
return false;
}
- CameraHelper cameraHelper(testCase.getNativeWindow());
- ret = cameraHelper.initCamera();
- if (ret < 0) {
- ALOGE("Unable to initialize camera helper");
+ bool isSizeSupportedForFormat(ACameraMetadata* staticInfo,
+ int32_t format, int32_t width, int32_t height) {
+ ACameraMetadata_const_entry entry;
+ ACameraMetadata_getConstEntry(staticInfo,
+ ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, &entry);
+ for (uint32_t i = 0; i < entry.count; i += 4) {
+ if (entry.data.i32[i] == format &&
+ entry.data.i32[i+3] == ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ entry.data.i32[i+1] == width &&
+ entry.data.i32[i+2] == height) {
+ return true;
+ }
+ }
return false;
}
+ void findCandidateLogicalCamera(const char **cameraId,
+ ACameraMetadata** staticMetadata,
+ std::vector<const char*>* candidatePhysicalIds) {
+ // Find first available logical camera
+ for (int i = 0; i < mCameraIdList->numCameras; i++) {
+ camera_status_t ret;
+ ret = ACameraManager_getCameraCharacteristics(
+ mCameraManager, mCameraIdList->cameraIds[i], staticMetadata);
+ ASSERT_EQ(ret, ACAMERA_OK);
+ ASSERT_NE(*staticMetadata, nullptr);
+
+ if (!isCapabilitySupported(*staticMetadata,
+ ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA)) {
+ ACameraMetadata_free(*staticMetadata);
+ *staticMetadata = nullptr;
+ continue;
+ }
+
+ // Check returned physical camera Ids are valid
+ size_t physicalCameraIdCnt = 0;
+ const char*const* physicalCameraIds = nullptr;
+ bool isLogicalCamera = ACameraMetadata_isLogicalMultiCamera(*staticMetadata,
+ &physicalCameraIdCnt, &physicalCameraIds);
+ ASSERT_TRUE(isLogicalCamera);
+ ASSERT_GE(physicalCameraIdCnt, 2);
+ ACameraMetadata* physicalCameraMetadata = nullptr;
+ candidatePhysicalIds->clear();
+ for (size_t j = 0; j < physicalCameraIdCnt && candidatePhysicalIds->size() < 2; j++) {
+ ASSERT_GT(strlen(physicalCameraIds[j]), 0);
+ ret = ACameraManager_getCameraCharacteristics(
+ mCameraManager, physicalCameraIds[j], &physicalCameraMetadata);
+ ASSERT_EQ(ret, ACAMERA_OK);
+ ASSERT_NE(physicalCameraMetadata, nullptr);
+
+ if (isSizeSupportedForFormat(physicalCameraMetadata, kTestImageFormat,
+ kTestImageWidth, kTestImageHeight)) {
+ candidatePhysicalIds->push_back(physicalCameraIds[j]);
+ }
+ ACameraMetadata_free(physicalCameraMetadata);
+ }
+ if (candidatePhysicalIds->size() == 2) {
+ *cameraId = mCameraIdList->cameraIds[i];
+ return;
+ } else {
+ ACameraMetadata_free(*staticMetadata);
+ *staticMetadata = nullptr;
+ }
+ }
+ *cameraId = nullptr;
+ return;
+ }
+};
+
+TEST_F(AImageReaderVendorTest, CreateWindowNativeHandle) {
+ // We always use the first camera.
+ const char* cameraId = mCameraIdList->cameraIds[0];
+ ASSERT_TRUE(cameraId != nullptr);
+
+ ACameraMetadata* staticMetadata = nullptr;
+ camera_status_t ret = ACameraManager_getCameraCharacteristics(
+ mCameraManager, cameraId, &staticMetadata);
+ ASSERT_EQ(ret, ACAMERA_OK);
+ ASSERT_NE(staticMetadata, nullptr);
+
+ bool isBC = isCapabilitySupported(staticMetadata,
+ ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
+
+ ACameraMetadata_free(staticMetadata);
+
+ if (!isBC) {
+ ALOGW("Camera does not support BACKWARD_COMPATIBLE.");
+ return;
+ }
+
+ EXPECT_TRUE(testTakePicturesNative(cameraId));
+}
+
+TEST_F(AImageReaderVendorTest, LogicalCameraPhysicalStream) {
+ const char* cameraId = nullptr;
+ ACameraMetadata* staticMetadata = nullptr;
+ std::vector<const char*> physicalCameraIds;
+
+ findCandidateLogicalCamera(&cameraId, &staticMetadata, &physicalCameraIds);
+ if (cameraId == nullptr) {
+ // Couldn't find logical camera to test
+ return;
+ }
+
+ // Test streaming the logical multi-camera
+ uint64_t readerUsage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
+ int32_t readerMaxImages = 8;
+ bool readerAsync = false;
+ const int pictureCount = 6;
+ std::vector<ImageReaderTestCase*> testCases;
+ for (size_t i = 0; i < 3; i++) {
+ ImageReaderTestCase* testCase = new ImageReaderTestCase(
+ kTestImageWidth, kTestImageHeight, kTestImageFormat, readerUsage, readerMaxImages,
+ readerAsync);
+ ASSERT_EQ(testCase->initImageReader(), 0);
+ testCases.push_back(testCase);
+ }
+
+ CameraHelper cameraHelper(cameraId, mCameraManager);
+ std::vector<CameraHelper::PhysicalImgReaderInfo> physicalImgReaderInfo;
+ physicalImgReaderInfo.push_back({physicalCameraIds[0], testCases[1]->getNativeWindow()});
+ physicalImgReaderInfo.push_back({physicalCameraIds[1], testCases[2]->getNativeWindow()});
+
+ int ret = cameraHelper.initCamera(testCases[0]->getNativeWindow(), physicalImgReaderInfo);
+ ASSERT_EQ(ret, 0);
+
if (!cameraHelper.isCameraReady()) {
ALOGW("Camera is not ready after successful initialization. It's either due to camera on "
"board lacks BACKWARDS_COMPATIBLE capability or the device does not have camera on "
"board.");
- return true;
+ return;
}
for (int i = 0; i < pictureCount; i++) {
- ret = cameraHelper.takePicture();
- if (ret < 0) {
- ALOGE("Unable to take picture");
- return false;
- }
+ ret = cameraHelper.takeLogicalCameraPicture();
+ ASSERT_EQ(ret, 0);
}
// Sleep until all capture finished
for (int i = 0; i < kCaptureWaitRetry * pictureCount; i++) {
usleep(kCaptureWaitUs);
- if (testCase.getAcquiredImageCount() == pictureCount) {
+ if (testCases[0]->getAcquiredImageCount() == pictureCount) {
ALOGI("Session take ~%d ms to capture %d images", i * kCaptureWaitUs / 1000,
pictureCount);
break;
}
}
- return testCase.getAcquiredImageCount() == pictureCount &&
- cameraHelper.checkCallbacks(pictureCount);
-}
+ ASSERT_EQ(testCases[0]->getAcquiredImageCount(), pictureCount);
+ ASSERT_EQ(testCases[1]->getAcquiredImageCount(), pictureCount);
+ ASSERT_EQ(testCases[2]->getAcquiredImageCount(), pictureCount);
+ ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
-class AImageReaderWindowHandleTest : public ::testing::Test {
- public:
- void SetUp() override {
- }
- void TearDown() override {
-
- }
-
-};
-
-bool testTakePicturesNative() {
- for (auto& readerUsage :
- {AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN}) {
- for (auto& readerMaxImages : {1, 4, 8}) {
- for (auto& readerAsync : {true, false}) {
- for (auto& pictureCount : {1, 4, 8}) {
- if (!takePictures(readerUsage, readerMaxImages, readerAsync, pictureCount)) {
- ALOGE("Test takePictures failed for test case usage=%" PRIu64 ", maxImages=%d, "
- "async=%d, pictureCount=%d",
- readerUsage, readerMaxImages, readerAsync, pictureCount);
- return false;
- }
- }
- }
- }
- }
- return true;
-}
-
-
-TEST_F(AImageReaderWindowHandleTest, CreateWindowNativeHandle) {
- EXPECT_TRUE(testTakePicturesNative());
+ ACameraMetadata_free(staticMetadata);
}
} // namespace
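On the client side, the new logical-camera completion callback simply receives two parallel arrays (physical camera ids and their result metadata) alongside the logical result. A hedged sketch of a handler and its registration, mirroring the test above and assuming the public NDK camera headers are on the include path (the handler name and logging are illustrative only):

#include <cinttypes>
#include <cstdio>
#include <camera/NdkCameraCaptureSession.h>
#include <camera/NdkCameraMetadata.h>

// Walks the per-physical-camera results; the arrays are only valid for the
// duration of the callback, so anything needed later must be copied out.
static void onLogicalCompleted(void* /*context*/, ACameraCaptureSession* /*session*/,
        ACaptureRequest* /*request*/, const ACameraMetadata* /*result*/,
        size_t physicalResultCount, const char** physicalCameraIds,
        const ACameraMetadata** physicalResults) {
    for (size_t i = 0; i < physicalResultCount; i++) {
        ACameraMetadata_const_entry entry;
        camera_status_t status = ACameraMetadata_getConstEntry(
                physicalResults[i], ACAMERA_SENSOR_TIMESTAMP, &entry);
        if (status == ACAMERA_OK && entry.count == 1) {
            std::printf("physical camera %s: timestamp %" PRId64 "\n",
                    physicalCameraIds[i], entry.data.i64[0]);
        }
    }
}

// Registration mirrors the test above: only the logical completion slot is used.
static ACameraCaptureSession_logicalCamera_captureCallbacks kLogicalCbs = {
    nullptr,              // context
    nullptr,              // onCaptureStarted
    nullptr,              // onCaptureProgressed
    onLogicalCompleted,   // onLogicalCameraCaptureCompleted
    nullptr,              // onCaptureFailed
    nullptr,              // onCaptureSequenceCompleted
    nullptr,              // onCaptureSequenceAborted
    nullptr,              // onCaptureBufferLost
};
// ACameraCaptureSession_logicalCamera_capture(session, &kLogicalCbs, 1, &request, &seqId);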
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index fa8a7a3..8534b28 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -480,7 +480,8 @@
sp<Surface> surface(new Surface(gbProducer, /*controlledByApp*/false));
- OutputConfiguration output(gbProducer, /*rotation*/0);
+ String16 noPhysicalId;
+ OutputConfiguration output(gbProducer, /*rotation*/0, noPhysicalId);
// Can we configure?
res = device->beginConfigure();
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index c7619af..7a10302 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -191,7 +191,6 @@
LOCAL_MODULE:= mediafilter
LOCAL_SANITIZE := cfi
-LOCAL_SANITIZE_DIAG := cfi
include $(BUILD_EXECUTABLE)
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index edf2a6c..34a9a40 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -224,11 +224,15 @@
player->setSource(rawSource);
rawSource.clear();
- player->start(true /* sourceAlreadyStarted */);
+ err = player->start(true /* sourceAlreadyStarted */);
- status_t finalStatus;
- while (!player->reachedEOS(&finalStatus)) {
- usleep(100000ll);
+ if (err == OK) {
+ status_t finalStatus;
+ while (!player->reachedEOS(&finalStatus)) {
+ usleep(100000ll);
+ }
+ } else {
+ fprintf(stderr, "unable to start playback err=%d (0x%08x)\n", err, err);
}
delete player;
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 94f9e02..01efb22 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -27,7 +27,6 @@
"libmediadrmmetrics_lite",
"libmediametrics",
"libmediautils",
- "libprotobuf-cpp-lite",
"libstagefright_foundation",
"libutils",
"android.hardware.drm@1.0",
@@ -60,20 +59,17 @@
shared_libs: [
"android.hardware.drm@1.0",
"android.hardware.drm@1.1",
- "libbase",
"libbinder",
"libhidlbase",
"liblog",
"libmediametrics",
"libprotobuf-cpp-lite",
- "libstagefright_foundation",
"libutils",
],
cflags: [
// Suppress unused parameter and no error options. These cause problems
// with the when using the map type in a proto definition.
"-Wno-unused-parameter",
- "-Wno-error",
],
}
@@ -106,7 +102,6 @@
// Suppress unused parameter and no error options. These cause problems
// when using the map type in a proto definition.
"-Wno-unused-parameter",
- "-Wno-error",
],
}
diff --git a/drm/libmediadrm/tests/Android.bp b/drm/libmediadrm/tests/Android.bp
index dcd59b7..9e0115e 100644
--- a/drm/libmediadrm/tests/Android.bp
+++ b/drm/libmediadrm/tests/Android.bp
@@ -34,7 +34,6 @@
// Suppress unused parameter and no error options. These cause problems
// when using the map type in a proto definition.
"-Wno-unused-parameter",
- "-Wno-error",
]
}
diff --git a/include/media/MediaExtractorPluginApi.h b/include/media/MediaExtractorPluginApi.h
index b480bbe..854bf83 100644
--- a/include/media/MediaExtractorPluginApi.h
+++ b/include/media/MediaExtractorPluginApi.h
@@ -47,33 +47,11 @@
NONBLOCKING = 16
};
-struct CMediaTrack {
- void *data;
- void (*free)(void *data);
-
- status_t (*start)(void *data);
- status_t (*stop)(void *data);
- status_t (*getFormat)(void *data, MetaDataBase &format);
- status_t (*read)(void *data, MediaBufferBase **buffer, uint32_t options, int64_t seekPosUs);
- bool (*supportsNonBlockingRead)(void *data);
-};
-
-struct CMediaTrackV2 {
- void *data;
- void (*free)(void *data);
-
- media_status_t (*start)(void *data);
- media_status_t (*stop)(void *data);
- media_status_t (*getFormat)(void *data, AMediaFormat *format);
- media_status_t (*read)(void *data, MediaBufferBase **buffer, uint32_t options, int64_t seekPosUs);
- bool (*supportsNonBlockingRead)(void *data);
-};
-
/**
- * only use CMediaBufferV3 allocated from the CMediaBufferGroupV3 that is
+ * Only use a CMediaBuffer allocated from the CMediaBufferGroup that is
* provided to CMediaTrack::start()
*/
-struct CMediaBufferV3 {
+struct CMediaBuffer {
void *handle;
void (*release)(void *handle);
void* (*data)(void *handle);
@@ -84,49 +62,32 @@
AMediaFormat* (*meta_data)(void *handle);
};
-struct CMediaBufferGroupV3 {
+struct CMediaBufferGroup {
void *handle;
bool (*init)(void *handle, size_t buffers, size_t buffer_size, size_t growthLimit);
void (*add_buffer)(void *handle, size_t size);
media_status_t (*acquire_buffer)(void *handle,
- CMediaBufferV3 **buffer, bool nonBlocking, size_t requestedSize);
+ CMediaBuffer **buffer, bool nonBlocking, size_t requestedSize);
bool (*has_buffers)(void *handle);
};
-struct CMediaTrackV3 {
+struct CMediaTrack {
void *data;
void (*free)(void *data);
- media_status_t (*start)(void *data, CMediaBufferGroupV3 *bufferGroup);
+ media_status_t (*start)(void *data, CMediaBufferGroup *bufferGroup);
media_status_t (*stop)(void *data);
media_status_t (*getFormat)(void *data, AMediaFormat *format);
- media_status_t (*read)(void *data, CMediaBufferV3 **buffer, uint32_t options, int64_t seekPosUs);
+ media_status_t (*read)(void *data, CMediaBuffer **buffer, uint32_t options, int64_t seekPosUs);
bool (*supportsNonBlockingRead)(void *data);
};
-struct CMediaExtractorV1 {
+struct CMediaExtractor {
void *data;
void (*free)(void *data);
size_t (*countTracks)(void *data);
CMediaTrack* (*getTrack)(void *data, size_t index);
- status_t (*getTrackMetaData)(
- void *data,
- MetaDataBase& meta,
- size_t index, uint32_t flags);
-
- status_t (*getMetaData)(void *data, MetaDataBase& meta);
- uint32_t (*flags)(void *data);
- status_t (*setMediaCas)(void *data, const uint8_t* casToken, size_t size);
- const char * (*name)(void *data);
-};
-
-struct CMediaExtractorV2 {
- void *data;
-
- void (*free)(void *data);
- size_t (*countTracks)(void *data);
- CMediaTrackV2* (*getTrack)(void *data, size_t index);
media_status_t (*getTrackMetaData)(
void *data,
AMediaFormat *meta,
@@ -138,48 +99,19 @@
const char * (*name)(void *data);
};
-struct CMediaExtractorV3 {
- void *data;
-
- void (*free)(void *data);
- size_t (*countTracks)(void *data);
- CMediaTrackV3* (*getTrack)(void *data, size_t index);
- media_status_t (*getTrackMetaData)(
- void *data,
- AMediaFormat *meta,
- size_t index, uint32_t flags);
-
- media_status_t (*getMetaData)(void *data, AMediaFormat *meta);
- uint32_t (*flags)(void *data);
- media_status_t (*setMediaCas)(void *data, const uint8_t* casToken, size_t size);
- const char * (*name)(void *data);
-};
-
-typedef CMediaExtractorV1* (*CreatorFuncV1)(CDataSource *source, void *meta);
+typedef CMediaExtractor* (*CreatorFunc)(CDataSource *source, void *meta);
typedef void (*FreeMetaFunc)(void *meta);
// The sniffer can optionally fill in an opaque object, "meta", that helps
// the corresponding extractor initialize its state without duplicating
// effort already exerted by the sniffer. If "freeMeta" is given, it will be
// called against the opaque object when it is no longer used.
-typedef CreatorFuncV1 (*SnifferFuncV1)(
+typedef CreatorFunc (*SnifferFunc)(
CDataSource *source, float *confidence,
void **meta, FreeMetaFunc *freeMeta);
-typedef CMediaExtractorV2* (*CreatorFuncV2)(CDataSource *source, void *meta);
-
-typedef CreatorFuncV2 (*SnifferFuncV2)(
- CDataSource *source, float *confidence,
- void **meta, FreeMetaFunc *freeMeta);
-
-typedef CMediaExtractorV3* (*CreatorFuncV3)(CDataSource *source, void *meta);
-
-typedef CreatorFuncV3 (*SnifferFuncV3)(
- CDataSource *source, float *confidence,
- void **meta, FreeMetaFunc *freeMeta);
-
-typedef CMediaExtractorV1 CMediaExtractor;
-typedef CreatorFuncV1 CreatorFunc;
+typedef CMediaExtractor CMediaExtractor;
+typedef CreatorFunc CreatorFunc;
typedef struct {
@@ -203,16 +135,17 @@
const char *extractor_name;
union {
- SnifferFuncV1 v1;
- SnifferFuncV2 v2;
- SnifferFuncV3 v3;
+ SnifferFunc v2;
} sniff;
};
+// the C++ based API which first shipped in P and is no longer supported
const uint32_t EXTRACTORDEF_VERSION_LEGACY = 1;
-const uint32_t EXTRACTORDEF_VERSION_CURRENT = 2;
-const uint32_t EXTRACTORDEF_VERSION = EXTRACTORDEF_VERSION_LEGACY;
+// the first C/NDK based API
+const uint32_t EXTRACTORDEF_VERSION_NDK_V1 = 2;
+
+const uint32_t EXTRACTORDEF_VERSION = EXTRACTORDEF_VERSION_NDK_V1;
// each plugin library exports one function of this type
typedef ExtractorDef (*GetExtractorDef)();
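With the extractor API collapsed to a single C/NDK version, a plugin's entry points reduce to one sniffer that hands back one creator. A hedged sketch against the typedefs above (the plugin, its format check, and the return values are illustrative; the full ExtractorDef wiring is omitted):

#include <media/MediaExtractorPluginApi.h>

// Hypothetical plugin creator: builds a CMediaExtractor whose function
// pointers forward to the plugin's private state.
static CMediaExtractor* CreateMyExtractor(CDataSource* source, void* /*meta*/) {
    (void)source;
    return nullptr;   // a real plugin returns a fully populated CMediaExtractor
}

// Hypothetical sniffer: on a match it reports confidence and returns the
// creator; meta/freeMeta stay null because no state is shared with the creator.
static CreatorFunc SniffMyFormat(CDataSource* source, float* confidence,
        void** meta, FreeMetaFunc* freeMeta) {
    (void)source;     // a real sniffer reads header bytes through the
                      // CDataSource's readAt callback and checks a magic number
    *confidence = 0.8f;
    *meta = nullptr;
    *freeMeta = nullptr;
    return CreateMyExtractor;
}

The plugin's exported GetExtractorDef function would then point sniff.v2 at SniffMyFormat and report EXTRACTORDEF_VERSION_NDK_V1 as its version.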
diff --git a/include/media/MediaExtractorPluginHelper.h b/include/media/MediaExtractorPluginHelper.h
index 292ec93..f4d4da6 100644
--- a/include/media/MediaExtractorPluginHelper.h
+++ b/include/media/MediaExtractorPluginHelper.h
@@ -35,193 +35,56 @@
struct MediaTrack;
-class MediaTrackHelper {
-public:
- virtual ~MediaTrackHelper() {};
- virtual status_t start() = 0;
- virtual status_t stop() = 0;
- virtual status_t getFormat(MetaDataBase& format) = 0;
+class MediaTrackHelper;
- class ReadOptions {
- public:
- enum SeekMode : int32_t {
- SEEK_PREVIOUS_SYNC,
- SEEK_NEXT_SYNC,
- SEEK_CLOSEST_SYNC,
- SEEK_CLOSEST,
- SEEK_FRAME_INDEX,
- };
-
- ReadOptions(uint32_t options, int64_t seekPosUs) {
- mOptions = options;
- mSeekPosUs = seekPosUs;
- }
- bool getSeekTo(int64_t *time_us, SeekMode *mode) const {
- if ((mOptions & CMediaTrackReadOptions::SEEK) == 0) {
- return false;
- }
- *time_us = mSeekPosUs;
- *mode = (SeekMode) (mOptions & 7);
- return true;
- }
- bool getNonBlocking() const {
- return mOptions & CMediaTrackReadOptions::NONBLOCKING;
- }
- private:
- uint32_t mOptions;
- int64_t mSeekPosUs;
- };
-
- virtual status_t read(
- MediaBufferBase **buffer, const ReadOptions *options = NULL) = 0;
- virtual bool supportsNonBlockingRead() { return false; }
-};
-
-inline CMediaTrack *wrap(MediaTrackHelper *track) {
- CMediaTrack *wrapper = (CMediaTrack*) malloc(sizeof(CMediaTrack));
- wrapper->data = track;
- wrapper->free = [](void *data) -> void {
- delete (MediaTrackHelper*)(data);
- };
- wrapper->start = [](void *data) -> status_t {
- return ((MediaTrackHelper*)data)->start();
- };
- wrapper->stop = [](void *data) -> status_t {
- return ((MediaTrackHelper*)data)->stop();
- };
- wrapper->getFormat = [](void *data, MetaDataBase &meta) -> status_t {
- return ((MediaTrackHelper*)data)->getFormat(meta);
- };
- wrapper->read = [](void *data, MediaBufferBase **buffer, uint32_t options, int64_t seekPosUs)
- -> status_t {
- MediaTrackHelper::ReadOptions opts(options, seekPosUs);
- return ((MediaTrackHelper*)data)->read(buffer, &opts);
- };
- wrapper->supportsNonBlockingRead = [](void *data) -> bool {
- return ((MediaTrackHelper*)data)->supportsNonBlockingRead();
- };
- return wrapper;
-}
-
-
-class MediaTrackHelperV2 {
-public:
- virtual ~MediaTrackHelperV2() {};
- virtual media_status_t start() = 0;
- virtual media_status_t stop() = 0;
- virtual media_status_t getFormat(AMediaFormat *format) = 0;
-
- class ReadOptions {
- public:
- enum SeekMode : int32_t {
- SEEK_PREVIOUS_SYNC,
- SEEK_NEXT_SYNC,
- SEEK_CLOSEST_SYNC,
- SEEK_CLOSEST,
- SEEK_FRAME_INDEX,
- };
-
- ReadOptions(uint32_t options, int64_t seekPosUs) {
- mOptions = options;
- mSeekPosUs = seekPosUs;
- }
- bool getSeekTo(int64_t *time_us, SeekMode *mode) const {
- if ((mOptions & CMediaTrackReadOptions::SEEK) == 0) {
- return false;
- }
- *time_us = mSeekPosUs;
- *mode = (SeekMode) (mOptions & 7);
- return true;
- }
- bool getNonBlocking() const {
- return mOptions & CMediaTrackReadOptions::NONBLOCKING;
- }
- private:
- uint32_t mOptions;
- int64_t mSeekPosUs;
- };
-
- virtual media_status_t read(
- MediaBufferBase **buffer, const ReadOptions *options = NULL) = 0;
- virtual bool supportsNonBlockingRead() { return false; }
-};
-
-inline CMediaTrackV2 *wrapV2(MediaTrackHelperV2 *track) {
- CMediaTrackV2 *wrapper = (CMediaTrackV2*) malloc(sizeof(CMediaTrackV2));
- wrapper->data = track;
- wrapper->free = [](void *data) -> void {
- delete (MediaTrackHelperV2*)(data);
- };
- wrapper->start = [](void *data) -> media_status_t {
- return ((MediaTrackHelperV2*)data)->start();
- };
- wrapper->stop = [](void *data) -> media_status_t {
- return ((MediaTrackHelperV2*)data)->stop();
- };
- wrapper->getFormat = [](void *data, AMediaFormat *meta) -> media_status_t {
- return ((MediaTrackHelperV2*)data)->getFormat(meta);
- };
- wrapper->read = [](void *data, MediaBufferBase **buffer, uint32_t options, int64_t seekPosUs)
- -> media_status_t {
- MediaTrackHelperV2::ReadOptions opts(options, seekPosUs);
- return ((MediaTrackHelperV2*)data)->read(buffer, &opts);
- };
- wrapper->supportsNonBlockingRead = [](void *data) -> bool {
- return ((MediaTrackHelperV2*)data)->supportsNonBlockingRead();
- };
- return wrapper;
-}
-
-class MediaTrackHelperV3;
-
-class MediaBufferHelperV3 {
+class MediaBufferHelper {
private:
- friend CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *);
- CMediaBufferV3 *mBuffer;
+ friend CMediaTrack *wrap(MediaTrackHelper *);
+ CMediaBuffer *mBuffer;
public:
- MediaBufferHelperV3(CMediaBufferV3 *buf) {
+ MediaBufferHelper(CMediaBuffer *buf) {
mBuffer = buf;
}
- ~MediaBufferHelperV3() {}
+ virtual ~MediaBufferHelper() {}
- void release() {
+ virtual void release() {
mBuffer->release(mBuffer->handle);
}
- void* data() {
+ virtual void* data() {
return mBuffer->data(mBuffer->handle);
}
- size_t size() {
+ virtual size_t size() {
return mBuffer->size(mBuffer->handle);
}
- size_t range_offset() {
+ virtual size_t range_offset() {
return mBuffer->range_offset(mBuffer->handle);
}
- size_t range_length() {
+ virtual size_t range_length() {
return mBuffer->range_length(mBuffer->handle);
}
- void set_range(size_t offset, size_t length) {
+ virtual void set_range(size_t offset, size_t length) {
mBuffer->set_range(mBuffer->handle, offset, length);
}
- AMediaFormat *meta_data() {
+ virtual AMediaFormat *meta_data() {
return mBuffer->meta_data(mBuffer->handle);
}
};
-class MediaBufferGroupHelperV3 {
+class MediaBufferGroupHelper {
private:
- CMediaBufferGroupV3 *mGroup;
- std::map<CMediaBufferV3*, MediaBufferHelperV3*> mBufferHelpers;
+ CMediaBufferGroup *mGroup;
+ std::map<CMediaBuffer*, MediaBufferHelper*> mBufferHelpers;
public:
- MediaBufferGroupHelperV3(CMediaBufferGroupV3 *group) {
+ MediaBufferGroupHelper(CMediaBufferGroup *group) {
mGroup = group;
}
- ~MediaBufferGroupHelperV3() {
+ ~MediaBufferGroupHelper() {
// delete all entries in map
ALOGV("buffergroup %p map has %zu entries", this, mBufferHelpers.size());
for (auto it = mBufferHelpers.begin(); it != mBufferHelpers.end(); ++it) {
@@ -235,14 +98,14 @@
mGroup->add_buffer(mGroup->handle, size);
}
media_status_t acquire_buffer(
- MediaBufferHelperV3 **buffer, bool nonBlocking = false, size_t requestedSize = 0) {
- CMediaBufferV3 *buf = nullptr;
+ MediaBufferHelper **buffer, bool nonBlocking = false, size_t requestedSize = 0) {
+ CMediaBuffer *buf = nullptr;
media_status_t ret =
mGroup->acquire_buffer(mGroup->handle, &buf, nonBlocking, requestedSize);
if (ret == AMEDIA_OK && buf != nullptr) {
auto helper = mBufferHelpers.find(buf);
if (helper == mBufferHelpers.end()) {
- MediaBufferHelperV3* newHelper = new MediaBufferHelperV3(buf);
+ MediaBufferHelper* newHelper = new MediaBufferHelper(buf);
mBufferHelpers.insert(std::make_pair(buf, newHelper));
*buffer = newHelper;
} else {
@@ -258,11 +121,11 @@
}
};
-class MediaTrackHelperV3 {
+class MediaTrackHelper {
public:
- MediaTrackHelperV3() : mBufferGroup(nullptr) {
+ MediaTrackHelper() : mBufferGroup(nullptr) {
}
- virtual ~MediaTrackHelperV3() {
+ virtual ~MediaTrackHelper() {
delete mBufferGroup;
}
virtual media_status_t start() = 0;
@@ -300,45 +163,45 @@
};
virtual media_status_t read(
- MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL) = 0;
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL) = 0;
virtual bool supportsNonBlockingRead() { return false; }
protected:
- friend CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *track);
- MediaBufferGroupHelperV3 *mBufferGroup;
+ friend CMediaTrack *wrap(MediaTrackHelper *track);
+ MediaBufferGroupHelper *mBufferGroup;
};
-inline CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *track) {
- CMediaTrackV3 *wrapper = (CMediaTrackV3*) malloc(sizeof(CMediaTrackV3));
+inline CMediaTrack *wrap(MediaTrackHelper *track) {
+ CMediaTrack *wrapper = (CMediaTrack*) malloc(sizeof(CMediaTrack));
wrapper->data = track;
wrapper->free = [](void *data) -> void {
- delete (MediaTrackHelperV3*)(data);
+ delete (MediaTrackHelper*)(data);
};
- wrapper->start = [](void *data, CMediaBufferGroupV3 *bufferGroup) -> media_status_t {
- if (((MediaTrackHelperV3*)data)->mBufferGroup) {
+ wrapper->start = [](void *data, CMediaBufferGroup *bufferGroup) -> media_status_t {
+ if (((MediaTrackHelper*)data)->mBufferGroup) {
// this shouldn't happen, but handle it anyway
- delete ((MediaTrackHelperV3*)data)->mBufferGroup;
+ delete ((MediaTrackHelper*)data)->mBufferGroup;
}
- ((MediaTrackHelperV3*)data)->mBufferGroup = new MediaBufferGroupHelperV3(bufferGroup);
- return ((MediaTrackHelperV3*)data)->start();
+ ((MediaTrackHelper*)data)->mBufferGroup = new MediaBufferGroupHelper(bufferGroup);
+ return ((MediaTrackHelper*)data)->start();
};
wrapper->stop = [](void *data) -> media_status_t {
- return ((MediaTrackHelperV3*)data)->stop();
+ return ((MediaTrackHelper*)data)->stop();
};
wrapper->getFormat = [](void *data, AMediaFormat *meta) -> media_status_t {
- return ((MediaTrackHelperV3*)data)->getFormat(meta);
+ return ((MediaTrackHelper*)data)->getFormat(meta);
};
- wrapper->read = [](void *data, CMediaBufferV3 **buffer, uint32_t options, int64_t seekPosUs)
+ wrapper->read = [](void *data, CMediaBuffer **buffer, uint32_t options, int64_t seekPosUs)
-> media_status_t {
- MediaTrackHelperV3::ReadOptions opts(options, seekPosUs);
- MediaBufferHelperV3 *buf = NULL;
- media_status_t ret = ((MediaTrackHelperV3*)data)->read(&buf, &opts);
+ MediaTrackHelper::ReadOptions opts(options, seekPosUs);
+ MediaBufferHelper *buf = NULL;
+ media_status_t ret = ((MediaTrackHelper*)data)->read(&buf, &opts);
if (ret == AMEDIA_OK && buf != nullptr) {
*buffer = buf->mBuffer;
}
return ret;
};
wrapper->supportsNonBlockingRead = [](void *data) -> bool {
- return ((MediaTrackHelperV3*)data)->supportsNonBlockingRead();
+ return ((MediaTrackHelper*)data)->supportsNonBlockingRead();
};
return wrapper;
}
@@ -356,13 +219,13 @@
enum GetTrackMetaDataFlags {
kIncludeExtensiveMetaData = 1
};
- virtual status_t getTrackMetaData(
- MetaDataBase& meta,
+ virtual media_status_t getTrackMetaData(
+ AMediaFormat *meta,
size_t index, uint32_t flags = 0) = 0;
// Return container specific meta-data. The default implementation
// returns an empty metadata object.
- virtual status_t getMetaData(MetaDataBase& meta) = 0;
+ virtual media_status_t getMetaData(AMediaFormat *meta) = 0;
enum Flags {
CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
@@ -377,8 +240,8 @@
return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
};
- virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
- return INVALID_OPERATION;
+ virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
+ return AMEDIA_ERROR_INVALID_OPERATION;
}
virtual const char * name() { return "<unspecified>"; }
@@ -405,13 +268,13 @@
};
wrapper->getTrackMetaData = [](
void *data,
- MetaDataBase& meta,
- size_t index, uint32_t flags) -> status_t {
+ AMediaFormat *meta,
+ size_t index, uint32_t flags) -> media_status_t {
return ((MediaExtractorPluginHelper*)data)->getTrackMetaData(meta, index, flags);
};
wrapper->getMetaData = [](
void *data,
- MetaDataBase& meta) -> status_t {
+ AMediaFormat *meta) -> media_status_t {
return ((MediaExtractorPluginHelper*)data)->getMetaData(meta);
};
wrapper->flags = [](
@@ -419,7 +282,7 @@
return ((MediaExtractorPluginHelper*)data)->flags();
};
wrapper->setMediaCas = [](
- void *data, const uint8_t *casToken, size_t size) -> status_t {
+ void *data, const uint8_t *casToken, size_t size) -> media_status_t {
return ((MediaExtractorPluginHelper*)data)->setMediaCas(casToken, size);
};
wrapper->name = [](
@@ -429,172 +292,6 @@
return wrapper;
}
-class MediaExtractorPluginHelperV2
-{
-public:
- virtual ~MediaExtractorPluginHelperV2() {}
- virtual size_t countTracks() = 0;
- virtual MediaTrackHelperV2 *getTrack(size_t index) = 0;
-
- enum GetTrackMetaDataFlags {
- kIncludeExtensiveMetaData = 1
- };
- virtual media_status_t getTrackMetaData(
- AMediaFormat *meta,
- size_t index, uint32_t flags = 0) = 0;
-
- // Return container specific meta-data. The default implementation
- // returns an empty metadata object.
- virtual media_status_t getMetaData(AMediaFormat *meta) = 0;
-
- enum Flags {
- CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
- CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
- CAN_PAUSE = 4,
- CAN_SEEK = 8, // the "seek bar"
- };
-
- // If subclasses do _not_ override this, the default is
- // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
- virtual uint32_t flags() const {
- return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
- };
-
- virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
- return AMEDIA_ERROR_INVALID_OPERATION;
- }
-
- virtual const char * name() { return "<unspecified>"; }
-
-protected:
- MediaExtractorPluginHelperV2() {}
-
-private:
- MediaExtractorPluginHelperV2(const MediaExtractorPluginHelperV2 &);
- MediaExtractorPluginHelperV2 &operator=(const MediaExtractorPluginHelperV2 &);
-};
-
-inline CMediaExtractorV2 *wrapV2(MediaExtractorPluginHelperV2 *extractor) {
- CMediaExtractorV2 *wrapper = (CMediaExtractorV2*) malloc(sizeof(CMediaExtractorV2));
- wrapper->data = extractor;
- wrapper->free = [](void *data) -> void {
- delete (MediaExtractorPluginHelperV2*)(data);
- };
- wrapper->countTracks = [](void *data) -> size_t {
- return ((MediaExtractorPluginHelperV2*)data)->countTracks();
- };
- wrapper->getTrack = [](void *data, size_t index) -> CMediaTrackV2* {
- return wrapV2(((MediaExtractorPluginHelperV2*)data)->getTrack(index));
- };
- wrapper->getTrackMetaData = [](
- void *data,
- AMediaFormat *meta,
- size_t index, uint32_t flags) -> media_status_t {
- return ((MediaExtractorPluginHelperV2*)data)->getTrackMetaData(meta, index, flags);
- };
- wrapper->getMetaData = [](
- void *data,
- AMediaFormat *meta) -> media_status_t {
- return ((MediaExtractorPluginHelperV2*)data)->getMetaData(meta);
- };
- wrapper->flags = [](
- void *data) -> uint32_t {
- return ((MediaExtractorPluginHelperV2*)data)->flags();
- };
- wrapper->setMediaCas = [](
- void *data, const uint8_t *casToken, size_t size) -> media_status_t {
- return ((MediaExtractorPluginHelperV2*)data)->setMediaCas(casToken, size);
- };
- wrapper->name = [](
- void *data) -> const char * {
- return ((MediaExtractorPluginHelperV2*)data)->name();
- };
- return wrapper;
-}
-
-class MediaExtractorPluginHelperV3
-{
-public:
- virtual ~MediaExtractorPluginHelperV3() {}
- virtual size_t countTracks() = 0;
- virtual MediaTrackHelperV3 *getTrack(size_t index) = 0;
-
- enum GetTrackMetaDataFlags {
- kIncludeExtensiveMetaData = 1
- };
- virtual media_status_t getTrackMetaData(
- AMediaFormat *meta,
- size_t index, uint32_t flags = 0) = 0;
-
- // Return container specific meta-data. The default implementation
- // returns an empty metadata object.
- virtual media_status_t getMetaData(AMediaFormat *meta) = 0;
-
- enum Flags {
- CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
- CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
- CAN_PAUSE = 4,
- CAN_SEEK = 8, // the "seek bar"
- };
-
- // If subclasses do _not_ override this, the default is
- // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
- virtual uint32_t flags() const {
- return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
- };
-
- virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
- return AMEDIA_ERROR_INVALID_OPERATION;
- }
-
- virtual const char * name() { return "<unspecified>"; }
-
-protected:
- MediaExtractorPluginHelperV3() {}
-
-private:
- MediaExtractorPluginHelperV3(const MediaExtractorPluginHelperV2 &);
- MediaExtractorPluginHelperV3 &operator=(const MediaExtractorPluginHelperV2 &);
-};
-
-inline CMediaExtractorV3 *wrapV3(MediaExtractorPluginHelperV3 *extractor) {
- CMediaExtractorV3 *wrapper = (CMediaExtractorV3*) malloc(sizeof(CMediaExtractorV3));
- wrapper->data = extractor;
- wrapper->free = [](void *data) -> void {
- delete (MediaExtractorPluginHelperV3*)(data);
- };
- wrapper->countTracks = [](void *data) -> size_t {
- return ((MediaExtractorPluginHelperV3*)data)->countTracks();
- };
- wrapper->getTrack = [](void *data, size_t index) -> CMediaTrackV3* {
- return wrapV3(((MediaExtractorPluginHelperV3*)data)->getTrack(index));
- };
- wrapper->getTrackMetaData = [](
- void *data,
- AMediaFormat *meta,
- size_t index, uint32_t flags) -> media_status_t {
- return ((MediaExtractorPluginHelperV3*)data)->getTrackMetaData(meta, index, flags);
- };
- wrapper->getMetaData = [](
- void *data,
- AMediaFormat *meta) -> media_status_t {
- return ((MediaExtractorPluginHelperV3*)data)->getMetaData(meta);
- };
- wrapper->flags = [](
- void *data) -> uint32_t {
- return ((MediaExtractorPluginHelperV3*)data)->flags();
- };
- wrapper->setMediaCas = [](
- void *data, const uint8_t *casToken, size_t size) -> media_status_t {
- return ((MediaExtractorPluginHelperV3*)data)->setMediaCas(casToken, size);
- };
- wrapper->name = [](
- void *data) -> const char * {
- return ((MediaExtractorPluginHelperV3*)data)->name();
- };
- return wrapper;
-}
-
/* adds some convenience methods */
class DataSourceHelper {
public:
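
For context, a sketch of how an extractor plugin is expected to use the renamed helpers: a MediaTrackHelper subclass is handed to the framework through wrap(), whose start lambda installs the MediaBufferGroupHelper the track reads from. The MyRawTrack name and its trivial bodies below are hypothetical and are not part of this patch.

class MyRawTrack : public MediaTrackHelper {
public:
    media_status_t start() override { return AMEDIA_OK; }
    media_status_t stop() override { return AMEDIA_OK; }
    media_status_t getFormat(AMediaFormat *format) override {
        AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "audio/raw");
        return AMEDIA_OK;
    }
    media_status_t read(MediaBufferHelper **buffer,
                        const ReadOptions *options = NULL) override {
        (void) options;
        // mBufferGroup is installed by wrap()'s start lambda before start()
        // runs, so the track can acquire its output buffers from it here.
        media_status_t err = mBufferGroup->acquire_buffer(buffer);
        if (err != AMEDIA_OK) {
            return err;
        }
        // ... fill (*buffer) with one frame and set its range / meta_data ...
        return AMEDIA_OK;
    }
};

A plugin's getTrack(index) would then return wrap(new MyRawTrack()), letting the framework drive the track through the CMediaTrack function pointers.
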
diff --git a/include/media/MediaTrack.h b/include/media/MediaTrack.h
index baa3410..e828a7f 100644
--- a/include/media/MediaTrack.h
+++ b/include/media/MediaTrack.h
@@ -156,42 +156,6 @@
private:
CMediaTrack *wrapper;
-};
-
-class MediaTrackCUnwrapperV2 : public MediaTrack {
-public:
- explicit MediaTrackCUnwrapperV2(CMediaTrackV2 *wrapper);
-
- virtual status_t start();
- virtual status_t stop();
- virtual status_t getFormat(MetaDataBase& format);
- virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
-
- virtual bool supportNonblockingRead();
-
-protected:
- virtual ~MediaTrackCUnwrapperV2();
-
-private:
- CMediaTrackV2 *wrapper;
-};
-
-class MediaTrackCUnwrapperV3 : public MediaTrack {
-public:
- explicit MediaTrackCUnwrapperV3(CMediaTrackV3 *wrapper);
-
- virtual status_t start();
- virtual status_t stop();
- virtual status_t getFormat(MetaDataBase& format);
- virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
-
- virtual bool supportNonblockingRead();
-
-protected:
- virtual ~MediaTrackCUnwrapperV3();
-
-private:
- CMediaTrackV3 *wrapper;
MediaBufferGroup *bufferGroup;
};
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 2c734ac..5260909 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -253,9 +253,21 @@
}
void Accessor::Impl::handleInvalidateAck() {
- std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
- mBufferPool.processStatusMessages();
- mBufferPool.mInvalidation.onHandleAck();
+ std::map<ConnectionId, const sp<IObserver>> observers;
+ uint32_t invalidationId;
+ {
+ std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
+ mBufferPool.processStatusMessages();
+ mBufferPool.mInvalidation.onHandleAck(&observers, &invalidationId);
+ }
+ // Do not hold the lock while sending the invalidations
+ for (auto it = observers.begin(); it != observers.end(); ++it) {
+ const sp<IObserver> observer = it->second;
+ if (observer) {
+ Return<void> transResult = observer->onMessage(it->first, invalidationId);
+ (void) transResult;
+ }
+ }
}
bool Accessor::Impl::isValid() {
@@ -365,19 +377,21 @@
sInvalidator->addAccessor(mId, impl);
}
-void Accessor::Impl::BufferPool::Invalidation::onHandleAck() {
+void Accessor::Impl::BufferPool::Invalidation::onHandleAck(
+ std::map<ConnectionId, const sp<IObserver>> *observers,
+ uint32_t *invalidationId) {
if (mInvalidationId != 0) {
+ *invalidationId = mInvalidationId;
std::set<int> deads;
for (auto it = mAcks.begin(); it != mAcks.end(); ++it) {
if (it->second != mInvalidationId) {
const sp<IObserver> observer = mObservers[it->first];
if (observer) {
- ALOGV("connection %lld call observer (%u: %u)",
+ observers->emplace(it->first, observer);
+ ALOGV("connection %lld will call observer (%u: %u)",
(long long)it->first, it->second, mInvalidationId);
- Return<void> transResult = observer->onMessage(it->first, mInvalidationId);
- (void) transResult;
- // N.B: ignore possibility of onMessage oneway call being
- // lost.
+ // N.B.: onMessage will be called later; ignore the possibility of the
+ // onMessage oneway call being lost.
it->second = mInvalidationId;
} else {
ALOGV("bufferpool2 observer died %lld", (long long)it->first);
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index b3faa96..eea72b9 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -158,7 +158,9 @@
BufferInvalidationChannel &channel,
const std::shared_ptr<Accessor::Impl> &impl);
- void onHandleAck();
+ void onHandleAck(
+ std::map<ConnectionId, const sp<IObserver>> *observers,
+ uint32_t *invalidationId);
} mInvalidation;
/// Buffer pool statistics which tracks allocation and transfer statistics.
struct Stats {
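
The bufferpool change above follows a standard shape: snapshot the observers to notify while the pool mutex is held, then make the onMessage calls after the lock is dropped so a slow or reentrant observer cannot stall the pool. A minimal standalone illustration of that shape follows; Observer, gObservers, and notifyAll are hypothetical names, not bufferpool API.

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>

struct Observer {
    void onMessage(int connectionId, uint32_t msgId) {
        // e.g. forward the invalidation to the remote client
        (void) connectionId; (void) msgId;
    }
};

std::mutex gLock;
std::map<int, std::shared_ptr<Observer>> gObservers;  // guarded by gLock

void notifyAll(uint32_t invalidationId) {
    // 1) copy the recipients while holding the lock
    std::map<int, std::shared_ptr<Observer>> toNotify;
    {
        std::lock_guard<std::mutex> lock(gLock);
        toNotify = gObservers;
    }
    // 2) call out with the lock released
    for (auto &entry : toNotify) {
        if (entry.second) {
            entry.second->onMessage(entry.first, invalidationId);
        }
    }
}
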
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index aeefbdb..87730ae 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -468,7 +468,8 @@
if (outargs.numOutBytes > 0) {
mInputSize = 0;
- int consumed = ((capacity / sizeof(int16_t)) - inargs.numInSamples);
+ int consumed = (capacity / sizeof(int16_t)) - inargs.numInSamples
+ + outargs.numInSamples;
mInputTimeUs = work->input.ordinal.timestamp
+ (consumed * 1000000ll / channelCount / sampleRate);
buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index ee5cf27..cf06623 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -768,7 +768,11 @@
s_profile_params_ip.e_sub_cmd = IVE_CMD_CTL_SET_PROFILE_PARAMS;
s_profile_params_ip.e_profile = mIntf->getProfile_l();
- s_profile_params_ip.u4_entropy_coding_mode = mEntropyMode;
+ if (s_profile_params_ip.e_profile == IV_PROFILE_BASE) {
+ s_profile_params_ip.u4_entropy_coding_mode = 0;
+ } else {
+ s_profile_params_ip.u4_entropy_coding_mode = 1;
+ }
s_profile_params_ip.u4_timestamp_high = -1;
s_profile_params_ip.u4_timestamp_low = -1;
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 50b4d20..b8baec8 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -132,6 +132,56 @@
}
}
+class SimpleC2Component::BlockingBlockPool : public C2BlockPool {
+public:
+ BlockingBlockPool(const std::shared_ptr<C2BlockPool>& base): mBase{base} {}
+
+ virtual local_id_t getLocalId() const override {
+ return mBase->getLocalId();
+ }
+
+ virtual C2Allocator::id_t getAllocatorId() const override {
+ return mBase->getAllocatorId();
+ }
+
+ virtual c2_status_t fetchLinearBlock(
+ uint32_t capacity,
+ C2MemoryUsage usage,
+ std::shared_ptr<C2LinearBlock>* block) {
+ c2_status_t status;
+ do {
+ status = mBase->fetchLinearBlock(capacity, usage, block);
+ } while (status == C2_TIMED_OUT);
+ return status;
+ }
+
+ virtual c2_status_t fetchCircularBlock(
+ uint32_t capacity,
+ C2MemoryUsage usage,
+ std::shared_ptr<C2CircularBlock>* block) {
+ c2_status_t status;
+ do {
+ status = mBase->fetchCircularBlock(capacity, usage, block);
+ } while (status == C2_TIMED_OUT);
+ return status;
+ }
+
+ virtual c2_status_t fetchGraphicBlock(
+ uint32_t width, uint32_t height, uint32_t format,
+ C2MemoryUsage usage,
+ std::shared_ptr<C2GraphicBlock>* block) {
+ c2_status_t status;
+ do {
+ status = mBase->fetchGraphicBlock(width, height, format, usage,
+ block);
+ } while (status == C2_TIMED_OUT);
+ return status;
+ }
+
+private:
+ std::shared_ptr<C2BlockPool> mBase;
+};
+
////////////////////////////////////////////////////////////////////////////////
namespace {
@@ -446,12 +496,16 @@
}
}
- err = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
+ std::shared_ptr<C2BlockPool> blockPool;
+ err = GetCodec2BlockPool(poolId, shared_from_this(), &blockPool);
ALOGD("Using output block pool with poolID %llu => got %llu - %d",
(unsigned long long)poolId,
(unsigned long long)(
- mOutputBlockPool ? mOutputBlockPool->getLocalId() : 111000111),
+ blockPool ? blockPool->getLocalId() : 111000111),
err);
+ if (err == C2_OK) {
+ mOutputBlockPool = std::make_shared<BlockingBlockPool>(blockPool);
+ }
return err;
}();
if (err != C2_OK) {
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index b3a98f4..43029a9 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -234,7 +234,8 @@
typedef std::unordered_map<uint64_t, std::unique_ptr<C2Work>> PendingWork;
Mutexed<PendingWork> mPendingWork;
- std::shared_ptr<C2BlockPool> mOutputBlockPool;
+ class BlockingBlockPool;
+ std::shared_ptr<BlockingBlockPool> mOutputBlockPool;
SimpleC2Component() = delete;
};
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index 280ae36..48825e4 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -405,10 +405,7 @@
int numFrames = 0;
int ret = vorbis_dsp_synthesis(mState, &pack, 1);
if (0 != ret) {
- ALOGE("vorbis_dsp_synthesis returned %d", ret);
- mSignalledError = true;
- work->result = C2_CORRUPTED;
- return;
+ ALOGD("vorbis_dsp_synthesis returned %d; ignored", ret);
} else {
numFrames = vorbis_dsp_pcmout(
mState, reinterpret_cast<int16_t *> (wView.data()),
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 8ecbf5d..9ba2362 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -188,6 +188,24 @@
.withConstValue(defaultColorInfo)
.build());
+ addParameter(
+ DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
+ .withDefault(new C2StreamColorAspectsTuning::output(
+ 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+ .withFields({
+ C2F(mDefaultColorAspects, range).inRange(
+ C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
+ C2F(mDefaultColorAspects, primaries).inRange(
+ C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
+ C2F(mDefaultColorAspects, transfer).inRange(
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::TRANSFER_OTHER),
+ C2F(mDefaultColorAspects, matrix).inRange(
+ C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)
+ })
+ .withSetter(DefaultColorAspectsSetter)
+ .build());
+
// TODO: support more formats?
addParameter(
DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
@@ -228,6 +246,22 @@
return C2R::Ok();
}
+ static C2R DefaultColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsTuning::output> &me) {
+ (void)mayBlock;
+ if (me.v.range > C2Color::RANGE_OTHER) {
+ me.set().range = C2Color::RANGE_OTHER;
+ }
+ if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+ me.set().primaries = C2Color::PRIMARIES_OTHER;
+ }
+ if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+ me.set().transfer = C2Color::TRANSFER_OTHER;
+ }
+ if (me.v.matrix > C2Color::MATRIX_OTHER) {
+ me.set().matrix = C2Color::MATRIX_OTHER;
+ }
+ return C2R::Ok();
+ }
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me,
const C2P<C2StreamPictureSizeInfo::output> &size) {
@@ -236,6 +270,9 @@
(void)me; // TODO: validate
return C2R::Ok();
}
+ std::shared_ptr<C2StreamColorAspectsTuning::output> getDefaultColorAspects_l() {
+ return mDefaultColorAspects;
+ }
static C2R Hdr10PlusInfoInputSetter(bool mayBlock, C2P<C2StreamHdr10PlusInfo::input> &me) {
(void)mayBlock;
@@ -256,6 +293,7 @@
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mMaxInputSize;
std::shared_ptr<C2StreamColorInfo::output> mColorInfo;
std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormat;
+ std::shared_ptr<C2StreamColorAspectsTuning::output> mDefaultColorAspects;
#ifdef VP9
#if 0
std::shared_ptr<C2StreamHdrStaticInfo::output> mHdrStaticInfo;
@@ -524,32 +562,129 @@
static void copyOutputBufferToYV12Frame(uint8_t *dst,
const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
size_t srcYStride, size_t srcUStride, size_t srcVStride,
- uint32_t width, uint32_t height, int32_t bpp) {
- size_t dstYStride = align(width, 16) * bpp ;
+ uint32_t width, uint32_t height) {
+ size_t dstYStride = align(width, 16);
size_t dstUVStride = align(dstYStride / 2, 16);
uint8_t *dstStart = dst;
for (size_t i = 0; i < height; ++i) {
- memcpy(dst, srcY, width * bpp);
+ memcpy(dst, srcY, width);
srcY += srcYStride;
dst += dstYStride;
}
dst = dstStart + dstYStride * height;
for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dst, srcV, width / 2 * bpp);
+ memcpy(dst, srcV, width / 2);
srcV += srcVStride;
dst += dstUVStride;
}
dst = dstStart + (dstYStride * height) + (dstUVStride * height / 2);
for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dst, srcU, width / 2 * bpp);
+ memcpy(dst, srcU, width / 2);
srcU += srcUStride;
dst += dstUVStride;
}
}
+static void convertYUV420Planar16ToY410(uint32_t *dst,
+ const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride, size_t srcVStride,
+ size_t dstStride, size_t width, size_t height) {
+
+ // Converting two lines at a time, slightly faster
+ for (size_t y = 0; y < height; y += 2) {
+ uint32_t *dstTop = (uint32_t *) dst;
+ uint32_t *dstBot = (uint32_t *) (dst + dstStride);
+ uint16_t *ySrcTop = (uint16_t*) srcY;
+ uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
+ uint16_t *uSrc = (uint16_t*) srcU;
+ uint16_t *vSrc = (uint16_t*) srcV;
+
+ uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+ size_t x = 0;
+ for (; x < width - 3; x += 4) {
+
+ u01 = *((uint32_t*)uSrc); uSrc += 2;
+ v01 = *((uint32_t*)vSrc); vSrc += 2;
+
+ y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
+ y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
+ y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
+ y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
+
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+ *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
+ *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
+
+ *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
+ *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
+ }
+
+ // There should be at most 2 more pixels to process. Note that we don't
+ // need to consider the odd case, as the buffer width is always even-aligned.
+ if (x < width) {
+ u01 = *uSrc;
+ v01 = *vSrc;
+ y01 = *((uint32_t*)ySrcTop);
+ y45 = *((uint32_t*)ySrcBot);
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = ((y01 >> 16) << 10) | uv0;
+ *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = ((y45 >> 16) << 10) | uv0;
+ }
+
+ srcY += srcYStride * 2;
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dst += dstStride * 2;
+ }
+
+ return;
+}
+
+static void convertYUV420Planar16ToYUV420Planar(uint8_t *dst,
+ const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride, size_t srcVStride,
+ size_t dstStride, size_t width, size_t height) {
+
+ uint8_t *dstY = (uint8_t *)dst;
+ size_t dstYSize = dstStride * height;
+ size_t dstUVStride = align(dstStride / 2, 16);
+ size_t dstUVSize = dstUVStride * height / 2;
+ uint8_t *dstV = dstY + dstYSize;
+ uint8_t *dstU = dstV + dstUVSize;
+
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ dstY[x] = (uint8_t)(srcY[x] >> 2);
+ }
+
+ srcY += srcYStride;
+ dstY += dstStride;
+ }
+
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstU[x] = (uint8_t)(srcU[x] >> 2);
+ dstV[x] = (uint8_t)(srcV[x] >> 2);
+ }
+
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dstU += dstUVStride;
+ dstV += dstUVStride;
+ }
+ return;
+}
bool C2SoftVpxDec::outputBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work)
@@ -581,15 +716,21 @@
}
CHECK(img->fmt == VPX_IMG_FMT_I420 || img->fmt == VPX_IMG_FMT_I42016);
- int32_t bpp = 1;
- if (img->fmt == VPX_IMG_FMT_I42016) {
- bpp = 2;
- }
std::shared_ptr<C2GraphicBlock> block;
uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ if (img->fmt == VPX_IMG_FMT_I42016) {
+ IntfImpl::Lock lock = mIntf->lock();
+ std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+
+ if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
+ defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
+ defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
+ format = HAL_PIXEL_FORMAT_RGBA_1010102;
+ }
+ }
C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
- c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16) * bpp, mHeight, format, usage, &block);
+ c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight, format, usage, &block);
if (err != C2_OK) {
ALOGE("fetchGraphicBlock for Output failed with status %d", err);
work->result = err;
@@ -610,12 +751,30 @@
size_t srcYStride = img->stride[VPX_PLANE_Y];
size_t srcUStride = img->stride[VPX_PLANE_U];
size_t srcVStride = img->stride[VPX_PLANE_V];
- const uint8_t *srcY = (const uint8_t *)img->planes[VPX_PLANE_Y];
- const uint8_t *srcU = (const uint8_t *)img->planes[VPX_PLANE_U];
- const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
- copyOutputBufferToYV12Frame(dst, srcY, srcU, srcV,
- srcYStride, srcUStride, srcVStride, mWidth, mHeight, bpp);
+ if (img->fmt == VPX_IMG_FMT_I42016) {
+ const uint16_t *srcY = (const uint16_t *)img->planes[VPX_PLANE_Y];
+ const uint16_t *srcU = (const uint16_t *)img->planes[VPX_PLANE_U];
+ const uint16_t *srcV = (const uint16_t *)img->planes[VPX_PLANE_V];
+
+ if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
+ convertYUV420Planar16ToY410((uint32_t *)dst, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2,
+ align(mWidth, 16),
+ mWidth, mHeight);
+ } else {
+ convertYUV420Planar16ToYUV420Planar(dst, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2,
+ align(mWidth, 16),
+ mWidth, mHeight);
+ }
+ } else {
+ const uint8_t *srcY = (const uint8_t *)img->planes[VPX_PLANE_Y];
+ const uint8_t *srcU = (const uint8_t *)img->planes[VPX_PLANE_U];
+ const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
+ copyOutputBufferToYV12Frame(dst, srcY, srcU, srcV,
+ srcYStride, srcUStride, srcVStride, mWidth, mHeight);
+ }
finishWork(*(int64_t *)img->user_priv, work, std::move(block));
return true;
}
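
A note on the packing used by convertYUV420Planar16ToY410 above: each output word carries U in bits 9:0, Y in bits 19:10, V in bits 29:20, and a 2-bit alpha of 3 in bits 31:30. A standalone single-pixel version of that packing, with hypothetical sample values:

#include <cstdint>
#include <cstdio>

// Pack one 10-bit Y/U/V sample into the 32-bit layout used above:
// bits 31:30 = alpha (3), 29:20 = V, 19:10 = Y, 9:0 = U.
static uint32_t packOnePixel(uint16_t y, uint16_t u, uint16_t v) {
    return (3u << 30) | ((uint32_t)(v & 0x3FF) << 20)
                      | ((uint32_t)(y & 0x3FF) << 10)
                      |  (uint32_t)(u & 0x3FF);
}

int main() {
    // y = 512, u = 256, v = 768  ->  0xF0080100
    printf("0x%08X\n", (unsigned) packOnePixel(512, 256, 768));
    return 0;
}
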
diff --git a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
index 6fd9200..d4b973f 100644
--- a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
@@ -156,63 +156,31 @@
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
for (std::unique_ptr<C2Work>& work : workItems) {
- // handle configuration changes in work done
- if (!work->worklets.empty() &&
- (work->worklets.front()->output.configUpdate.size() != 0)) {
- ALOGV("Config Update");
- std::vector<std::unique_ptr<C2Param>> updates =
- std::move(work->worklets.front()->output.configUpdate);
- std::vector<C2Param*> configParam;
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- for (size_t i = 0; i < updates.size(); ++i) {
- C2Param* param = updates[i].get();
- if ((param->index() == C2StreamSampleRateInfo::output::PARAM_TYPE) ||
- (param->index() == C2StreamChannelCountInfo::output::PARAM_TYPE)) {
- configParam.push_back(param);
+ if (!work->worklets.empty()) {
+ // For decoder components, the current timestamp always exceeds
+ // the previous timestamp
+ bool codecConfig = ((work->worklets.front()->output.flags &
+ C2FrameData::FLAG_CODEC_CONFIG) != 0);
+ if (!codecConfig &&
+ !work->worklets.front()->output.buffers.empty()) {
+ EXPECT_GE(work->worklets.front()->output.ordinal.timestamp.peeku(),
+ mTimestampUs);
+ mTimestampUs =
+ work->worklets.front()->output.ordinal.timestamp.peeku();
+ uint32_t rangeLength =
+ work->worklets.front()->output.buffers[0]->data()
+ .linearBlocks().front().map().get().capacity();
+ // List of timestamp values and output sizes used for timestamp validation
+ if (mTimestampDevTest) {
+ outputMetaData meta = {mTimestampUs, rangeLength};
+ oBufferMetaData.push_back(meta);
}
}
- mComponent->config(configParam, C2_DONT_BLOCK, &failures);
- ASSERT_EQ(failures.size(), 0u);
- }
- mFramesReceived++;
- mEos = (work->worklets.front()->output.flags &
- C2FrameData::FLAG_END_OF_STREAM) != 0;
- auto frameIndexIt =
- std::find(mFlushedIndices.begin(), mFlushedIndices.end(),
- work->input.ordinal.frameIndex.peeku());
- ALOGV("WorkDone: frameID received %d",
- (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
-
- // For decoder components current timestamp always exceeds
- // previous timestamp
- bool codecConfig = ((work->worklets.front()->output.flags &
- C2FrameData::FLAG_CODEC_CONFIG) != 0);
- if (!codecConfig &&
- !work->worklets.front()->output.buffers.empty()) {
- EXPECT_GE(work->worklets.front()->output.ordinal.timestamp.peeku(),
- mTimestampUs);
- mTimestampUs =
- work->worklets.front()->output.ordinal.timestamp.peeku();
- uint32_t rangeLength =
- work->worklets.front()->output.buffers[0]->data()
- .linearBlocks().front().map().get().capacity();
- //List of timestamp values and output size to calculate timestamp
- if (mTimestampDevTest) {
- outputMetaData meta = {mTimestampUs, rangeLength};
- oBufferMetaData.push_back(meta);
- }
- }
-
- work->input.buffers.clear();
- work->worklets.clear();
- {
- typedef std::unique_lock<std::mutex> ULock;
- ULock l(mQueueLock);
- mWorkQueue.push_back(std::move(work));
- if (!mFlushedIndices.empty()) {
- mFlushedIndices.erase(frameIndexIt);
- }
- mQueueCondition.notify_all();
+ bool mCsd = false;
+ workDone(mComponent, work, mFlushedIndices, mQueueLock,
+ mQueueCondition, mWorkQueue, mEos, mCsd,
+ mFramesReceived);
+ (void)mCsd;
}
}
}
@@ -362,67 +330,82 @@
}
}
+// number of elementary streams per component
+#define STREAM_COUNT 2
+
// LookUpTable of clips and metadata for component testing
void GetURLForComponent(Codec2AudioDecHidlTest::standardComp comp, char* mURL,
- char* info) {
+ char* info, size_t streamIndex = 0) {
struct CompToURL {
Codec2AudioDecHidlTest::standardComp comp;
- const char* mURL;
- const char* info;
+ const char mURL[STREAM_COUNT][512];
+ const char info[STREAM_COUNT][512];
};
+ ASSERT_TRUE(streamIndex < STREAM_COUNT);
+
static const CompToURL kCompToURL[] = {
{Codec2AudioDecHidlTest::standardComp::xaac,
- "bbb_aac_stereo_128kbps_48000hz.aac",
- "bbb_aac_stereo_128kbps_48000hz.info"},
+ {"bbb_aac_stereo_128kbps_48000hz.aac",
+ "bbb_aac_stereo_128kbps_48000hz.aac"},
+ {"bbb_aac_stereo_128kbps_48000hz.info",
+ "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"}},
{Codec2AudioDecHidlTest::standardComp::mp3,
- "bbb_mp3_stereo_192kbps_48000hz.mp3",
- "bbb_mp3_stereo_192kbps_48000hz.info"},
+ {"bbb_mp3_stereo_192kbps_48000hz.mp3",
+ "bbb_mp3_stereo_192kbps_48000hz.mp3"},
+ {"bbb_mp3_stereo_192kbps_48000hz.info",
+ "bbb_mp3_stereo_192kbps_48000hz_multi_frame.info"}},
{Codec2AudioDecHidlTest::standardComp::aac,
- "bbb_aac_stereo_128kbps_48000hz.aac",
- "bbb_aac_stereo_128kbps_48000hz.info"},
+ {"bbb_aac_stereo_128kbps_48000hz.aac",
+ "bbb_aac_stereo_128kbps_48000hz.aac"},
+ {"bbb_aac_stereo_128kbps_48000hz.info",
+ "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"}},
{Codec2AudioDecHidlTest::standardComp::amrnb,
- "sine_amrnb_1ch_12kbps_8000hz.amrnb",
- "sine_amrnb_1ch_12kbps_8000hz.info"},
+ {"sine_amrnb_1ch_12kbps_8000hz.amrnb",
+ "sine_amrnb_1ch_12kbps_8000hz.amrnb"},
+ {"sine_amrnb_1ch_12kbps_8000hz.info",
+ "sine_amrnb_1ch_12kbps_8000hz_multi_frame.info"}},
{Codec2AudioDecHidlTest::standardComp::amrwb,
- "bbb_amrwb_1ch_14kbps_16000hz.amrwb",
- "bbb_amrwb_1ch_14kbps_16000hz.info"},
+ {"bbb_amrwb_1ch_14kbps_16000hz.amrwb",
+ "bbb_amrwb_1ch_14kbps_16000hz.amrwb"},
+ {"bbb_amrwb_1ch_14kbps_16000hz.info",
+ "bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info"}},
{Codec2AudioDecHidlTest::standardComp::vorbis,
- "bbb_vorbis_stereo_128kbps_48000hz.vorbis",
- "bbb_vorbis_stereo_128kbps_48000hz.info"},
+ {"bbb_vorbis_stereo_128kbps_48000hz.vorbis", ""},
+ {"bbb_vorbis_stereo_128kbps_48000hz.info", ""}},
{Codec2AudioDecHidlTest::standardComp::opus,
- "bbb_opus_stereo_128kbps_48000hz.opus",
- "bbb_opus_stereo_128kbps_48000hz.info"},
+ {"bbb_opus_stereo_128kbps_48000hz.opus", ""},
+ {"bbb_opus_stereo_128kbps_48000hz.info", ""}},
{Codec2AudioDecHidlTest::standardComp::g711alaw,
- "bbb_g711alaw_1ch_8khz.raw",
- "bbb_g711alaw_1ch_8khz.info"},
+ {"bbb_g711alaw_1ch_8khz.raw", ""},
+ {"bbb_g711alaw_1ch_8khz.info", ""}},
{Codec2AudioDecHidlTest::standardComp::g711mlaw,
- "bbb_g711mulaw_1ch_8khz.raw",
- "bbb_g711mulaw_1ch_8khz.info"},
+ {"bbb_g711mulaw_1ch_8khz.raw", ""},
+ {"bbb_g711mulaw_1ch_8khz.info", ""}},
{Codec2AudioDecHidlTest::standardComp::gsm,
- "bbb_gsm_1ch_8khz_13kbps.raw",
- "bbb_gsm_1ch_8khz_13kbps.info"},
+ {"bbb_gsm_1ch_8khz_13kbps.raw", ""},
+ {"bbb_gsm_1ch_8khz_13kbps.info", ""}},
{Codec2AudioDecHidlTest::standardComp::raw,
- "bbb_raw_1ch_8khz_s32le.raw",
- "bbb_raw_1ch_8khz_s32le.info"},
+ {"bbb_raw_1ch_8khz_s32le.raw", ""},
+ {"bbb_raw_1ch_8khz_s32le.info", ""}},
{Codec2AudioDecHidlTest::standardComp::flac,
- "bbb_flac_stereo_680kbps_48000hz.flac",
- "bbb_flac_stereo_680kbps_48000hz.info"},
+ {"bbb_flac_stereo_680kbps_48000hz.flac", ""},
+ {"bbb_flac_stereo_680kbps_48000hz.info", ""}},
};
for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
if (kCompToURL[i].comp == comp) {
- strcat(mURL, kCompToURL[i].mURL);
- strcat(info, kCompToURL[i].info);
+ strcat(mURL, kCompToURL[i].mURL[streamIndex]);
+ strcat(info, kCompToURL[i].info[streamIndex]);
return;
}
}
}
-void decodeNFrames(const std::shared_ptr<android::Codec2Client::Component> &component,
- std::mutex &queueLock, std::condition_variable &queueCondition,
- std::list<std::unique_ptr<C2Work>> &workQueue,
- std::list<uint64_t> &flushedIndices,
- std::shared_ptr<C2BlockPool> &linearPool,
+void decodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::mutex &queueLock, std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue,
+ std::list<uint64_t>& flushedIndices,
+ std::shared_ptr<C2BlockPool>& linearPool,
std::ifstream& eleStream,
android::Vector<FrameInfo>* Info,
int offset, int range, bool signalEOS = true) {
@@ -462,34 +445,37 @@
}
int size = (*Info)[frameID].bytesCount;
char* data = (char*)malloc(size);
+ ASSERT_NE(data, nullptr);
eleStream.read(data, size);
ASSERT_EQ(eleStream.gcount(), size);
- std::shared_ptr<C2LinearBlock> block;
- ASSERT_EQ(C2_OK,
- linearPool->fetchLinearBlock(
- size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
- &block));
- ASSERT_TRUE(block);
-
- // Write View
- C2WriteView view = block->map().get();
- if (view.error() != C2_OK) {
- fprintf(stderr, "C2LinearBlock::map() failed : %d", view.error());
- break;
- }
- ASSERT_EQ((size_t)size, view.capacity());
- ASSERT_EQ(0u, view.offset());
- ASSERT_EQ((size_t)size, view.size());
-
- memcpy(view.base(), data, size);
-
work->input.buffers.clear();
- work->input.buffers.emplace_back(new LinearBuffer(block));
+ if (size) {
+ std::shared_ptr<C2LinearBlock> block;
+ ASSERT_EQ(C2_OK,
+ linearPool->fetchLinearBlock(
+ size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+ &block));
+ ASSERT_TRUE(block);
+
+ // Write View
+ C2WriteView view = block->map().get();
+ if (view.error() != C2_OK) {
+ fprintf(stderr, "C2LinearBlock::map() failed : %d", view.error());
+ break;
+ }
+ ASSERT_EQ((size_t)size, view.capacity());
+ ASSERT_EQ(0u, view.offset());
+ ASSERT_EQ((size_t)size, view.size());
+
+ memcpy(view.base(), data, size);
+
+ work->input.buffers.emplace_back(new LinearBuffer(block));
+ free(data);
+ }
work->worklets.clear();
work->worklets.emplace_back(new C2Worklet);
- free(data);
std::list<std::unique_ptr<C2Work>> items;
items.push_back(std::move(work));
@@ -502,29 +488,6 @@
}
}
-void waitOnInputConsumption(std::mutex& queueLock,
- std::condition_variable& queueCondition,
- std::list<std::unique_ptr<C2Work>>& workQueue,
- size_t bufferCount = MAX_INPUT_BUFFERS) {
- typedef std::unique_lock<std::mutex> ULock;
- uint32_t queueSize;
- uint32_t maxRetry = 0;
- {
- ULock l(queueLock);
- queueSize = workQueue.size();
- }
- while ((maxRetry < MAX_RETRY) && (queueSize < bufferCount)) {
- ULock l(queueLock);
- if (queueSize != workQueue.size()) {
- queueSize = workQueue.size();
- maxRetry = 0;
- } else {
- queueCondition.wait_for(l, TIME_OUT);
- maxRetry++;
- }
- }
-}
-
TEST_F(Codec2AudioDecHidlTest, validateCompName) {
if (mDisableTest) return;
ALOGV("Checks if the given component is a valid audio component");
@@ -543,10 +506,15 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-TEST_F(Codec2AudioDecHidlTest, DecodeTest) {
+class Codec2AudioDecDecodeTest : public Codec2AudioDecHidlTest,
+ public ::testing::WithParamInterface<int32_t> {
+};
+
+TEST_P(Codec2AudioDecDecodeTest, DecodeTest) {
description("Decodes input file");
if (mDisableTest) return;
+ uint32_t streamIndex = GetParam();
ASSERT_EQ(mComponent->start(), C2_OK);
mTimestampDevTest = true;
char mURL[512], info[512];
@@ -554,7 +522,12 @@
strcpy(mURL, gEnv->getRes().c_str());
strcpy(info, gEnv->getRes().c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mCompName, mURL, info, streamIndex);
+ if (!strcmp(mURL, gEnv->getRes().c_str())) {
+ ALOGV("EMPTY INPUT gEnv->getRes().c_str() %s mURL %s ",
+ gEnv->getRes().c_str(), mURL);
+ return;
+ }
eleInfo.open(info);
ASSERT_EQ(eleInfo.is_open(), true);
@@ -573,6 +546,9 @@
Info.push_back({bytesCount, flags, timestamp});
}
eleInfo.close();
+ // Reset the total number of frames received
+ mFramesReceived = 0;
+ mTimestampUs = 0;
int32_t bitStreamInfo[2] = {0};
if (mCompName == raw) {
bitStreamInfo[0] = 8000;
@@ -629,6 +605,9 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
+INSTANTIATE_TEST_CASE_P(StreamIndexes, Codec2AudioDecDecodeTest,
+ ::testing::Values(0, 1));
+
// thumbnail test
TEST_F(Codec2AudioDecHidlTest, ThumbnailTest) {
description("Test Request for thumbnail");
@@ -718,7 +697,6 @@
ASSERT_EQ(mComponent->queue(&items), C2_OK);
{
- typedef std::unique_lock<std::mutex> ULock;
ULock l(mQueueLock);
if (mWorkQueue.size() != MAX_INPUT_BUFFERS) {
mQueueCondition.wait_for(l, TIME_OUT);
@@ -729,46 +707,6 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-TEST_F(Codec2AudioDecHidlTest, EmptyBufferTest) {
- description("Tests empty input buffer");
- if (mDisableTest) return;
- typedef std::unique_lock<std::mutex> ULock;
- ASSERT_EQ(mComponent->start(), C2_OK);
- std::unique_ptr<C2Work> work;
- // Prepare C2Work
- {
- ULock l(mQueueLock);
- if (!mWorkQueue.empty()) {
- work.swap(mWorkQueue.front());
- mWorkQueue.pop_front();
- } else {
- ASSERT_TRUE(false) << "mWorkQueue Empty at the start of test";
- }
- }
- ASSERT_NE(work, nullptr);
-
- work->input.flags = (C2FrameData::flags_t)0;
- work->input.ordinal.timestamp = 0;
- work->input.ordinal.frameIndex = 0;
- work->input.buffers.clear();
- work->worklets.clear();
- work->worklets.emplace_back(new C2Worklet);
-
- std::list<std::unique_ptr<C2Work>> items;
- items.push_back(std::move(work));
- ASSERT_EQ(mComponent->queue(&items), C2_OK);
-
- {
- typedef std::unique_lock<std::mutex> ULock;
- ULock l(mQueueLock);
- if (mWorkQueue.size() != MAX_INPUT_BUFFERS) {
- mQueueCondition.wait_for(l, TIME_OUT);
- }
- }
- ASSERT_EQ(mWorkQueue.size(), (size_t)MAX_INPUT_BUFFERS);
- ASSERT_EQ(mComponent->stop(), C2_OK);
-}
-
TEST_F(Codec2AudioDecHidlTest, FlushTest) {
description("Tests Flush calls");
if (mDisableTest) return;
@@ -891,6 +829,72 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
+TEST_F(Codec2AudioDecHidlTest, DecodeTestEmptyBuffersInserted) {
+ description("Decode with multiple empty input frames");
+ if (mDisableTest) return;
+
+ ASSERT_EQ(mComponent->start(), C2_OK);
+
+ char mURL[512], info[512];
+ std::ifstream eleStream, eleInfo;
+
+ strcpy(mURL, gEnv->getRes().c_str());
+ strcpy(info, gEnv->getRes().c_str());
+ GetURLForComponent(mCompName, mURL, info);
+
+ eleInfo.open(info);
+ ASSERT_EQ(eleInfo.is_open(), true) << mURL << " - file not found";
+ android::Vector<FrameInfo> Info;
+ int bytesCount = 0;
+ uint32_t frameId = 0;
+ uint32_t flags = 0;
+ uint32_t timestamp = 0;
+ bool codecConfig = false;
+ // This test introduces empty CSD after every 20th frame
+ // and empty input frames at an interval of 5 frames.
+ while (1) {
+ if (!(frameId % 5)) {
+ if (!(frameId % 20)) flags = 32;
+ else flags = 0;
+ bytesCount = 0;
+ } else {
+ if (!(eleInfo >> bytesCount)) break;
+ eleInfo >> flags;
+ eleInfo >> timestamp;
+ codecConfig = flags ?
+ ((1 << (flags - 1)) & C2FrameData::FLAG_CODEC_CONFIG) != 0 : 0;
+ }
+ Info.push_back({bytesCount, flags, timestamp});
+ frameId++;
+ }
+ eleInfo.close();
+
+ ALOGV("mURL : %s", mURL);
+ eleStream.open(mURL, std::ifstream::binary);
+ ASSERT_EQ(eleStream.is_open(), true);
+ ASSERT_NO_FATAL_FAILURE(decodeNFrames(
+ mComponent, mQueueLock, mQueueCondition, mWorkQueue, mFlushedIndices,
+ mLinearPool, eleStream, &Info, 0, (int)Info.size()));
+
+ // Blocking call to make the test wait until all the inputs are
+ // consumed
+ if (!mEos) {
+ ALOGV("Waiting for input consumption");
+ ASSERT_NO_FATAL_FAILURE(
+ waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
+ }
+
+ eleStream.close();
+ if (mFramesReceived != Info.size()) {
+ ALOGE("Input buffer count and Output buffer count mismatch");
+ ALOGV("framesReceived : %d inputFrames : %zu", mFramesReceived,
+ Info.size());
+ ASSERT_TRUE(false);
+ }
+
+ ASSERT_EQ(mComponent->stop(), C2_OK);
+}
+
} // anonymous namespace
int main(int argc, char** argv) {
diff --git a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
index 4f86aad..5d66ee5 100644
--- a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
@@ -96,6 +96,7 @@
const StringToName kStringToName[] = {
{"aac", aac},
{"flac", flac},
+ {"opus", opus},
{"amrnb", amrnb},
{"amrwb", amrwb},
};
@@ -135,45 +136,17 @@
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
for (std::unique_ptr<C2Work>& work : workItems) {
- // handle configuration changes in work done
- if (!work->worklets.empty() &&
- (work->worklets.front()->output.configUpdate.size() != 0)) {
- ALOGV("Config Update");
- std::vector<std::unique_ptr<C2Param>> updates =
- std::move(work->worklets.front()->output.configUpdate);
- std::vector<C2Param*> configParam;
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- for (size_t i = 0; i < updates.size(); ++i) {
- C2Param* param = updates[i].get();
- if (param->index() == C2StreamCsdInfo::output::PARAM_TYPE) {
- mCsd = true;
- }
- }
- }
- mFramesReceived++;
- mEos = (work->worklets.front()->output.flags &
- C2FrameData::FLAG_END_OF_STREAM) != 0;
- auto frameIndexIt =
- std::find(mFlushedIndices.begin(), mFlushedIndices.end(),
- work->input.ordinal.frameIndex.peeku());
- ALOGV("WorkDone: frameID received %d",
- (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
- work->input.buffers.clear();
- work->worklets.clear();
- {
- typedef std::unique_lock<std::mutex> ULock;
- ULock l(mQueueLock);
- mWorkQueue.push_back(std::move(work));
- if (!mFlushedIndices.empty()) {
- mFlushedIndices.erase(frameIndexIt);
- }
- mQueueCondition.notify_all();
+ if (!work->worklets.empty()) {
+ workDone(mComponent, work, mFlushedIndices, mQueueLock,
+ mQueueCondition, mWorkQueue, mEos, mCsd,
+ mFramesReceived);
}
}
}
enum standardComp {
aac,
flac,
+ opus,
amrnb,
amrwb,
unknown_comp,
@@ -275,6 +248,8 @@
"bbb_raw_1ch_16khz_s16le.raw"},
{Codec2AudioEncHidlTest::standardComp::flac,
"bbb_raw_2ch_48khz_s16le.raw"},
+ {Codec2AudioEncHidlTest::standardComp::opus,
+ "bbb_raw_2ch_48khz_s16le.raw"},
};
for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
@@ -285,11 +260,11 @@
}
}
-void encodeNFrames(const std::shared_ptr<android::Codec2Client::Component> &component,
- std::mutex &queueLock, std::condition_variable &queueCondition,
- std::list<std::unique_ptr<C2Work>> &workQueue,
- std::list<uint64_t> &flushedIndices,
- std::shared_ptr<C2BlockPool> &linearPool,
+void encodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::mutex &queueLock, std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue,
+ std::list<uint64_t>& flushedIndices,
+ std::shared_ptr<C2BlockPool>& linearPool,
std::ifstream& eleStream, uint32_t nFrames,
int32_t samplesPerFrame, int32_t nChannels,
int32_t nSampleRate, bool flushed = false,
@@ -334,6 +309,7 @@
flushedIndices.emplace_back(frameID);
}
char* data = (char*)malloc(bytesCount);
+ ASSERT_NE(data, nullptr);
eleStream.read(data, bytesCount);
ASSERT_EQ(eleStream.gcount(), bytesCount);
std::shared_ptr<C2LinearBlock> block;
@@ -372,29 +348,6 @@
}
}
-void waitOnInputConsumption(std::mutex& queueLock,
- std::condition_variable& queueCondition,
- std::list<std::unique_ptr<C2Work>>& workQueue,
- size_t bufferCount = MAX_INPUT_BUFFERS) {
- typedef std::unique_lock<std::mutex> ULock;
- uint32_t queueSize;
- uint32_t maxRetry = 0;
- {
- ULock l(queueLock);
- queueSize = workQueue.size();
- }
- while ((maxRetry < MAX_RETRY) && (queueSize < bufferCount)) {
- ULock l(queueLock);
- if (queueSize != workQueue.size()) {
- queueSize = workQueue.size();
- maxRetry = 0;
- } else {
- queueCondition.wait_for(l, TIME_OUT);
- maxRetry++;
- }
- }
-}
-
TEST_F(Codec2AudioEncHidlTest, validateCompName) {
if (mDisableTest) return;
ALOGV("Checks if the given component is a valid audio component");
@@ -425,6 +378,11 @@
nSampleRate = 48000;
samplesPerFrame = 1152;
break;
+ case opus:
+ nChannels = 2;
+ nSampleRate = 48000;
+ samplesPerFrame = 960;
+ break;
case amrnb:
nChannels = 1;
nSampleRate = 8000;
@@ -458,7 +416,7 @@
ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
ASSERT_TRUE(false);
}
- if ((mCompName == flac || mCompName == aac)) {
+ if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
if (!mCsd) {
ALOGE("CSD buffer missing");
ASSERT_TRUE(false);
@@ -508,46 +466,6 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-TEST_F(Codec2AudioEncHidlTest, EmptyBufferTest) {
- description("Tests empty input buffer");
- if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
-
- typedef std::unique_lock<std::mutex> ULock;
- std::unique_ptr<C2Work> work;
- {
- ULock l(mQueueLock);
- if (!mWorkQueue.empty()) {
- work.swap(mWorkQueue.front());
- mWorkQueue.pop_front();
- } else {
- ALOGE("mWorkQueue Empty is not expected at the start of the test");
- ASSERT_TRUE(false);
- }
- }
- ASSERT_NE(work, nullptr);
- work->input.flags = (C2FrameData::flags_t)0;
- work->input.ordinal.timestamp = 0;
- work->input.ordinal.frameIndex = 0;
- work->input.buffers.clear();
- work->worklets.clear();
- work->worklets.emplace_back(new C2Worklet);
-
- std::list<std::unique_ptr<C2Work>> items;
- items.push_back(std::move(work));
- ASSERT_EQ(mComponent->queue(&items), C2_OK);
- uint32_t queueSize;
- {
- ULock l(mQueueLock);
- queueSize = mWorkQueue.size();
- if (queueSize < MAX_INPUT_BUFFERS) {
- mQueueCondition.wait_for(l, TIME_OUT);
- }
- }
- ASSERT_EQ(mWorkQueue.size(), (uint32_t)MAX_INPUT_BUFFERS);
- ASSERT_EQ(mComponent->stop(), C2_OK);
-}
-
TEST_F(Codec2AudioEncHidlTest, FlushTest) {
description("Test Request for flush");
if (mDisableTest) return;
@@ -574,6 +492,11 @@
nSampleRate = 48000;
samplesPerFrame = 1152;
break;
+ case opus:
+ nChannels = 2;
+ nSampleRate = 48000;
+ samplesPerFrame = 960;
+ break;
case amrnb:
nChannels = 1;
nSampleRate = 8000;
diff --git a/media/codec2/hidl/1.0/vts/audio/media_c2_audio_hidl_test_common.h b/media/codec2/hidl/1.0/vts/audio/media_c2_audio_hidl_test_common.h
index 89eb69e..4d773ce 100644
--- a/media/codec2/hidl/1.0/vts/audio/media_c2_audio_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/audio/media_c2_audio_hidl_test_common.h
@@ -17,8 +17,5 @@
#ifndef MEDIA_C2_AUDIO_HIDL_TEST_COMMON_H
#define MEDIA_C2_AUDIO_HIDL_TEST_COMMON_H
-#define MAX_RETRY 20
-#define TIME_OUT 200ms
-#define MAX_INPUT_BUFFERS 8
#endif // MEDIA_C2_AUDIO_HIDL_TEST_COMMON_H
diff --git a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.cpp
index fdccdbb..64a458c 100644
--- a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.cpp
@@ -14,39 +14,115 @@
* limitations under the License.
*/
+// #define LOG_NDEBUG 0
#define LOG_TAG "media_c2_hidl_test_common"
#include <stdio.h>
#include "media_c2_hidl_test_common.h"
-using ::android::hardware::media::c2::V1_0::FieldSupportedValues;
-void dumpFSV(const FieldSupportedValues& sv) {
- ALOGD("Dumping FSV data");
- using namespace std;
- if (sv.type == FieldSupportedValues::Type::EMPTY) {
- ALOGD("FSV Value is Empty");
- }
- if (sv.type == FieldSupportedValues::Type::RANGE) {
- ALOGD("Dumping FSV range");
- cout << ".range(" << sv.range.min;
- if (sv.range.step != 0) {
- cout << ":" << sv.range.step;
+// Test the codec with a null buffer or an empty input buffer, with and without flags set
+void testInputBuffer(
+ const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::mutex& queueLock, std::list<std::unique_ptr<C2Work>>& workQueue,
+ uint32_t flags, bool isNullBuffer) {
+ std::unique_ptr<C2Work> work;
+ {
+ typedef std::unique_lock<std::mutex> ULock;
+ ULock l(queueLock);
+ if (!workQueue.empty()) {
+ work.swap(workQueue.front());
+ workQueue.pop_front();
+ } else {
+ ASSERT_TRUE(false) << "workQueue Empty at the start of test";
}
- if (sv.range.num != 1 || sv.range.denom != 1) {
- cout << ":" << sv.range.num << "/" << sv.range.denom;
- }
- cout << " " << sv.range.max << ")";
}
- if (sv.values.size()) {
- ALOGD("Dumping FSV value");
- cout << (sv.type == FieldSupportedValues::Type::FLAGS ? ".flags("
- : ".list(");
- const char* sep = "";
- for (const auto& p : sv.values) {
- cout << sep << p;
- sep = ",";
- }
- cout << ")";
+ ASSERT_NE(work, nullptr);
+
+ work->input.flags = (C2FrameData::flags_t)flags;
+ work->input.ordinal.timestamp = 0;
+ work->input.ordinal.frameIndex = 0;
+ work->input.buffers.clear();
+ if (isNullBuffer) {
+ work->input.buffers.emplace_back(nullptr);
}
- cout << endl;
+ work->worklets.clear();
+ work->worklets.emplace_back(new C2Worklet);
+
+ std::list<std::unique_ptr<C2Work>> items;
+ items.push_back(std::move(work));
+ ASSERT_EQ(component->queue(&items), C2_OK);
}
+
+// Wait for all the inputs to be consumed by the plugin.
+void waitOnInputConsumption(std::mutex& queueLock,
+ std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue,
+ size_t bufferCount) {
+ typedef std::unique_lock<std::mutex> ULock;
+ uint32_t queueSize;
+ uint32_t maxRetry = 0;
+ {
+ ULock l(queueLock);
+ queueSize = workQueue.size();
+ }
+ while ((maxRetry < MAX_RETRY) && (queueSize < bufferCount)) {
+ ULock l(queueLock);
+ if (queueSize != workQueue.size()) {
+ queueSize = workQueue.size();
+ maxRetry = 0;
+ } else {
+ queueCondition.wait_for(l, TIME_OUT);
+ maxRetry++;
+ }
+ }
+}
+
+// process onWorkDone received by Listener
+void workDone(
+ const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::unique_ptr<C2Work>& work, std::list<uint64_t>& flushedIndices,
+ std::mutex& queueLock, std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue, bool& eos, bool& csd,
+ uint32_t& framesReceived) {
+ // handle configuration changes in work done
+ if (work->worklets.front()->output.configUpdate.size() != 0) {
+ ALOGV("Config Update");
+ std::vector<std::unique_ptr<C2Param>> updates =
+ std::move(work->worklets.front()->output.configUpdate);
+ std::vector<C2Param*> configParam;
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ for (size_t i = 0; i < updates.size(); ++i) {
+ C2Param* param = updates[i].get();
+ if (param->index() == C2StreamCsdInfo::output::PARAM_TYPE) {
+ csd = true;
+ } else if ((param->index() ==
+ C2StreamSampleRateInfo::output::PARAM_TYPE) ||
+ (param->index() ==
+ C2StreamChannelCountInfo::output::PARAM_TYPE) ||
+ (param->index() ==
+ C2VideoSizeStreamInfo::output::PARAM_TYPE)) {
+ configParam.push_back(param);
+ }
+ }
+ component->config(configParam, C2_DONT_BLOCK, &failures);
+ ASSERT_EQ(failures.size(), 0u);
+ }
+ framesReceived++;
+ eos = (work->worklets.front()->output.flags &
+ C2FrameData::FLAG_END_OF_STREAM) != 0;
+ auto frameIndexIt = std::find(flushedIndices.begin(), flushedIndices.end(),
+ work->input.ordinal.frameIndex.peeku());
+ ALOGV("WorkDone: frameID received %d",
+ (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
+ work->input.buffers.clear();
+ work->worklets.clear();
+ {
+ typedef std::unique_lock<std::mutex> ULock;
+ ULock l(queueLock);
+ workQueue.push_back(std::move(work));
+ if (!flushedIndices.empty()) {
+ flushedIndices.erase(frameIndexIt);
+ }
+ queueCondition.notify_all();
+ }
+}
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.h
index f765baa..a688530 100644
--- a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.h
@@ -22,6 +22,7 @@
#include <android/hardware/media/c2/1.0/types.h>
#include <C2Component.h>
+#include <C2Config.h>
#include <getopt.h>
#include <hidl/HidlSupport.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -38,6 +39,10 @@
#include <VtsHalHidlTargetTestEnvBase.h>
+#define MAX_RETRY 20
+#define TIME_OUT 400ms
+#define MAX_INPUT_BUFFERS 8
+
/*
* Handle Callback functions onWorkDone(), onTripped(),
* onError(), onDeath(), onFramesRendered()
@@ -176,5 +181,21 @@
/*
* common functions declarations
*/
-void dumpFSV(const FieldSupportedValues& sv);
+void testInputBuffer(
+ const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::mutex& queueLock, std::list<std::unique_ptr<C2Work>>& workQueue,
+ uint32_t flags, bool isNullBuffer);
+
+void waitOnInputConsumption(std::mutex& queueLock,
+ std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue,
+ size_t bufferCount = MAX_INPUT_BUFFERS);
+
+void workDone(
+ const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::unique_ptr<C2Work>& work, std::list<uint64_t>& flushedIndices,
+ std::mutex& queueLock, std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue, bool& eos, bool& csd,
+ uint32_t& framesReceived);
+
#endif // MEDIA_C2_HIDL_TEST_COMMON_H
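The helpers declared above are meant to be driven together by the component and codec tests. The following is only an illustrative sketch of that call pattern (it mirrors the InputBufferTest added further below; runInputBufferOnce is a hypothetical name and the usual gtest/Codec2Client fixture setup is assumed):

    #include "media_c2_hidl_test_common.h"

    // Queue one flagged (null or empty) input, wait for the component to hand
    // the work item back, then stop. All arguments come from the test fixture.
    static void runInputBufferOnce(
            const std::shared_ptr<android::Codec2Client::Component>& component,
            std::mutex& queueLock, std::condition_variable& queueCondition,
            std::list<std::unique_ptr<C2Work>>& workQueue,
            uint32_t flags, bool isNullBuffer) {
        ASSERT_EQ(component->start(), C2_OK);
        ASSERT_NO_FATAL_FAILURE(testInputBuffer(
                component, queueLock, workQueue, flags, isNullBuffer));
        ASSERT_NO_FATAL_FAILURE(
                waitOnInputConsumption(queueLock, queueCondition, workQueue));
        ASSERT_EQ(component->stop(), C2_OK);
    }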
diff --git a/media/codec2/hidl/1.0/vts/component/VtsHidlC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/component/VtsHidlC2V1_0TargetComponentTest.cpp
index b7fb655..ec803d7 100644
--- a/media/codec2/hidl/1.0/vts/component/VtsHidlC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/component/VtsHidlC2V1_0TargetComponentTest.cpp
@@ -38,14 +38,21 @@
public:
virtual void SetUp() override {
Super::SetUp();
+ mEos = false;
mClient = android::Codec2Client::CreateFromService(
gEnv->getInstance().c_str());
ASSERT_NE(mClient, nullptr);
- mListener.reset(new CodecListener());
+ mListener.reset(new CodecListener(
+ [this](std::list<std::unique_ptr<C2Work>>& workItems) {
+ handleWorkDone(workItems);
+ }));
ASSERT_NE(mListener, nullptr);
mClient->createComponent(gEnv->getComponent().c_str(), mListener,
&mComponent);
ASSERT_NE(mComponent, nullptr);
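+        // Pre-allocate a pool of MAX_INPUT_BUFFERS reusable C2Work items;
+        // handleWorkDone() returns completed items to this queue.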
+ for (int i = 0; i < MAX_INPUT_BUFFERS; ++i) {
+ mWorkQueue.emplace_back(new C2Work);
+ }
}
virtual void TearDown() override {
@@ -59,6 +66,23 @@
}
Super::TearDown();
}
+ // callback function to process onWorkDone received by Listener
+ void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
+ for (std::unique_ptr<C2Work>& work : workItems) {
+ if (!work->worklets.empty()) {
+ bool mCsd = false;
+ uint32_t mFramesReceived = 0;
+ std::list<uint64_t> mFlushedIndices;
+ workDone(mComponent, work, mFlushedIndices, mQueueLock, mQueueCondition,
+ mWorkQueue, mEos, mCsd, mFramesReceived);
+ }
+ }
+ }
+
+ bool mEos;
+ std::mutex mQueueLock;
+ std::condition_variable mQueueCondition;
+ std::list<std::unique_ptr<C2Work>> mWorkQueue;
std::shared_ptr<android::Codec2Client> mClient;
std::shared_ptr<android::Codec2Client::Listener> mListener;
@@ -135,8 +159,6 @@
ALOGV("Multiple Start Stop and Reset Test");
c2_status_t err = C2_OK;
-#define MAX_RETRY 16
-
for (size_t i = 0; i < MAX_RETRY; i++) {
err = mComponent->start();
ASSERT_EQ(err, C2_OK);
@@ -184,13 +206,44 @@
ASSERT_EQ(err, C2_OK);
ASSERT_EQ(failures.size(), 0u);
-#define MAX_RETRY 16
for (size_t i = 0; i < MAX_RETRY; i++) {
err = mComponent->release();
ASSERT_EQ(err, C2_OK);
}
}
+class Codec2ComponentInputTests : public Codec2ComponentHidlTest,
+ public ::testing::WithParamInterface<std::pair<uint32_t, bool> > {
+};
+
+TEST_P(Codec2ComponentInputTests, InputBufferTest) {
+    description("Tests null and empty input buffers with various flags");
+
+ uint32_t flags = GetParam().first;
+ bool isNullBuffer = GetParam().second;
+ if (isNullBuffer) ALOGD("Testing for null input buffer with flag : %u", flags);
+ else ALOGD("Testing for empty input buffer with flag : %u", flags);
+ mEos = false;
+ ASSERT_EQ(mComponent->start(), C2_OK);
+ ASSERT_NO_FATAL_FAILURE(testInputBuffer(
+ mComponent, mQueueLock, mWorkQueue, flags, isNullBuffer));
+
+ ALOGD("Waiting for input consumption");
+ ASSERT_NO_FATAL_FAILURE(
+ waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
+
+ if (flags == C2FrameData::FLAG_END_OF_STREAM) ASSERT_EQ(mEos, true);
+ ASSERT_EQ(mComponent->stop(), C2_OK);
+ ASSERT_EQ(mComponent->reset(), C2_OK);
+}
+
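+// The (flags, isNullBuffer) pairs below cover: a null input buffer with no
+// flags, a null buffer carrying EOS, an empty buffer with no flags, an empty
+// buffer marked as codec config, and an empty buffer carrying EOS.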
+INSTANTIATE_TEST_CASE_P(NonStdInputs, Codec2ComponentInputTests, ::testing::Values(
+ std::make_pair(0, true),
+ std::make_pair(C2FrameData::FLAG_END_OF_STREAM, true),
+ std::make_pair(0, false),
+ std::make_pair(C2FrameData::FLAG_CODEC_CONFIG, false),
+ std::make_pair(C2FrameData::FLAG_END_OF_STREAM, false)));
+
} // anonymous namespace
// TODO: Add test for Invalid work,
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz_multi_frame.info
new file mode 100644
index 0000000..182af20
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz_multi_frame.info
@@ -0,0 +1,443 @@
+5 32 0
+5 32 0
+337 1 0
+322 1 21333
+279 1 42666
+563 1 64000
+635 1 106666
+634 1 149333
+629 1 192000
+680 1 234666
+688 1 277333
+1036 1 320000
+1040 1 384000
+1009 1 448000
+1020 1 512000
+1357 1 576000
+1353 1 661333
+1351 1 746666
+1351 1 832000
+343 1 917333
+335 1 938666
+339 1 960000
+342 1 981333
+348 1 1002666
+350 1 1024000
+351 1 1045333
+342 1 1066666
+366 1 1088000
+340 1 1109333
+354 1 1130666
+340 1 1152000
+334 1 1173333
+338 1 1194666
+340 1 1216000
+351 1 1237333
+346 1 1258666
+331 1 1280000
+321 1 1301333
+343 1 1322666
+342 1 1344000
+345 1 1365333
+326 1 1386666
+342 1 1408000
+356 1 1429333
+351 1 1450666
+343 1 1472000
+347 1 1493333
+349 1 1514666
+350 1 1536000
+330 1 1557333
+341 1 1578666
+340 1 1600000
+330 1 1621333
+340 1 1642666
+335 1 1664000
+344 1 1685333
+359 1 1706666
+337 1 1728000
+346 1 1749333
+330 1 1770666
+351 1 1792000
+355 1 1813333
+352 1 1834666
+325 1 1856000
+342 1 1877333
+327 1 1898666
+349 1 1920000
+326 1 1941333
+337 1 1962666
+378 1 1984000
+321 1 2005333
+319 1 2026666
+346 1 2048000
+352 1 2069333
+349 1 2090666
+331 1 2112000
+330 1 2133333
+329 1 2154666
+333 1 2176000
+367 1 2197333
+362 1 2218666
+337 1 2240000
+337 1 2261333
+360 1 2282666
+333 1 2304000
+317 1 2325333
+344 1 2346666
+335 1 2368000
+337 1 2389333
+349 1 2410666
+336 1 2432000
+348 1 2453333
+349 1 2474666
+342 1 2496000
+359 1 2517333
+340 1 2538666
+340 1 2560000
+348 1 2581333
+334 1 2602666
+328 1 2624000
+341 1 2645333
+339 1 2666666
+337 1 2688000
+350 1 2709333
+326 1 2730666
+360 1 2752000
+344 1 2773333
+340 1 2794666
+343 1 2816000
+361 1 2837333
+329 1 2858666
+345 1 2880000
+345 1 2901333
+330 1 2922666
+342 1 2944000
+344 1 2965333
+330 1 2986666
+329 1 3008000
+335 1 3029333
+366 1 3050666
+328 1 3072000
+349 1 3093333
+339 1 3114666
+340 1 3136000
+335 1 3157333
+327 1 3178666
+348 1 3200000
+339 1 3221333
+334 1 3242666
+350 1 3264000
+325 1 3285333
+361 1 3306666
+338 1 3328000
+350 1 3349333
+353 1 3370666
+327 1 3392000
+346 1 3413333
+348 1 3434666
+339 1 3456000
+342 1 3477333
+334 1 3498666
+350 1 3520000
+354 1 3541333
+363 1 3562666
+322 1 3584000
+337 1 3605333
+355 1 3626666
+329 1 3648000
+324 1 3669333
+338 1 3690666
+356 1 3712000
+330 1 3733333
+321 1 3754666
+337 1 3776000
+345 1 3797333
+335 1 3818666
+348 1 3840000
+342 1 3861333
+348 1 3882666
+335 1 3904000
+344 1 3925333
+357 1 3946666
+368 1 3968000
+324 1 3989333
+343 1 4010666
+341 1 4032000
+329 1 4053333
+356 1 4074666
+317 1 4096000
+351 1 4117333
+340 1 4138666
+340 1 4160000
+332 1 4181333
+355 1 4202666
+357 1 4224000
+327 1 4245333
+338 1 4266666
+323 1 4288000
+346 1 4309333
+352 1 4330666
+347 1 4352000
+343 1 4373333
+311 1 4394666
+338 1 4416000
+365 1 4437333
+349 1 4458666
+327 1 4480000
+355 1 4501333
+319 1 4522666
+349 1 4544000
+351 1 4565333
+337 1 4586666
+340 1 4608000
+349 1 4629333
+316 1 4650666
+344 1 4672000
+334 1 4693333
+344 1 4714666
+347 1 4736000
+348 1 4757333
+334 1 4778666
+338 1 4800000
+331 1 4821333
+344 1 4842666
+342 1 4864000
+336 1 4885333
+326 1 4906666
+364 1 4928000
+350 1 4949333
+350 1 4970666
+363 1 4992000
+358 1 5013333
+305 1 5034666
+344 1 5056000
+346 1 5077333
+342 1 5098666
+330 1 5120000
+318 1 5141333
+361 1 5162666
+354 1 5184000
+313 1 5205333
+330 1 5226666
+350 1 5248000
+347 1 5269333
+346 1 5290666
+357 1 5312000
+325 1 5333333
+335 1 5354666
+331 1 5376000
+366 1 5397333
+329 1 5418666
+349 1 5440000
+371 1 5461333
+326 1 5482666
+333 1 5504000
+319 1 5525333
+327 1 5546666
+353 1 5568000
+356 1 5589333
+348 1 5610666
+338 1 5632000
+331 1 5653333
+341 1 5674666
+362 1 5696000
+326 1 5717333
+359 1 5738666
+315 1 5760000
+376 1 5781333
+343 1 5802666
+354 1 5824000
+353 1 5845333
+344 1 5866666
+334 1 5888000
+345 1 5909333
+355 1 5930666
+322 1 5952000
+334 1 5973333
+353 1 5994666
+338 1 6016000
+351 1 6037333
+334 1 6058666
+339 1 6080000
+345 1 6101333
+347 1 6122666
+355 1 6144000
+312 1 6165333
+352 1 6186666
+354 1 6208000
+318 1 6229333
+344 1 6250666
+363 1 6272000
+321 1 6293333
+339 1 6314666
+356 1 6336000
+334 1 6357333
+354 1 6378666
+325 1 6400000
+321 1 6421333
+341 1 6442666
+337 1 6464000
+351 1 6485333
+343 1 6506666
+341 1 6528000
+344 1 6549333
+341 1 6570666
+364 1 6592000
+319 1 6613333
+348 1 6634666
+332 1 6656000
+333 1 6677333
+343 1 6698666
+348 1 6720000
+347 1 6741333
+350 1 6762666
+342 1 6784000
+341 1 6805333
+326 1 6826666
+351 1 6848000
+329 1 6869333
+323 1 6890666
+350 1 6912000
+361 1 6933333
+326 1 6954666
+345 1 6976000
+345 1 6997333
+311 1 7018666
+349 1 7040000
+358 1 7061333
+352 1 7082666
+347 1 7104000
+364 1 7125333
+328 1 7146666
+318 1 7168000
+351 1 7189333
+340 1 7210666
+341 1 7232000
+355 1 7253333
+336 1 7274666
+352 1 7296000
+341 1 7317333
+334 1 7338666
+348 1 7360000
+342 1 7381333
+335 1 7402666
+342 1 7424000
+359 1 7445333
+349 1 7466666
+329 1 7488000
+356 1 7509333
+292 1 7530666
+316 1 7552000
+318 1 7573333
+320 1 7594666
+342 1 7616000
+285 1 7637333
+326 1 7658666
+352 1 7680000
+392 1 7701333
+364 1 7722666
+384 1 7744000
+334 1 7765333
+317 1 7786666
+326 1 7808000
+373 1 7829333
+354 1 7850666
+329 1 7872000
+347 1 7893333
+353 1 7914666
+338 1 7936000
+317 1 7957333
+354 1 7978666
+345 1 8000000
+350 1 8021333
+351 1 8042666
+332 1 8064000
+358 1 8085333
+315 1 8106666
+336 1 8128000
+358 1 8149333
+343 1 8170666
+319 1 8192000
+370 1 8213333
+344 1 8234666
+361 1 8256000
+343 1 8277333
+337 1 8298666
+354 1 8320000
+332 1 8341333
+348 1 8362666
+328 1 8384000
+345 1 8405333
+340 1 8426666
+346 1 8448000
+341 1 8469333
+344 1 8490666
+342 1 8512000
+341 1 8533333
+345 1 8554666
+337 1 8576000
+335 1 8597333
+335 1 8618666
+340 1 8640000
+345 1 8661333
+341 1 8682666
+342 1 8704000
+338 1 8725333
+343 1 8746666
+336 1 8768000
+338 1 8789333
+353 1 8810666
+339 1 8832000
+329 1 8853333
+349 1 8874666
+323 1 8896000
+351 1 8917333
+359 1 8938666
+357 1 8960000
+341 1 8981333
+333 1 9002666
+335 1 9024000
+328 1 9045333
+347 1 9066666
+343 1 9088000
+369 1 9109333
+331 1 9130666
+344 1 9152000
+330 1 9173333
+346 1 9194666
+337 1 9216000
+341 1 9237333
+338 1 9258666
+329 1 9280000
+360 1 9301333
+336 1 9322666
+341 1 9344000
+341 1 9365333
+345 1 9386666
+351 1 9408000
+349 1 9429333
+336 1 9450666
+326 1 9472000
+349 1 9493333
+343 1 9514666
+357 1 9536000
+342 1 9557333
+325 1 9578666
+346 1 9600000
+326 1 9621333
+402 1 9642666
+331 1 9664000
+339 1 9685333
+371 1 9706666
+314 1 9728000
+310 1 9749333
+364 1 9770666
+338 1 9792000
+339 1 9813333
+337 1 9834666
+355 1 9856000
+351 1 9877333
+332 1 9898666
+316 1 9920000
+474 1 9941333
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info
new file mode 100644
index 0000000..c420009
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info
@@ -0,0 +1,460 @@
+41 1 0
+41 1 20000
+82 1 40000
+82 1 80000
+82 1 120000
+82 1 160000
+82 1 200000
+82 1 240000
+82 1 280000
+82 1 320000
+82 1 360000
+123 1 400000
+123 1 460000
+123 1 520000
+123 1 580000
+123 1 640000
+164 1 700000
+164 1 780000
+164 1 860000
+164 1 940000
+164 1 1020000
+41 1 1100000
+41 1 1120000
+41 1 1140000
+41 1 1160000
+41 1 1180000
+41 1 1200000
+41 1 1220000
+41 1 1240000
+41 1 1260000
+41 1 1280000
+41 1 1300000
+41 1 1320000
+41 1 1340000
+41 1 1360000
+41 1 1380000
+41 1 1400000
+41 1 1420000
+41 1 1440000
+41 1 1460000
+41 1 1480000
+41 1 1500000
+41 1 1520000
+41 1 1540000
+41 1 1560000
+41 1 1580000
+41 1 1600000
+41 1 1620000
+41 1 1640000
+41 1 1660000
+41 1 1680000
+41 1 1700000
+41 1 1720000
+41 1 1740000
+41 1 1760000
+41 1 1780000
+41 1 1800000
+41 1 1820000
+41 1 1840000
+41 1 1860000
+41 1 1880000
+41 1 1900000
+41 1 1920000
+41 1 1940000
+41 1 1960000
+41 1 1980000
+41 1 2000000
+41 1 2020000
+41 1 2040000
+41 1 2060000
+41 1 2080000
+41 1 2100000
+41 1 2120000
+41 1 2140000
+41 1 2160000
+41 1 2180000
+41 1 2200000
+41 1 2220000
+41 1 2240000
+41 1 2260000
+41 1 2280000
+41 1 2300000
+41 1 2320000
+41 1 2340000
+41 1 2360000
+41 1 2380000
+41 1 2400000
+41 1 2420000
+41 1 2440000
+41 1 2460000
+41 1 2480000
+41 1 2500000
+41 1 2520000
+41 1 2540000
+41 1 2560000
+41 1 2580000
+41 1 2600000
+41 1 2620000
+41 1 2640000
+41 1 2660000
+41 1 2680000
+41 1 2700000
+41 1 2720000
+41 1 2740000
+41 1 2760000
+41 1 2780000
+41 1 2800000
+41 1 2820000
+41 1 2840000
+41 1 2860000
+41 1 2880000
+41 1 2900000
+41 1 2920000
+41 1 2940000
+41 1 2960000
+41 1 2980000
+41 1 3000000
+41 1 3020000
+41 1 3040000
+41 1 3060000
+41 1 3080000
+41 1 3100000
+41 1 3120000
+41 1 3140000
+41 1 3160000
+41 1 3180000
+41 1 3200000
+41 1 3220000
+41 1 3240000
+41 1 3260000
+41 1 3280000
+41 1 3300000
+41 1 3320000
+41 1 3340000
+41 1 3360000
+41 1 3380000
+41 1 3400000
+41 1 3420000
+41 1 3440000
+41 1 3460000
+41 1 3480000
+41 1 3500000
+41 1 3520000
+41 1 3540000
+41 1 3560000
+41 1 3580000
+41 1 3600000
+41 1 3620000
+41 1 3640000
+41 1 3660000
+41 1 3680000
+41 1 3700000
+41 1 3720000
+41 1 3740000
+41 1 3760000
+41 1 3780000
+41 1 3800000
+41 1 3820000
+41 1 3840000
+41 1 3860000
+41 1 3880000
+41 1 3900000
+41 1 3920000
+41 1 3940000
+41 1 3960000
+41 1 3980000
+41 1 4000000
+41 1 4020000
+41 1 4040000
+41 1 4060000
+41 1 4080000
+41 1 4100000
+41 1 4120000
+41 1 4140000
+41 1 4160000
+41 1 4180000
+41 1 4200000
+41 1 4220000
+41 1 4240000
+41 1 4260000
+41 1 4280000
+41 1 4300000
+41 1 4320000
+41 1 4340000
+41 1 4360000
+41 1 4380000
+41 1 4400000
+41 1 4420000
+41 1 4440000
+41 1 4460000
+41 1 4480000
+41 1 4500000
+41 1 4520000
+41 1 4540000
+41 1 4560000
+41 1 4580000
+41 1 4600000
+41 1 4620000
+41 1 4640000
+41 1 4660000
+41 1 4680000
+41 1 4700000
+41 1 4720000
+41 1 4740000
+41 1 4760000
+41 1 4780000
+41 1 4800000
+41 1 4820000
+41 1 4840000
+41 1 4860000
+41 1 4880000
+41 1 4900000
+41 1 4920000
+41 1 4940000
+41 1 4960000
+41 1 4980000
+41 1 5000000
+41 1 5020000
+41 1 5040000
+41 1 5060000
+41 1 5080000
+41 1 5100000
+41 1 5120000
+41 1 5140000
+41 1 5160000
+41 1 5180000
+41 1 5200000
+41 1 5220000
+41 1 5240000
+41 1 5260000
+41 1 5280000
+41 1 5300000
+41 1 5320000
+41 1 5340000
+41 1 5360000
+41 1 5380000
+41 1 5400000
+41 1 5420000
+41 1 5440000
+41 1 5460000
+41 1 5480000
+41 1 5500000
+41 1 5520000
+41 1 5540000
+41 1 5560000
+41 1 5580000
+41 1 5600000
+41 1 5620000
+41 1 5640000
+41 1 5660000
+41 1 5680000
+41 1 5700000
+41 1 5720000
+41 1 5740000
+41 1 5760000
+41 1 5780000
+41 1 5800000
+41 1 5820000
+41 1 5840000
+41 1 5860000
+41 1 5880000
+41 1 5900000
+41 1 5920000
+41 1 5940000
+41 1 5960000
+41 1 5980000
+41 1 6000000
+41 1 6020000
+41 1 6040000
+41 1 6060000
+41 1 6080000
+41 1 6100000
+41 1 6120000
+41 1 6140000
+41 1 6160000
+41 1 6180000
+41 1 6200000
+41 1 6220000
+41 1 6240000
+41 1 6260000
+41 1 6280000
+41 1 6300000
+41 1 6320000
+41 1 6340000
+41 1 6360000
+41 1 6380000
+41 1 6400000
+41 1 6420000
+41 1 6440000
+41 1 6460000
+41 1 6480000
+41 1 6500000
+41 1 6520000
+41 1 6540000
+41 1 6560000
+41 1 6580000
+41 1 6600000
+41 1 6620000
+41 1 6640000
+41 1 6660000
+41 1 6680000
+41 1 6700000
+41 1 6720000
+41 1 6740000
+41 1 6760000
+41 1 6780000
+41 1 6800000
+41 1 6820000
+41 1 6840000
+41 1 6860000
+41 1 6880000
+41 1 6900000
+41 1 6920000
+41 1 6940000
+41 1 6960000
+41 1 6980000
+41 1 7000000
+41 1 7020000
+41 1 7040000
+41 1 7060000
+41 1 7080000
+41 1 7100000
+41 1 7120000
+41 1 7140000
+41 1 7160000
+41 1 7180000
+41 1 7200000
+41 1 7220000
+41 1 7240000
+41 1 7260000
+41 1 7280000
+41 1 7300000
+41 1 7320000
+41 1 7340000
+41 1 7360000
+41 1 7380000
+41 1 7400000
+41 1 7420000
+41 1 7440000
+41 1 7460000
+41 1 7480000
+41 1 7500000
+41 1 7520000
+41 1 7540000
+41 1 7560000
+41 1 7580000
+41 1 7600000
+41 1 7620000
+41 1 7640000
+41 1 7660000
+41 1 7680000
+41 1 7700000
+41 1 7720000
+41 1 7740000
+41 1 7760000
+41 1 7780000
+41 1 7800000
+41 1 7820000
+41 1 7840000
+41 1 7860000
+41 1 7880000
+41 1 7900000
+41 1 7920000
+41 1 7940000
+41 1 7960000
+41 1 7980000
+41 1 8000000
+41 1 8020000
+41 1 8040000
+41 1 8060000
+41 1 8080000
+41 1 8100000
+41 1 8120000
+41 1 8140000
+41 1 8160000
+41 1 8180000
+41 1 8200000
+41 1 8220000
+41 1 8240000
+41 1 8260000
+41 1 8280000
+41 1 8300000
+41 1 8320000
+41 1 8340000
+41 1 8360000
+41 1 8380000
+41 1 8400000
+41 1 8420000
+41 1 8440000
+41 1 8460000
+41 1 8480000
+41 1 8500000
+41 1 8520000
+41 1 8540000
+41 1 8560000
+41 1 8580000
+41 1 8600000
+41 1 8620000
+41 1 8640000
+41 1 8660000
+41 1 8680000
+41 1 8700000
+41 1 8720000
+41 1 8740000
+41 1 8760000
+41 1 8780000
+41 1 8800000
+41 1 8820000
+41 1 8840000
+41 1 8860000
+41 1 8880000
+41 1 8900000
+41 1 8920000
+41 1 8940000
+41 1 8960000
+41 1 8980000
+41 1 9000000
+41 1 9020000
+41 1 9040000
+41 1 9060000
+41 1 9080000
+41 1 9100000
+41 1 9120000
+41 1 9140000
+41 1 9160000
+41 1 9180000
+41 1 9200000
+41 1 9220000
+41 1 9240000
+41 1 9260000
+41 1 9280000
+41 1 9300000
+41 1 9320000
+41 1 9340000
+41 1 9360000
+41 1 9380000
+41 1 9400000
+41 1 9420000
+41 1 9440000
+41 1 9460000
+41 1 9480000
+41 1 9500000
+41 1 9520000
+41 1 9540000
+41 1 9560000
+41 1 9580000
+41 1 9600000
+41 1 9620000
+41 1 9640000
+41 1 9660000
+41 1 9680000
+41 1 9700000
+41 1 9720000
+41 1 9740000
+41 1 9760000
+41 1 9780000
+41 1 9800000
+41 1 9820000
+41 1 9840000
+41 1 9860000
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.av1 b/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.av1
new file mode 100644
index 0000000..1d67af9
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.av1
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.info b/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.info
new file mode 100644
index 0000000..cc51168
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.info
@@ -0,0 +1,300 @@
+6027 1 0
+6879 0 33000
+5 0 66000
+532 0 100000
+5 0 133000
+2458 0 166000
+5 0 200000
+475 0 233000
+5 0 266000
+1262 0 300000
+5 0 333000
+554 0 366000
+27 0 400000
+6971 0 433000
+5 0 466000
+601 0 500000
+5 0 533000
+3276 0 566000
+5 0 600000
+658 0 633000
+5 0 666000
+1680 0 700000
+5 0 733000
+610 0 766000
+24 0 800000
+6728 0 833000
+5 0 866000
+764 0 900000
+5 0 933000
+2656 0 966000
+5 0 1000000
+462 0 1033000
+5 0 1066000
+1459 0 1100000
+5 0 1133000
+608 0 1166000
+24 0 1200000
+7038 0 1233000
+5 0 1266000
+721 0 1300000
+5 0 1333000
+3102 0 1366000
+5 0 1400000
+752 0 1433000
+5 0 1466000
+1815 0 1500000
+5 0 1533000
+755 0 1566000
+25 0 1600000
+7657 0 1633000
+5 0 1666000
+852 0 1700000
+5 0 1733000
+3537 0 1766000
+5 0 1800000
+673 0 1833000
+5 0 1866000
+1774 0 1900000
+5 0 1933000
+554 0 1966000
+24 0 2000000
+8028 0 2033000
+5 0 2066000
+715 0 2100000
+5 0 2133000
+3395 0 2166000
+5 0 2200000
+736 0 2233000
+5 0 2266000
+1759 0 2300000
+5 0 2333000
+605 0 2366000
+23 0 2400000
+7651 0 2433000
+5 0 2466000
+619 0 2500000
+5 0 2533000
+2788 0 2566000
+5 0 2600000
+556 0 2633000
+5 0 2666000
+1335 0 2700000
+5 0 2733000
+521 0 2766000
+24 0 2800000
+2274 0 2833000
+676 0 2866000
+25 0 2900000
+6224 0 2933000
+5798 0 2966000
+5 0 3000000
+448 0 3033000
+5 0 3066000
+1950 0 3100000
+5 0 3133000
+386 0 3166000
+5 0 3200000
+1218 0 3233000
+5 0 3266000
+1316 0 3300000
+5 0 3333000
+580 0 3366000
+26 0 3400000
+6673 0 3433000
+5 0 3466000
+473 0 3500000
+5 0 3533000
+2467 0 3566000
+5 0 3600000
+429 0 3633000
+5 0 3666000
+1420 0 3700000
+5 0 3733000
+583 0 3766000
+29 0 3800000
+8492 0 3833000
+5 0 3866000
+720 0 3900000
+5 0 3933000
+3635 0 3966000
+5 0 4000000
+621 0 4033000
+5 0 4066000
+1969 0 4100000
+5 0 4133000
+49 0 4166000
+25 0 4200000
+7416 0 4233000
+5 0 4266000
+947 0 4300000
+5 0 4333000
+3713 0 4366000
+5 0 4400000
+714 0 4433000
+5 0 4466000
+2003 0 4500000
+5 0 4533000
+750 0 4566000
+25 0 4600000
+8470 0 4633000
+5 0 4666000
+737 0 4700000
+5 0 4733000
+4094 0 4766000
+5 0 4800000
+1019 0 4833000
+5 0 4866000
+2160 0 4900000
+5 0 4933000
+828 0 4966000
+24 0 5000000
+9282 0 5033000
+5 0 5066000
+655 0 5100000
+5 0 5133000
+3491 0 5166000
+5 0 5200000
+651 0 5233000
+5 0 5266000
+1906 0 5300000
+5 0 5333000
+662 0 5366000
+24 0 5400000
+9724 0 5433000
+5 0 5466000
+617 0 5500000
+5 0 5533000
+3145 0 5566000
+5 0 5600000
+578 0 5633000
+5 0 5666000
+1592 0 5700000
+5 0 5733000
+569 0 5766000
+25 0 5800000
+10015 0 5833000
+5 0 5866000
+609 0 5900000
+5 0 5933000
+3618 0 5966000
+5 0 6000000
+734 0 6033000
+5 0 6066000
+1748 0 6100000
+5 0 6133000
+550 0 6166000
+24 0 6200000
+8806 0 6233000
+5 0 6266000
+498 0 6300000
+5 0 6333000
+2913 0 6366000
+5 0 6400000
+597 0 6433000
+5 0 6466000
+1235 0 6500000
+5 0 6533000
+362 0 6566000
+24 0 6600000
+6592 0 6633000
+5 0 6666000
+468 0 6700000
+5 0 6733000
+1920 0 6766000
+5 0 6800000
+419 0 6833000
+5 0 6866000
+843 0 6900000
+5 0 6933000
+237 0 6966000
+24 0 7000000
+2687 0 7033000
+5 0 7066000
+399 0 7100000
+5 0 7133000
+200 0 7166000
+143 0 7200000
+25 0 7233000
+12603 0 7266000
+1139 0 7300000
+5 0 7333000
+56 0 7366000
+5 0 7400000
+273 0 7433000
+5 0 7466000
+48 0 7500000
+5 0 7533000
+102 0 7566000
+5 0 7600000
+39 0 7633000
+24 0 7666000
+3635 0 7700000
+5 0 7733000
+46 0 7766000
+5 0 7800000
+647 0 7833000
+5 0 7866000
+61 0 7900000
+5 0 7933000
+824 0 7966000
+5 0 8000000
+691 0 8033000
+27 0 8066000
+4573 0 8100000
+5 0 8133000
+473 0 8166000
+5 0 8200000
+1637 0 8233000
+5 0 8266000
+451 0 8300000
+5 0 8333000
+969 0 8366000
+5 0 8400000
+234 0 8433000
+24 0 8466000
+3361 0 8500000
+5 0 8533000
+168 0 8566000
+5 0 8600000
+662 0 8633000
+5 0 8666000
+129 0 8700000
+5 0 8733000
+443 0 8766000
+5 0 8800000
+183 0 8833000
+23 0 8866000
+2769 0 8900000
+5 0 8933000
+182 0 8966000
+5 0 9000000
+890 0 9033000
+5 0 9066000
+171 0 9100000
+5 0 9133000
+599 0 9166000
+5 0 9200000
+236 0 9233000
+23 0 9266000
+2316 0 9300000
+5 0 9333000
+333 0 9366000
+5 0 9400000
+759 0 9433000
+5 0 9466000
+210 0 9500000
+5 0 9533000
+324 0 9566000
+5 0 9600000
+98 0 9633000
+23 0 9666000
+1107 0 9700000
+5 0 9733000
+42 0 9766000
+5 0 9800000
+145 0 9833000
+5 0 9866000
+109 0 9900000
+89 0 9933000
+24 0 9966000
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.av1 b/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.av1
new file mode 100644
index 0000000..529bace
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.av1
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.info b/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.info
new file mode 100644
index 0000000..fca7833
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.info
@@ -0,0 +1,167 @@
+12571 1 0
+9881 0 33000
+5 0 66000
+544 0 100000
+5 0 133000
+2642 0 166000
+5 0 200000
+531 0 233000
+5 0 266000
+1359 0 300000
+5 0 333000
+551 0 366000
+28 0 400000
+10791 0 433000
+5 0 466000
+655 0 500000
+5 0 533000
+3769 0 566000
+5 0 600000
+711 0 633000
+5 0 666000
+2004 0 700000
+5 0 733000
+657 0 766000
+26 0 800000
+8969 0 833000
+5 0 866000
+630 0 900000
+5 0 933000
+2787 0 966000
+5 0 1000000
+404 0 1033000
+5 0 1066000
+1518 0 1100000
+5 0 1133000
+493 0 1166000
+26 0 1200000
+9900 0 1233000
+5 0 1266000
+620 0 1300000
+5 0 1333000
+3072 0 1366000
+5 0 1400000
+668 0 1433000
+5 0 1466000
+1821 0 1500000
+5 0 1533000
+682 0 1566000
+26 0 1600000
+9560 0 1633000
+5 0 1666000
+667 0 1700000
+5 0 1733000
+3249 0 1766000
+5 0 1800000
+589 0 1833000
+5 0 1866000
+1816 0 1900000
+5 0 1933000
+548 0 1966000
+26 0 2000000
+9916 0 2033000
+5 0 2066000
+628 0 2100000
+5 0 2133000
+3034 0 2166000
+5 0 2200000
+590 0 2233000
+5 0 2266000
+1581 0 2300000
+5 0 2333000
+524 0 2366000
+26 0 2400000
+8182 0 2433000
+5 0 2466000
+552 0 2500000
+5 0 2533000
+2412 0 2566000
+5 0 2600000
+489 0 2633000
+5 0 2666000
+1227 0 2700000
+5 0 2733000
+432 0 2766000
+26 0 2800000
+2017 0 2833000
+516 0 2866000
+26 0 2900000
+16619 0 2933000
+6710 0 2966000
+5 0 3000000
+425 0 3033000
+5 0 3066000
+1964 0 3100000
+5 0 3133000
+386 0 3166000
+5 0 3200000
+1129 0 3233000
+5 0 3266000
+1156 0 3300000
+5 0 3333000
+486 0 3366000
+28 0 3400000
+10304 0 3433000
+5 0 3466000
+412 0 3500000
+5 0 3533000
+2608 0 3566000
+5 0 3600000
+397 0 3633000
+5 0 3666000
+1514 0 3700000
+5 0 3733000
+533 0 3766000
+26 0 3800000
+11698 0 3833000
+5 0 3866000
+542 0 3900000
+5 0 3933000
+3334 0 3966000
+5 0 4000000
+547 0 4033000
+5 0 4066000
+1809 0 4100000
+5 0 4133000
+55 0 4166000
+26 0 4200000
+9496 0 4233000
+5 0 4266000
+658 0 4300000
+5 0 4333000
+3232 0 4366000
+5 0 4400000
+600 0 4433000
+5 0 4466000
+1766 0 4500000
+5 0 4533000
+550 0 4566000
+25 0 4600000
+9951 0 4633000
+5 0 4666000
+624 0 4700000
+5 0 4733000
+3617 0 4766000
+5 0 4800000
+644 0 4833000
+5 0 4866000
+1841 0 4900000
+5 0 4933000
+649 0 4966000
+25 0 5000000
+9901 0 5033000
+5 0 5066000
+515 0 5100000
+5 0 5133000
+2814 0 5166000
+5 0 5200000
+511 0 5233000
+5 0 5266000
+1521 0 5300000
+5 0 5333000
+509 0 5366000
+26 0 5400000
+10579 0 5433000
+5 0 5466000
+575 0 5500000
+5 0 5533000
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz_multi_frame.info
new file mode 100644
index 0000000..575c75f
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz_multi_frame.info
@@ -0,0 +1,385 @@
+576 1 0
+576 1 24000
+1152 1 48000
+1152 1 96000
+1152 1 144000
+1152 1 192000
+1728 1 240000
+1728 1 312000
+1728 1 384000
+1728 1 456000
+1728 1 528000
+2304 1 600000
+2304 1 696000
+2304 1 792000
+2304 1 888000
+2304 1 984000
+576 1 1080000
+576 1 1104000
+576 1 1128000
+576 1 1152000
+576 1 1176000
+576 1 1200000
+576 1 1224000
+576 1 1248000
+576 1 1272000
+576 1 1296000
+576 1 1320000
+576 1 1344000
+576 1 1368000
+576 1 1392000
+576 1 1416000
+576 1 1440000
+576 1 1464000
+576 1 1488000
+576 1 1512000
+576 1 1536000
+576 1 1560000
+576 1 1584000
+576 1 1608000
+576 1 1632000
+576 1 1656000
+576 1 1680000
+576 1 1704000
+576 1 1728000
+576 1 1752000
+576 1 1776000
+576 1 1800000
+576 1 1824000
+576 1 1848000
+576 1 1872000
+576 1 1896000
+576 1 1920000
+576 1 1944000
+576 1 1968000
+576 1 1992000
+576 1 2016000
+576 1 2040000
+576 1 2064000
+576 1 2088000
+576 1 2112000
+576 1 2136000
+576 1 2160000
+576 1 2184000
+576 1 2208000
+576 1 2232000
+576 1 2256000
+576 1 2280000
+576 1 2304000
+576 1 2328000
+576 1 2352000
+576 1 2376000
+576 1 2400000
+576 1 2424000
+576 1 2448000
+576 1 2472000
+576 1 2496000
+576 1 2520000
+576 1 2544000
+576 1 2568000
+576 1 2592000
+576 1 2616000
+576 1 2640000
+576 1 2664000
+576 1 2688000
+576 1 2712000
+576 1 2736000
+576 1 2760000
+576 1 2784000
+576 1 2808000
+576 1 2832000
+576 1 2856000
+576 1 2880000
+576 1 2904000
+576 1 2928000
+576 1 2952000
+576 1 2976000
+576 1 3000000
+576 1 3024000
+576 1 3048000
+576 1 3072000
+576 1 3096000
+576 1 3120000
+576 1 3144000
+576 1 3168000
+576 1 3192000
+576 1 3216000
+576 1 3240000
+576 1 3264000
+576 1 3288000
+576 1 3312000
+576 1 3336000
+576 1 3360000
+576 1 3384000
+576 1 3408000
+576 1 3432000
+576 1 3456000
+576 1 3480000
+576 1 3504000
+576 1 3528000
+576 1 3552000
+576 1 3576000
+576 1 3600000
+576 1 3624000
+576 1 3648000
+576 1 3672000
+576 1 3696000
+576 1 3720000
+576 1 3744000
+576 1 3768000
+576 1 3792000
+576 1 3816000
+576 1 3840000
+576 1 3864000
+576 1 3888000
+576 1 3912000
+576 1 3936000
+576 1 3960000
+576 1 3984000
+576 1 4008000
+576 1 4032000
+576 1 4056000
+576 1 4080000
+576 1 4104000
+576 1 4128000
+576 1 4152000
+576 1 4176000
+576 1 4200000
+576 1 4224000
+576 1 4248000
+576 1 4272000
+576 1 4296000
+576 1 4320000
+576 1 4344000
+576 1 4368000
+576 1 4392000
+576 1 4416000
+576 1 4440000
+576 1 4464000
+576 1 4488000
+576 1 4512000
+576 1 4536000
+576 1 4560000
+576 1 4584000
+576 1 4608000
+576 1 4632000
+576 1 4656000
+576 1 4680000
+576 1 4704000
+576 1 4728000
+576 1 4752000
+576 1 4776000
+576 1 4800000
+576 1 4824000
+576 1 4848000
+576 1 4872000
+576 1 4896000
+576 1 4920000
+576 1 4944000
+576 1 4968000
+576 1 4992000
+576 1 5016000
+576 1 5040000
+576 1 5064000
+576 1 5088000
+576 1 5112000
+576 1 5136000
+576 1 5160000
+576 1 5184000
+576 1 5208000
+576 1 5232000
+576 1 5256000
+576 1 5280000
+576 1 5304000
+576 1 5328000
+576 1 5352000
+576 1 5376000
+576 1 5400000
+576 1 5424000
+576 1 5448000
+576 1 5472000
+576 1 5496000
+576 1 5520000
+576 1 5544000
+576 1 5568000
+576 1 5592000
+576 1 5616000
+576 1 5640000
+576 1 5664000
+576 1 5688000
+576 1 5712000
+576 1 5736000
+576 1 5760000
+576 1 5784000
+576 1 5808000
+576 1 5832000
+576 1 5856000
+576 1 5880000
+576 1 5904000
+576 1 5928000
+576 1 5952000
+576 1 5976000
+576 1 6000000
+576 1 6024000
+576 1 6048000
+576 1 6072000
+576 1 6096000
+576 1 6120000
+576 1 6144000
+576 1 6168000
+576 1 6192000
+576 1 6216000
+576 1 6240000
+576 1 6264000
+576 1 6288000
+576 1 6312000
+576 1 6336000
+576 1 6360000
+576 1 6384000
+576 1 6408000
+576 1 6432000
+576 1 6456000
+576 1 6480000
+576 1 6504000
+576 1 6528000
+576 1 6552000
+576 1 6576000
+576 1 6600000
+576 1 6624000
+576 1 6648000
+576 1 6672000
+576 1 6696000
+576 1 6720000
+576 1 6744000
+576 1 6768000
+576 1 6792000
+576 1 6816000
+576 1 6840000
+576 1 6864000
+576 1 6888000
+576 1 6912000
+576 1 6936000
+576 1 6960000
+576 1 6984000
+576 1 7008000
+576 1 7032000
+576 1 7056000
+576 1 7080000
+576 1 7104000
+576 1 7128000
+576 1 7152000
+576 1 7176000
+576 1 7200000
+576 1 7224000
+576 1 7248000
+576 1 7272000
+576 1 7296000
+576 1 7320000
+576 1 7344000
+576 1 7368000
+576 1 7392000
+576 1 7416000
+576 1 7440000
+576 1 7464000
+576 1 7488000
+576 1 7512000
+576 1 7536000
+576 1 7560000
+576 1 7584000
+576 1 7608000
+576 1 7632000
+576 1 7656000
+576 1 7680000
+576 1 7704000
+576 1 7728000
+576 1 7752000
+576 1 7776000
+576 1 7800000
+576 1 7824000
+576 1 7848000
+576 1 7872000
+576 1 7896000
+576 1 7920000
+576 1 7944000
+576 1 7968000
+576 1 7992000
+576 1 8016000
+576 1 8040000
+576 1 8064000
+576 1 8088000
+576 1 8112000
+576 1 8136000
+576 1 8160000
+576 1 8184000
+576 1 8208000
+576 1 8232000
+576 1 8256000
+576 1 8280000
+576 1 8304000
+576 1 8328000
+576 1 8352000
+576 1 8376000
+576 1 8400000
+576 1 8424000
+576 1 8448000
+576 1 8472000
+576 1 8496000
+576 1 8520000
+576 1 8544000
+576 1 8568000
+576 1 8592000
+576 1 8616000
+576 1 8640000
+576 1 8664000
+576 1 8688000
+576 1 8712000
+576 1 8736000
+576 1 8760000
+576 1 8784000
+576 1 8808000
+576 1 8832000
+576 1 8856000
+576 1 8880000
+576 1 8904000
+576 1 8928000
+576 1 8952000
+576 1 8976000
+576 1 9000000
+576 1 9024000
+576 1 9048000
+576 1 9072000
+576 1 9096000
+576 1 9120000
+576 1 9144000
+576 1 9168000
+576 1 9192000
+576 1 9216000
+576 1 9240000
+576 1 9264000
+576 1 9288000
+576 1 9312000
+576 1 9336000
+576 1 9360000
+576 1 9384000
+576 1 9408000
+576 1 9432000
+576 1 9456000
+576 1 9480000
+576 1 9504000
+576 1 9528000
+576 1 9552000
+576 1 9576000
+576 1 9600000
+576 1 9624000
+576 1 9648000
+576 1 9672000
+576 1 9696000
+576 1 9720000
+576 1 9744000
+576 1 9768000
+576 1 9792000
+576 1 9816000
+576 1 9840000
+576 1 9864000
+576 1 9888000
+576 1 9912000
diff --git a/media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz_multi_frame.info
new file mode 100644
index 0000000..0176eaf4
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz_multi_frame.info
@@ -0,0 +1,807 @@
+32 1 0
+32 1 20000
+64 1 40000
+64 1 80000
+64 1 120000
+96 1 160000
+96 1 220000
+96 1 280000
+96 1 340000
+128 1 400000
+128 1 480000
+128 1 560000
+128 1 640000
+128 1 720000
+32 1 800000
+32 1 820000
+32 1 840000
+32 1 860000
+32 1 880000
+32 1 900000
+32 1 920000
+32 1 940000
+32 1 960000
+32 1 980000
+32 1 1000000
+32 1 1020000
+32 1 1040000
+32 1 1060000
+32 1 1080000
+32 1 1100000
+32 1 1120000
+32 1 1140000
+32 1 1160000
+32 1 1180000
+32 1 1200000
+32 1 1220000
+32 1 1240000
+32 1 1260000
+32 1 1280000
+32 1 1300000
+32 1 1320000
+32 1 1340000
+32 1 1360000
+32 1 1380000
+32 1 1400000
+32 1 1420000
+32 1 1440000
+32 1 1460000
+32 1 1480000
+32 1 1500000
+32 1 1520000
+32 1 1540000
+32 1 1560000
+32 1 1580000
+32 1 1600000
+32 1 1620000
+32 1 1640000
+32 1 1660000
+32 1 1680000
+32 1 1700000
+32 1 1720000
+32 1 1740000
+32 1 1760000
+32 1 1780000
+32 1 1800000
+32 1 1820000
+32 1 1840000
+32 1 1860000
+32 1 1880000
+32 1 1900000
+32 1 1920000
+32 1 1940000
+32 1 1960000
+32 1 1980000
+32 1 2000000
+32 1 2020000
+32 1 2040000
+32 1 2060000
+32 1 2080000
+32 1 2100000
+32 1 2120000
+32 1 2140000
+32 1 2160000
+32 1 2180000
+32 1 2200000
+32 1 2220000
+32 1 2240000
+32 1 2260000
+32 1 2280000
+32 1 2300000
+32 1 2320000
+32 1 2340000
+32 1 2360000
+32 1 2380000
+32 1 2400000
+32 1 2420000
+32 1 2440000
+32 1 2460000
+32 1 2480000
+32 1 2500000
+32 1 2520000
+32 1 2540000
+32 1 2560000
+32 1 2580000
+32 1 2600000
+32 1 2620000
+32 1 2640000
+32 1 2660000
+32 1 2680000
+32 1 2700000
+32 1 2720000
+32 1 2740000
+32 1 2760000
+32 1 2780000
+32 1 2800000
+32 1 2820000
+32 1 2840000
+32 1 2860000
+32 1 2880000
+32 1 2900000
+32 1 2920000
+32 1 2940000
+32 1 2960000
+32 1 2980000
+32 1 3000000
+32 1 3020000
+32 1 3040000
+32 1 3060000
+32 1 3080000
+32 1 3100000
+32 1 3120000
+32 1 3140000
+32 1 3160000
+32 1 3180000
+32 1 3200000
+32 1 3220000
+32 1 3240000
+32 1 3260000
+32 1 3280000
+32 1 3300000
+32 1 3320000
+32 1 3340000
+32 1 3360000
+32 1 3380000
+32 1 3400000
+32 1 3420000
+32 1 3440000
+32 1 3460000
+32 1 3480000
+32 1 3500000
+32 1 3520000
+32 1 3540000
+32 1 3560000
+32 1 3580000
+32 1 3600000
+32 1 3620000
+32 1 3640000
+32 1 3660000
+32 1 3680000
+32 1 3700000
+32 1 3720000
+32 1 3740000
+32 1 3760000
+32 1 3780000
+32 1 3800000
+32 1 3820000
+32 1 3840000
+32 1 3860000
+32 1 3880000
+32 1 3900000
+32 1 3920000
+32 1 3940000
+32 1 3960000
+32 1 3980000
+32 1 4000000
+32 1 4020000
+32 1 4040000
+32 1 4060000
+32 1 4080000
+32 1 4100000
+32 1 4120000
+32 1 4140000
+32 1 4160000
+32 1 4180000
+32 1 4200000
+32 1 4220000
+32 1 4240000
+32 1 4260000
+32 1 4280000
+32 1 4300000
+32 1 4320000
+32 1 4340000
+32 1 4360000
+32 1 4380000
+32 1 4400000
+32 1 4420000
+32 1 4440000
+32 1 4460000
+32 1 4480000
+32 1 4500000
+32 1 4520000
+32 1 4540000
+32 1 4560000
+32 1 4580000
+32 1 4600000
+32 1 4620000
+32 1 4640000
+32 1 4660000
+32 1 4680000
+32 1 4700000
+32 1 4720000
+32 1 4740000
+32 1 4760000
+32 1 4780000
+32 1 4800000
+32 1 4820000
+32 1 4840000
+32 1 4860000
+32 1 4880000
+32 1 4900000
+32 1 4920000
+32 1 4940000
+32 1 4960000
+32 1 4980000
+32 1 5000000
+32 1 5020000
+32 1 5040000
+32 1 5060000
+32 1 5080000
+32 1 5100000
+32 1 5120000
+32 1 5140000
+32 1 5160000
+32 1 5180000
+32 1 5200000
+32 1 5220000
+32 1 5240000
+32 1 5260000
+32 1 5280000
+32 1 5300000
+32 1 5320000
+32 1 5340000
+32 1 5360000
+32 1 5380000
+32 1 5400000
+32 1 5420000
+32 1 5440000
+32 1 5460000
+32 1 5480000
+32 1 5500000
+32 1 5520000
+32 1 5540000
+32 1 5560000
+32 1 5580000
+32 1 5600000
+32 1 5620000
+32 1 5640000
+32 1 5660000
+32 1 5680000
+32 1 5700000
+32 1 5720000
+32 1 5740000
+32 1 5760000
+32 1 5780000
+32 1 5800000
+32 1 5820000
+32 1 5840000
+32 1 5860000
+32 1 5880000
+32 1 5900000
+32 1 5920000
+32 1 5940000
+32 1 5960000
+32 1 5980000
+32 1 6000000
+32 1 6020000
+32 1 6040000
+32 1 6060000
+32 1 6080000
+32 1 6100000
+32 1 6120000
+32 1 6140000
+32 1 6160000
+32 1 6180000
+32 1 6200000
+32 1 6220000
+32 1 6240000
+32 1 6260000
+32 1 6280000
+32 1 6300000
+32 1 6320000
+32 1 6340000
+32 1 6360000
+32 1 6380000
+32 1 6400000
+32 1 6420000
+32 1 6440000
+32 1 6460000
+32 1 6480000
+32 1 6500000
+32 1 6520000
+32 1 6540000
+32 1 6560000
+32 1 6580000
+32 1 6600000
+32 1 6620000
+32 1 6640000
+32 1 6660000
+32 1 6680000
+32 1 6700000
+32 1 6720000
+32 1 6740000
+32 1 6760000
+32 1 6780000
+32 1 6800000
+32 1 6820000
+32 1 6840000
+32 1 6860000
+32 1 6880000
+32 1 6900000
+32 1 6920000
+32 1 6940000
+32 1 6960000
+32 1 6980000
+32 1 7000000
+32 1 7020000
+32 1 7040000
+32 1 7060000
+32 1 7080000
+32 1 7100000
+32 1 7120000
+32 1 7140000
+32 1 7160000
+32 1 7180000
+32 1 7200000
+32 1 7220000
+32 1 7240000
+32 1 7260000
+32 1 7280000
+32 1 7300000
+32 1 7320000
+32 1 7340000
+32 1 7360000
+32 1 7380000
+32 1 7400000
+32 1 7420000
+32 1 7440000
+32 1 7460000
+32 1 7480000
+32 1 7500000
+32 1 7520000
+32 1 7540000
+32 1 7560000
+32 1 7580000
+32 1 7600000
+32 1 7620000
+32 1 7640000
+32 1 7660000
+32 1 7680000
+32 1 7700000
+32 1 7720000
+32 1 7740000
+32 1 7760000
+32 1 7780000
+32 1 7800000
+32 1 7820000
+32 1 7840000
+32 1 7860000
+32 1 7880000
+32 1 7900000
+32 1 7920000
+32 1 7940000
+32 1 7960000
+32 1 7980000
+32 1 8000000
+32 1 8020000
+32 1 8040000
+32 1 8060000
+32 1 8080000
+32 1 8100000
+32 1 8120000
+32 1 8140000
+32 1 8160000
+32 1 8180000
+32 1 8200000
+32 1 8220000
+32 1 8240000
+32 1 8260000
+32 1 8280000
+32 1 8300000
+32 1 8320000
+32 1 8340000
+32 1 8360000
+32 1 8380000
+32 1 8400000
+32 1 8420000
+32 1 8440000
+32 1 8460000
+32 1 8480000
+32 1 8500000
+32 1 8520000
+32 1 8540000
+32 1 8560000
+32 1 8580000
+32 1 8600000
+32 1 8620000
+32 1 8640000
+32 1 8660000
+32 1 8680000
+32 1 8700000
+32 1 8720000
+32 1 8740000
+32 1 8760000
+32 1 8780000
+32 1 8800000
+32 1 8820000
+32 1 8840000
+32 1 8860000
+32 1 8880000
+32 1 8900000
+32 1 8920000
+32 1 8940000
+32 1 8960000
+32 1 8980000
+32 1 9000000
+32 1 9020000
+32 1 9040000
+32 1 9060000
+32 1 9080000
+32 1 9100000
+32 1 9120000
+32 1 9140000
+32 1 9160000
+32 1 9180000
+32 1 9200000
+32 1 9220000
+32 1 9240000
+32 1 9260000
+32 1 9280000
+32 1 9300000
+32 1 9320000
+32 1 9340000
+32 1 9360000
+32 1 9380000
+32 1 9400000
+32 1 9420000
+32 1 9440000
+32 1 9460000
+32 1 9480000
+32 1 9500000
+32 1 9520000
+32 1 9540000
+32 1 9560000
+32 1 9580000
+32 1 9600000
+32 1 9620000
+32 1 9640000
+32 1 9660000
+32 1 9680000
+32 1 9700000
+32 1 9720000
+32 1 9740000
+32 1 9760000
+32 1 9780000
+32 1 9800000
+32 1 9820000
+32 1 9840000
+32 1 9860000
+32 1 9880000
+32 1 9900000
+32 1 9920000
+32 1 9940000
+32 1 9960000
+32 1 9980000
+32 1 10000000
+32 1 10020000
+32 1 10040000
+32 1 10060000
+32 1 10080000
+32 1 10100000
+32 1 10120000
+32 1 10140000
+32 1 10160000
+32 1 10180000
+32 1 10200000
+32 1 10220000
+32 1 10240000
+32 1 10260000
+32 1 10280000
+32 1 10300000
+32 1 10320000
+32 1 10340000
+32 1 10360000
+32 1 10380000
+32 1 10400000
+32 1 10420000
+32 1 10440000
+32 1 10460000
+32 1 10480000
+32 1 10500000
+32 1 10520000
+32 1 10540000
+32 1 10560000
+32 1 10580000
+32 1 10600000
+32 1 10620000
+32 1 10640000
+32 1 10660000
+32 1 10680000
+32 1 10700000
+32 1 10720000
+32 1 10740000
+32 1 10760000
+32 1 10780000
+32 1 10800000
+32 1 10820000
+32 1 10840000
+32 1 10860000
+32 1 10880000
+32 1 10900000
+32 1 10920000
+32 1 10940000
+32 1 10960000
+32 1 10980000
+32 1 11000000
+32 1 11020000
+32 1 11040000
+32 1 11060000
+32 1 11080000
+32 1 11100000
+32 1 11120000
+32 1 11140000
+32 1 11160000
+32 1 11180000
+32 1 11200000
+32 1 11220000
+32 1 11240000
+32 1 11260000
+32 1 11280000
+32 1 11300000
+32 1 11320000
+32 1 11340000
+32 1 11360000
+32 1 11380000
+32 1 11400000
+32 1 11420000
+32 1 11440000
+32 1 11460000
+32 1 11480000
+32 1 11500000
+32 1 11520000
+32 1 11540000
+32 1 11560000
+32 1 11580000
+32 1 11600000
+32 1 11620000
+32 1 11640000
+32 1 11660000
+32 1 11680000
+32 1 11700000
+32 1 11720000
+32 1 11740000
+32 1 11760000
+32 1 11780000
+32 1 11800000
+32 1 11820000
+32 1 11840000
+32 1 11860000
+32 1 11880000
+32 1 11900000
+32 1 11920000
+32 1 11940000
+32 1 11960000
+32 1 11980000
+32 1 12000000
+32 1 12020000
+32 1 12040000
+32 1 12060000
+32 1 12080000
+32 1 12100000
+32 1 12120000
+32 1 12140000
+32 1 12160000
+32 1 12180000
+32 1 12200000
+32 1 12220000
+32 1 12240000
+32 1 12260000
+32 1 12280000
+32 1 12300000
+32 1 12320000
+32 1 12340000
+32 1 12360000
+32 1 12380000
+32 1 12400000
+32 1 12420000
+32 1 12440000
+32 1 12460000
+32 1 12480000
+32 1 12500000
+32 1 12520000
+32 1 12540000
+32 1 12560000
+32 1 12580000
+32 1 12600000
+32 1 12620000
+32 1 12640000
+32 1 12660000
+32 1 12680000
+32 1 12700000
+32 1 12720000
+32 1 12740000
+32 1 12760000
+32 1 12780000
+32 1 12800000
+32 1 12820000
+32 1 12840000
+32 1 12860000
+32 1 12880000
+32 1 12900000
+32 1 12920000
+32 1 12940000
+32 1 12960000
+32 1 12980000
+32 1 13000000
+32 1 13020000
+32 1 13040000
+32 1 13060000
+32 1 13080000
+32 1 13100000
+32 1 13120000
+32 1 13140000
+32 1 13160000
+32 1 13180000
+32 1 13200000
+32 1 13220000
+32 1 13240000
+32 1 13260000
+32 1 13280000
+32 1 13300000
+32 1 13320000
+32 1 13340000
+32 1 13360000
+32 1 13380000
+32 1 13400000
+32 1 13420000
+32 1 13440000
+32 1 13460000
+32 1 13480000
+32 1 13500000
+32 1 13520000
+32 1 13540000
+32 1 13560000
+32 1 13580000
+32 1 13600000
+32 1 13620000
+32 1 13640000
+32 1 13660000
+32 1 13680000
+32 1 13700000
+32 1 13720000
+32 1 13740000
+32 1 13760000
+32 1 13780000
+32 1 13800000
+32 1 13820000
+32 1 13840000
+32 1 13860000
+32 1 13880000
+32 1 13900000
+32 1 13920000
+32 1 13940000
+32 1 13960000
+32 1 13980000
+32 1 14000000
+32 1 14020000
+32 1 14040000
+32 1 14060000
+32 1 14080000
+32 1 14100000
+32 1 14120000
+32 1 14140000
+32 1 14160000
+32 1 14180000
+32 1 14200000
+32 1 14220000
+32 1 14240000
+32 1 14260000
+32 1 14280000
+32 1 14300000
+32 1 14320000
+32 1 14340000
+32 1 14360000
+32 1 14380000
+32 1 14400000
+32 1 14420000
+32 1 14440000
+32 1 14460000
+32 1 14480000
+32 1 14500000
+32 1 14520000
+32 1 14540000
+32 1 14560000
+32 1 14580000
+32 1 14600000
+32 1 14620000
+32 1 14640000
+32 1 14660000
+32 1 14680000
+32 1 14700000
+32 1 14720000
+32 1 14740000
+32 1 14760000
+32 1 14780000
+32 1 14800000
+32 1 14820000
+32 1 14840000
+32 1 14860000
+32 1 14880000
+32 1 14900000
+32 1 14920000
+32 1 14940000
+32 1 14960000
+32 1 14980000
+32 1 15000000
+32 1 15020000
+32 1 15040000
+32 1 15060000
+32 1 15080000
+32 1 15100000
+32 1 15120000
+32 1 15140000
+32 1 15160000
+32 1 15180000
+32 1 15200000
+32 1 15220000
+32 1 15240000
+32 1 15260000
+32 1 15280000
+32 1 15300000
+32 1 15320000
+32 1 15340000
+32 1 15360000
+32 1 15380000
+32 1 15400000
+32 1 15420000
+32 1 15440000
+32 1 15460000
+32 1 15480000
+32 1 15500000
+32 1 15520000
+32 1 15540000
+32 1 15560000
+32 1 15580000
+32 1 15600000
+32 1 15620000
+32 1 15640000
+32 1 15660000
+32 1 15680000
+32 1 15700000
+32 1 15720000
+32 1 15740000
+32 1 15760000
+32 1 15780000
+32 1 15800000
+32 1 15820000
+32 1 15840000
+32 1 15860000
+32 1 15880000
+32 1 15900000
+32 1 15920000
+32 1 15940000
+32 1 15960000
+32 1 15980000
+32 1 16000000
+32 1 16020000
+32 1 16040000
+32 1 16060000
+32 1 16080000
+32 1 16100000
+32 1 16120000
+32 1 16140000
+32 1 16160000
+32 1 16180000
+32 1 16200000
+32 1 16220000
+32 1 16240000
+32 1 16260000
+32 1 16280000
+32 1 16300000
+32 1 16320000
+32 1 16340000
+32 1 16360000
+32 1 16380000
+32 1 16400000
+32 1 16420000
+32 1 16440000
+32 1 16460000
+32 1 16480000
+32 1 16500000
+32 1 16520000
+32 1 16540000
+32 1 16560000
+32 1 16580000
+32 1 16600000
+32 1 16620000
+32 1 16640000
diff --git a/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
index 8420cab..8cbb7a7 100644
--- a/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
@@ -101,7 +101,7 @@
const StringToName kStringToName[] = {
{"h263", h263}, {"avc", avc}, {"mpeg2", mpeg2}, {"mpeg4", mpeg4},
- {"hevc", hevc}, {"vp8", vp8}, {"vp9", vp9},
+ {"hevc", hevc}, {"vp8", vp8}, {"vp9", vp9}, {"av1", av1},
};
const size_t kNumStringToName =
@@ -142,79 +142,48 @@
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
for (std::unique_ptr<C2Work>& work : workItems) {
- // handle configuration changes in work done
- if (!work->worklets.empty() &&
- (work->worklets.front()->output.configUpdate.size() != 0)) {
- ALOGV("Config Update");
- std::vector<std::unique_ptr<C2Param>> updates =
- std::move(work->worklets.front()->output.configUpdate);
- std::vector<C2Param*> configParam;
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- for (size_t i = 0; i < updates.size(); ++i) {
- C2Param* param = updates[i].get();
- if (param->index() ==
- C2VideoSizeStreamInfo::output::PARAM_TYPE) {
- ALOGV("Received C2VideoSizeStreamInfo");
- configParam.push_back(param);
- }
- }
- mComponent->config(configParam, C2_DONT_BLOCK, &failures);
- ASSERT_EQ(failures.size(), 0u);
- }
+ if (!work->worklets.empty()) {
+                // For decoder components the current output timestamp is
+                // never less than the previous timestamp
+ typedef std::unique_lock<std::mutex> ULock;
+ bool codecConfig = ((work->worklets.front()->output.flags &
+ C2FrameData::FLAG_CODEC_CONFIG) != 0);
+ if (!codecConfig &&
+ !work->worklets.front()->output.buffers.empty()) {
+ EXPECT_GE(
+ (work->worklets.front()->output.ordinal.timestamp.peeku()),
+ mTimestampUs);
+ mTimestampUs =
+ work->worklets.front()->output.ordinal.timestamp.peeku();
- mFramesReceived++;
- mEos = (work->worklets.front()->output.flags &
- C2FrameData::FLAG_END_OF_STREAM) != 0;
- auto frameIndexIt =
- std::find(mFlushedIndices.begin(), mFlushedIndices.end(),
- work->input.ordinal.frameIndex.peeku());
-
- // For decoder components current timestamp always exceeds
- // previous timestamp
- typedef std::unique_lock<std::mutex> ULock;
- bool codecConfig = ((work->worklets.front()->output.flags &
- C2FrameData::FLAG_CODEC_CONFIG) != 0);
- if (!codecConfig &&
- !work->worklets.front()->output.buffers.empty()) {
- EXPECT_GE(
- (work->worklets.front()->output.ordinal.timestamp.peeku()),
- mTimestampUs);
- mTimestampUs =
- work->worklets.front()->output.ordinal.timestamp.peeku();
-
- ULock l(mQueueLock);
- if (mTimestampDevTest) {
- bool tsHit = false;
- std::list<uint64_t>::iterator it = mTimestampUslist.begin();
- while (it != mTimestampUslist.end()) {
- if (*it == mTimestampUs) {
- mTimestampUslist.erase(it);
- tsHit = true;
- break;
+ ULock l(mQueueLock);
+ if (mTimestampDevTest) {
+ bool tsHit = false;
+ std::list<uint64_t>::iterator it = mTimestampUslist.begin();
+ while (it != mTimestampUslist.end()) {
+ if (*it == mTimestampUs) {
+ mTimestampUslist.erase(it);
+ tsHit = true;
+ break;
+ }
+ it++;
}
- it++;
- }
- if (tsHit == false) {
- if (mTimestampUslist.empty() == false) {
- EXPECT_EQ(tsHit, true)
- << "TimeStamp not recognized";
- } else {
- std::cout << "[ INFO ] Received non-zero "
- "output / TimeStamp not recognized \n";
+ if (tsHit == false) {
+ if (mTimestampUslist.empty() == false) {
+ EXPECT_EQ(tsHit, true)
+ << "TimeStamp not recognized";
+ } else {
+ std::cout << "[ INFO ] Received non-zero "
+ "output / TimeStamp not recognized \n";
+ }
}
}
}
- }
-
- work->input.buffers.clear();
- work->worklets.clear();
- {
- ULock l(mQueueLock);
- mWorkQueue.push_back(std::move(work));
- if (!mFlushedIndices.empty()) {
- mFlushedIndices.erase(frameIndexIt);
- }
- mQueueCondition.notify_all();
+            bool mCsd = false;
+ workDone(mComponent, work, mFlushedIndices, mQueueLock,
+ mQueueCondition, mWorkQueue, mEos, mCsd,
+ mFramesReceived);
+ (void)mCsd;
}
}
}
@@ -227,6 +196,7 @@
hevc,
vp8,
vp9,
+ av1,
unknown_comp,
};
@@ -341,6 +311,11 @@
"bbb_vp9_640x360_1600kbps_30fps.vp9"},
{"bbb_vp9_176x144_285kbps_60fps.info",
"bbb_vp9_640x360_1600kbps_30fps.info"}},
+ {Codec2VideoDecHidlTest::standardComp::av1,
+ {"bbb_av1_640_360.av1",
+ "bbb_av1_176_144.av1"},
+ {"bbb_av1_640_360.info",
+ "bbb_av1_176_144.info"}},
};
for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
@@ -352,11 +327,11 @@
}
}
-void decodeNFrames(const std::shared_ptr<android::Codec2Client::Component> &component,
- std::mutex &queueLock, std::condition_variable &queueCondition,
- std::list<std::unique_ptr<C2Work>> &workQueue,
- std::list<uint64_t> &flushedIndices,
- std::shared_ptr<C2BlockPool> &linearPool,
+void decodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::mutex &queueLock, std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue,
+ std::list<uint64_t>& flushedIndices,
+ std::shared_ptr<C2BlockPool>& linearPool,
std::ifstream& eleStream,
android::Vector<FrameInfo>* Info,
int offset, int range, bool signalEOS = true) {
@@ -397,35 +372,37 @@
int size = (*Info)[frameID].bytesCount;
char* data = (char*)malloc(size);
+ ASSERT_NE(data, nullptr);
eleStream.read(data, size);
ASSERT_EQ(eleStream.gcount(), size);
- std::shared_ptr<C2LinearBlock> block;
- ASSERT_EQ(C2_OK,
- linearPool->fetchLinearBlock(
- size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
- &block));
- ASSERT_TRUE(block);
-
- // Write View
- C2WriteView view = block->map().get();
- if (view.error() != C2_OK) {
- fprintf(stderr, "C2LinearBlock::map() failed : %d", view.error());
- break;
- }
- ASSERT_EQ((size_t)size, view.capacity());
- ASSERT_EQ(0u, view.offset());
- ASSERT_EQ((size_t)size, view.size());
-
- memcpy(view.base(), data, size);
-
work->input.buffers.clear();
- work->input.buffers.emplace_back(new LinearBuffer(block));
+ if (size) {
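+            // Only non-empty frames get a linear block allocated and attached;
+            // empty frames (size == 0) are queued with no input buffers.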
+ std::shared_ptr<C2LinearBlock> block;
+ ASSERT_EQ(C2_OK,
+ linearPool->fetchLinearBlock(
+ size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+ &block));
+ ASSERT_TRUE(block);
+
+ // Write View
+ C2WriteView view = block->map().get();
+ if (view.error() != C2_OK) {
+ fprintf(stderr, "C2LinearBlock::map() failed : %d", view.error());
+ break;
+ }
+ ASSERT_EQ((size_t)size, view.capacity());
+ ASSERT_EQ(0u, view.offset());
+ ASSERT_EQ((size_t)size, view.size());
+
+ memcpy(view.base(), data, size);
+
+ work->input.buffers.emplace_back(new LinearBuffer(block));
+ free(data);
+ }
work->worklets.clear();
work->worklets.emplace_back(new C2Worklet);
- free(data);
-
std::list<std::unique_ptr<C2Work>> items;
items.push_back(std::move(work));
@@ -437,29 +414,6 @@
}
}
-void waitOnInputConsumption(std::mutex& queueLock,
- std::condition_variable& queueCondition,
- std::list<std::unique_ptr<C2Work>>& workQueue,
- size_t bufferCount = MAX_INPUT_BUFFERS) {
- typedef std::unique_lock<std::mutex> ULock;
- uint32_t queueSize;
- uint32_t maxRetry = 0;
- {
- ULock l(queueLock);
- queueSize = workQueue.size();
- }
- while ((maxRetry < MAX_RETRY) && (queueSize < bufferCount)) {
- ULock l(queueLock);
- if (queueSize != workQueue.size()) {
- queueSize = workQueue.size();
- maxRetry = 0;
- } else {
- queueCondition.wait_for(l, TIME_OUT);
- maxRetry++;
- }
- }
-}
-
TEST_F(Codec2VideoDecHidlTest, validateCompName) {
if (mDisableTest) return;
ALOGV("Checks if the given component is a valid video component");
@@ -467,17 +421,21 @@
ASSERT_EQ(mDisableTest, false);
}
+class Codec2VideoDecDecodeTest : public Codec2VideoDecHidlTest,
+ public ::testing::WithParamInterface<int32_t> {
+};
+
// Bitstream Test
-TEST_F(Codec2VideoDecHidlTest, DecodeTest) {
+TEST_P(Codec2VideoDecDecodeTest, DecodeTest) {
description("Decodes input file");
if (mDisableTest) return;
+ uint32_t streamIndex = GetParam();
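+    // streamIndex selects one of the two clips registered per component in
+    // GetURLForComponent().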
char mURL[512], info[512];
std::ifstream eleStream, eleInfo;
-
strcpy(mURL, gEnv->getRes().c_str());
strcpy(info, gEnv->getRes().c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mCompName, mURL, info, streamIndex);
eleInfo.open(info);
ASSERT_EQ(eleInfo.is_open(), true) << mURL << " - file not found";
@@ -498,6 +456,9 @@
eleInfo.close();
ASSERT_EQ(mComponent->start(), C2_OK);
+    // Reset the frames-received counter and the last observed timestamp
+ mFramesReceived = 0;
+ mTimestampUs = 0;
ALOGV("mURL : %s", mURL);
eleStream.open(mURL, std::ifstream::binary);
ASSERT_EQ(eleStream.is_open(), true);
@@ -522,8 +483,11 @@
}
if (mTimestampDevTest) EXPECT_EQ(mTimestampUslist.empty(), true);
+ ASSERT_EQ(mComponent->stop(), C2_OK);
}
+INSTANTIATE_TEST_CASE_P(StreamIndexes, Codec2VideoDecDecodeTest,
+ ::testing::Values(0, 1));
// Adaptive Test
TEST_F(Codec2VideoDecHidlTest, AdaptiveDecodeTest) {
@@ -678,8 +642,8 @@
EXPECT_GE(mFramesReceived, 1U);
ASSERT_EQ(mEos, true);
ASSERT_EQ(mComponent->stop(), C2_OK);
- ASSERT_EQ(mComponent->release(), C2_OK);
}
+ ASSERT_EQ(mComponent->release(), C2_OK);
}
TEST_F(Codec2VideoDecHidlTest, EOSTest) {
@@ -712,7 +676,6 @@
ASSERT_EQ(mComponent->queue(&items), C2_OK);
{
- typedef std::unique_lock<std::mutex> ULock;
ULock l(mQueueLock);
if (mWorkQueue.size() != MAX_INPUT_BUFFERS) {
mQueueCondition.wait_for(l, TIME_OUT);
@@ -723,46 +686,6 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-TEST_F(Codec2VideoDecHidlTest, EmptyBufferTest) {
- description("Tests empty input buffer");
- if (mDisableTest) return;
- typedef std::unique_lock<std::mutex> ULock;
- ASSERT_EQ(mComponent->start(), C2_OK);
- std::unique_ptr<C2Work> work;
- // Prepare C2Work
- {
- ULock l(mQueueLock);
- if (!mWorkQueue.empty()) {
- work.swap(mWorkQueue.front());
- mWorkQueue.pop_front();
- } else {
- ASSERT_TRUE(false) << "mWorkQueue Empty at the start of test";
- }
- }
- ASSERT_NE(work, nullptr);
-
- work->input.flags = (C2FrameData::flags_t)0;
- work->input.ordinal.timestamp = 0;
- work->input.ordinal.frameIndex = 0;
- work->input.buffers.clear();
- work->worklets.clear();
- work->worklets.emplace_back(new C2Worklet);
-
- std::list<std::unique_ptr<C2Work>> items;
- items.push_back(std::move(work));
- ASSERT_EQ(mComponent->queue(&items), C2_OK);
-
- {
- typedef std::unique_lock<std::mutex> ULock;
- ULock l(mQueueLock);
- if (mWorkQueue.size() != MAX_INPUT_BUFFERS) {
- mQueueCondition.wait_for(l, TIME_OUT);
- }
- }
- ASSERT_EQ(mWorkQueue.size(), (size_t)MAX_INPUT_BUFFERS);
- ASSERT_EQ(mComponent->stop(), C2_OK);
-}
-
TEST_F(Codec2VideoDecHidlTest, FlushTest) {
description("Tests Flush calls");
if (mDisableTest) return;
@@ -874,6 +797,69 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
+TEST_F(Codec2VideoDecHidlTest, DecodeTestEmptyBuffersInserted) {
+ description("Decode with multiple empty input frames");
+ if (mDisableTest) return;
+
+ char mURL[512], info[512];
+ std::ifstream eleStream, eleInfo;
+
+ strcpy(mURL, gEnv->getRes().c_str());
+ strcpy(info, gEnv->getRes().c_str());
+ GetURLForComponent(mCompName, mURL, info);
+
+ eleInfo.open(info);
+    ASSERT_EQ(eleInfo.is_open(), true) << info << " - file not found";
+ android::Vector<FrameInfo> Info;
+ int bytesCount = 0;
+ uint32_t frameId = 0;
+ uint32_t flags = 0;
+ uint32_t timestamp = 0;
+ bool codecConfig = false;
+ // This test introduces empty CSD after every 20th frame
+ // and empty input frames at an interval of 5 frames.
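+    // Each line of the .info file is "<bytesCount> <flags> <timestampUs>"; a
+    // flags value of 32 is mapped to C2FrameData::FLAG_CODEC_CONFIG through
+    // the (1 << (flags - 1)) conversion below.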
+ while (1) {
+ if (!(frameId % 5)) {
+ if (!(frameId % 20)) flags = 32;
+ else flags = 0;
+ bytesCount = 0;
+ } else {
+ if (!(eleInfo >> bytesCount)) break;
+ eleInfo >> flags;
+ eleInfo >> timestamp;
+ codecConfig = flags ?
+ ((1 << (flags - 1)) & C2FrameData::FLAG_CODEC_CONFIG) != 0 : 0;
+ }
+ Info.push_back({bytesCount, flags, timestamp});
+ frameId++;
+ }
+ eleInfo.close();
+
+ ASSERT_EQ(mComponent->start(), C2_OK);
+ ALOGV("mURL : %s", mURL);
+ eleStream.open(mURL, std::ifstream::binary);
+ ASSERT_EQ(eleStream.is_open(), true);
+ ASSERT_NO_FATAL_FAILURE(decodeNFrames(
+ mComponent, mQueueLock, mQueueCondition, mWorkQueue, mFlushedIndices,
+ mLinearPool, eleStream, &Info, 0, (int)Info.size()));
+
+    // Blocking call to ensure that all the queued inputs are consumed
+    // before the output count is checked
+ if (!mEos) {
+ ALOGV("Waiting for input consumption");
+ ASSERT_NO_FATAL_FAILURE(
+ waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
+ }
+
+ eleStream.close();
+ if (mFramesReceived != Info.size()) {
+ ALOGE("Input buffer count and Output buffer count mismatch");
+ ALOGV("framesReceived : %d inputFrames : %zu", mFramesReceived,
+ Info.size());
+ ASSERT_TRUE(false);
+ }
+}
+
} // anonymous namespace
// TODO : Video specific configuration Test
diff --git a/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
index 87b7902..8585c87 100644
--- a/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
@@ -139,40 +139,11 @@
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
for (std::unique_ptr<C2Work>& work : workItems) {
- // handle configuration changes in work done
- if (!work->worklets.empty() &&
- (work->worklets.front()->output.configUpdate.size() != 0)) {
- ALOGV("Config Update");
- std::vector<std::unique_ptr<C2Param>> updates =
- std::move(work->worklets.front()->output.configUpdate);
- std::vector<C2Param*> configParam;
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- for (size_t i = 0; i < updates.size(); ++i) {
- C2Param* param = updates[i].get();
- if (param->index() == C2StreamCsdInfo::output::PARAM_TYPE) {
- mCsd = true;
- }
- }
- }
- mFramesReceived++;
- if (work->result != C2_OK) mFailedWorkReceived++;
- mEos = (work->worklets.front()->output.flags &
- C2FrameData::FLAG_END_OF_STREAM) != 0;
- auto frameIndexIt =
- std::find(mFlushedIndices.begin(), mFlushedIndices.end(),
- work->input.ordinal.frameIndex.peeku());
- ALOGV("WorkDone: frameID received %d",
- (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
- work->input.buffers.clear();
- work->worklets.clear();
- {
- typedef std::unique_lock<std::mutex> ULock;
- ULock l(mQueueLock);
- mWorkQueue.push_back(std::move(work));
- if (!mFlushedIndices.empty()) {
- mFlushedIndices.erase(frameIndexIt);
- }
- mQueueCondition.notify_all();
+ if (!work->worklets.empty()) {
+ if (work->result != C2_OK) mFailedWorkReceived++;
+ workDone(mComponent, work, mFlushedIndices, mQueueLock,
+ mQueueCondition, mWorkQueue, mEos, mCsd,
+ mFramesReceived);
}
}
}
@@ -272,11 +243,11 @@
strcat(URL, "bbb_352x288_420p_30fps_32frames.yuv");
}
-void encodeNFrames(const std::shared_ptr<android::Codec2Client::Component> &component,
- std::mutex &queueLock, std::condition_variable &queueCondition,
- std::list<std::unique_ptr<C2Work>> &workQueue,
- std::list<uint64_t> &flushedIndices,
- std::shared_ptr<C2BlockPool> &graphicPool,
+void encodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
+ std::mutex &queueLock, std::condition_variable& queueCondition,
+ std::list<std::unique_ptr<C2Work>>& workQueue,
+ std::list<uint64_t>& flushedIndices,
+ std::shared_ptr<C2BlockPool>& graphicPool,
std::ifstream& eleStream, uint32_t frameID,
uint32_t nFrames, uint32_t nWidth, int32_t nHeight,
bool flushed = false,bool signalEOS = true) {
@@ -319,6 +290,7 @@
flushedIndices.emplace_back(frameID);
}
char* data = (char*)malloc(bytesCount);
+ ASSERT_NE(data, nullptr);
memset(data, 0, bytesCount);
if (eleStream.is_open()) {
eleStream.read(data, bytesCount);
@@ -365,30 +337,6 @@
}
}
-void waitOnInputConsumption(std::mutex &queueLock,
- std::condition_variable &queueCondition,
- std::list<std::unique_ptr<C2Work>> &workQueue,
- size_t bufferCount = MAX_INPUT_BUFFERS) {
- typedef std::unique_lock<std::mutex> ULock;
- uint32_t queueSize;
- int maxRetry = 0;
- {
- ULock l(queueLock);
- queueSize = workQueue.size();
- }
- while ((maxRetry < MAX_RETRY) && (queueSize < bufferCount)) {
- ULock l(queueLock);
- if (queueSize != workQueue.size()) {
- queueSize = workQueue.size();
- maxRetry = 0;
- } else {
- queueCondition.wait_for(l, TIME_OUT);
- maxRetry++;
- }
- }
-}
-
-
TEST_F(Codec2VideoEncHidlTest, validateCompName) {
if (mDisableTest) return;
ALOGV("Checks if the given component is a valid video component");
@@ -488,46 +436,6 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-TEST_F(Codec2VideoEncHidlTest, EmptyBufferTest) {
- description("Tests empty input buffer");
- if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
-
- typedef std::unique_lock<std::mutex> ULock;
- std::unique_ptr<C2Work> work;
- {
- ULock l(mQueueLock);
- if (!mWorkQueue.empty()) {
- work.swap(mWorkQueue.front());
- mWorkQueue.pop_front();
- } else {
- ALOGE("mWorkQueue Empty is not expected at the start of the test");
- ASSERT_TRUE(false);
- }
- }
- ASSERT_NE(work, nullptr);
- work->input.flags = (C2FrameData::flags_t)0;
- work->input.ordinal.timestamp = 0;
- work->input.ordinal.frameIndex = 0;
- work->input.buffers.clear();
- work->worklets.clear();
- work->worklets.emplace_back(new C2Worklet);
-
- std::list<std::unique_ptr<C2Work>> items;
- items.push_back(std::move(work));
- ASSERT_EQ(mComponent->queue(&items), C2_OK);
- uint32_t queueSize;
- {
- ULock l(mQueueLock);
- queueSize = mWorkQueue.size();
- if (queueSize < MAX_INPUT_BUFFERS) {
- mQueueCondition.wait_for(l, TIME_OUT);
- }
- }
- ASSERT_EQ(mWorkQueue.size(), (uint32_t)MAX_INPUT_BUFFERS);
- ASSERT_EQ(mComponent->stop(), C2_OK);
-}
-
TEST_F(Codec2VideoEncHidlTest, FlushTest) {
description("Test Request for flush");
if (mDisableTest) return;
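Both test files now delegate per-work bookkeeping to the shared workDone() helper. A hedged sketch of what that helper presumably does, reconstructed from the inline logic removed above; the real implementation lives in the common test sources and may differ in detail:

// Reconstruction for illustration only; see the shared C2 HIDL test utilities for the real code.
void workDone(const std::shared_ptr<android::Codec2Client::Component>& /* component */,
              std::unique_ptr<C2Work>& work, std::list<uint64_t>& flushedIndices,
              std::mutex& queueLock, std::condition_variable& queueCondition,
              std::list<std::unique_ptr<C2Work>>& workQueue, bool& eos, bool& csd,
              uint32_t& framesReceived) {
    // Flag CSD if the worklet carried a C2StreamCsdInfo config update.
    for (const std::unique_ptr<C2Param>& param :
            work->worklets.front()->output.configUpdate) {
        if (param && param->index() == C2StreamCsdInfo::output::PARAM_TYPE) csd = true;
    }
    framesReceived++;
    eos = (work->worklets.front()->output.flags & C2FrameData::FLAG_END_OF_STREAM) != 0;
    auto it = std::find(flushedIndices.begin(), flushedIndices.end(),
                        work->input.ordinal.frameIndex.peeku());
    work->input.buffers.clear();
    work->worklets.clear();
    std::unique_lock<std::mutex> lock(queueLock);
    workQueue.push_back(std::move(work));
    if (!flushedIndices.empty() && it != flushedIndices.end()) flushedIndices.erase(it);
    queueCondition.notify_all();
}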
diff --git a/media/codec2/hidl/1.0/vts/video/media_c2_video_hidl_test_common.h b/media/codec2/hidl/1.0/vts/video/media_c2_video_hidl_test_common.h
index 1215b13..dd45557 100644
--- a/media/codec2/hidl/1.0/vts/video/media_c2_video_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/video/media_c2_video_hidl_test_common.h
@@ -17,9 +17,6 @@
#ifndef MEDIA_C2_VIDEO_HIDL_TEST_COMMON_H
#define MEDIA_C2_VIDEO_HIDL_TEST_COMMON_H
-#define MAX_RETRY 20
-#define TIME_OUT 400ms
-#define MAX_INPUT_BUFFERS 8
#define ENCODER_TIMESTAMP_INCREMENT 40000
#define ENC_NUM_FRAMES 32
#define ENC_DEFAULT_FRAME_WIDTH 352
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 22e8d84..4878974 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -366,16 +366,165 @@
generation, igbp_id, igbp_slot);
}
- // UGLY HACK: assume YCbCr 4:2:0 8-bit format (and lockable via lockYCbCr) if we don't
- // recognize the format
- PixelFormat format = mInfo.mapperInfo.format;
- if (format != PixelFormat::RGBA_8888 && format != PixelFormat::RGBX_8888) {
- format = PixelFormat::YCBCR_420_888;
- }
+ switch (mInfo.mapperInfo.format) {
+ case PixelFormat::RGBA_1010102: {
+ // TRICKY: media uses this format as packed 10-bit YUV 4:4:4 when the buffer is queued
+ // directly to a Surface. In all other cases it is RGBA. We don't know which case applies
+ // here, so default to YUV for now.
+ void *pointer = nullptr;
+ mMapper->lock(
+ const_cast<native_handle_t *>(mBuffer),
+ grallocUsage,
+ { (int32_t)rect.left, (int32_t)rect.top, (int32_t)rect.width, (int32_t)rect.height },
+ // TODO: fence
+ hidl_handle(),
+ [&err, &pointer](const auto &maperr, const auto &mapPointer) {
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ pointer = mapPointer;
+ }
+ });
+ if (err != C2_OK) {
+ ALOGD("lock failed: %d", err);
+ return err;
+ }
+ // treat as 32-bit values
+ addr[C2PlanarLayout::PLANE_Y] = (uint8_t *)pointer;
+ addr[C2PlanarLayout::PLANE_U] = (uint8_t *)pointer;
+ addr[C2PlanarLayout::PLANE_V] = (uint8_t *)pointer;
+ addr[C2PlanarLayout::PLANE_A] = (uint8_t *)pointer;
+ layout->type = C2PlanarLayout::TYPE_YUVA;
+ layout->numPlanes = 4;
+ layout->rootPlanes = 1;
+ layout->planes[C2PlanarLayout::PLANE_Y] = {
+ C2PlaneInfo::CHANNEL_Y, // channel
+ 4, // colInc
+ 4 * (int32_t)mInfo.stride, // rowInc
+ 1, // mColSampling
+ 1, // mRowSampling
+ 32, // allocatedDepth
+ 10, // bitDepth
+ 10, // rightShift
+ C2PlaneInfo::LITTLE_END, // endianness
+ C2PlanarLayout::PLANE_Y, // rootIx
+ 0, // offset
+ };
+ layout->planes[C2PlanarLayout::PLANE_U] = {
+ C2PlaneInfo::CHANNEL_CB, // channel
+ 4, // colInc
+ 4 * (int32_t)mInfo.stride, // rowInc
+ 1, // mColSampling
+ 1, // mRowSampling
+ 32, // allocatedDepth
+ 10, // bitDepth
+ 0, // rightShift
+ C2PlaneInfo::LITTLE_END, // endianness
+ C2PlanarLayout::PLANE_Y, // rootIx
+ 0, // offset
+ };
+ layout->planes[C2PlanarLayout::PLANE_V] = {
+ C2PlaneInfo::CHANNEL_CR, // channel
+ 4, // colInc
+ 4 * (int32_t)mInfo.stride, // rowInc
+ 1, // mColSampling
+ 1, // mRowSampling
+ 32, // allocatedDepth
+ 10, // bitDepth
+ 20, // rightShift
+ C2PlaneInfo::LITTLE_END, // endianness
+ C2PlanarLayout::PLANE_Y, // rootIx
+ 0, // offset
+ };
+ layout->planes[C2PlanarLayout::PLANE_A] = {
+ C2PlaneInfo::CHANNEL_A, // channel
+ 4, // colInc
+ 4 * (int32_t)mInfo.stride, // rowInc
+ 1, // mColSampling
+ 1, // mRowSampling
+ 32, // allocatedDepth
+ 2, // bitDepth
+ 30, // rightShift
+ C2PlaneInfo::LITTLE_END, // endianness
+ C2PlanarLayout::PLANE_Y, // rootIx
+ 0, // offset
+ };
+ break;
+ }
- switch (format) {
+ case PixelFormat::RGBA_8888:
+ // TODO: alpha channel
+ // fall-through
+ case PixelFormat::RGBX_8888: {
+ void *pointer = nullptr;
+ mMapper->lock(
+ const_cast<native_handle_t *>(mBuffer),
+ grallocUsage,
+ { (int32_t)rect.left, (int32_t)rect.top, (int32_t)rect.width, (int32_t)rect.height },
+ // TODO: fence
+ hidl_handle(),
+ [&err, &pointer](const auto &maperr, const auto &mapPointer) {
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ pointer = mapPointer;
+ }
+ });
+ if (err != C2_OK) {
+ ALOGD("lock failed: %d", err);
+ return err;
+ }
+ addr[C2PlanarLayout::PLANE_R] = (uint8_t *)pointer;
+ addr[C2PlanarLayout::PLANE_G] = (uint8_t *)pointer + 1;
+ addr[C2PlanarLayout::PLANE_B] = (uint8_t *)pointer + 2;
+ layout->type = C2PlanarLayout::TYPE_RGB;
+ layout->numPlanes = 3;
+ layout->rootPlanes = 1;
+ layout->planes[C2PlanarLayout::PLANE_R] = {
+ C2PlaneInfo::CHANNEL_R, // channel
+ 4, // colInc
+ 4 * (int32_t)mInfo.stride, // rowInc
+ 1, // mColSampling
+ 1, // mRowSampling
+ 8, // allocatedDepth
+ 8, // bitDepth
+ 0, // rightShift
+ C2PlaneInfo::NATIVE, // endianness
+ C2PlanarLayout::PLANE_R, // rootIx
+ 0, // offset
+ };
+ layout->planes[C2PlanarLayout::PLANE_G] = {
+ C2PlaneInfo::CHANNEL_G, // channel
+ 4, // colInc
+ 4 * (int32_t)mInfo.stride, // rowInc
+ 1, // mColSampling
+ 1, // mRowSampling
+ 8, // allocatedDepth
+ 8, // bitDepth
+ 0, // rightShift
+ C2PlaneInfo::NATIVE, // endianness
+ C2PlanarLayout::PLANE_R, // rootIx
+ 1, // offset
+ };
+ layout->planes[C2PlanarLayout::PLANE_B] = {
+ C2PlaneInfo::CHANNEL_B, // channel
+ 4, // colInc
+ 4 * (int32_t)mInfo.stride, // rowInc
+ 1, // mColSampling
+ 1, // mRowSampling
+ 8, // allocatedDepth
+ 8, // bitDepth
+ 0, // rightShift
+ C2PlaneInfo::NATIVE, // endianness
+ C2PlanarLayout::PLANE_R, // rootIx
+ 2, // offset
+ };
+ break;
+ }
+
case PixelFormat::YCBCR_420_888:
- case PixelFormat::YV12: {
+ // fall-through
+ case PixelFormat::YV12:
+ // fall-through
+ default: {
YCbCrLayout ycbcrLayout;
mMapper->lockYCbCr(
const_cast<native_handle_t *>(mBuffer), grallocUsage,
@@ -450,79 +599,6 @@
}
break;
}
-
- case PixelFormat::RGBA_8888:
- // TODO: alpha channel
- // fall-through
- case PixelFormat::RGBX_8888: {
- void *pointer = nullptr;
- mMapper->lock(
- const_cast<native_handle_t *>(mBuffer),
- grallocUsage,
- { (int32_t)rect.left, (int32_t)rect.top, (int32_t)rect.width, (int32_t)rect.height },
- // TODO: fence
- hidl_handle(),
- [&err, &pointer](const auto &maperr, const auto &mapPointer) {
- err = maperr2error(maperr);
- if (err == C2_OK) {
- pointer = mapPointer;
- }
- });
- if (err != C2_OK) {
- ALOGD("lock failed: %d", err);
- return err;
- }
- addr[C2PlanarLayout::PLANE_R] = (uint8_t *)pointer;
- addr[C2PlanarLayout::PLANE_G] = (uint8_t *)pointer + 1;
- addr[C2PlanarLayout::PLANE_B] = (uint8_t *)pointer + 2;
- layout->type = C2PlanarLayout::TYPE_RGB;
- layout->numPlanes = 3;
- layout->rootPlanes = 1;
- layout->planes[C2PlanarLayout::PLANE_R] = {
- C2PlaneInfo::CHANNEL_R, // channel
- 4, // colInc
- 4 * (int32_t)mInfo.stride, // rowInc
- 1, // mColSampling
- 1, // mRowSampling
- 8, // allocatedDepth
- 8, // bitDepth
- 0, // rightShift
- C2PlaneInfo::NATIVE, // endianness
- C2PlanarLayout::PLANE_R, // rootIx
- 0, // offset
- };
- layout->planes[C2PlanarLayout::PLANE_G] = {
- C2PlaneInfo::CHANNEL_G, // channel
- 4, // colInc
- 4 * (int32_t)mInfo.stride, // rowInc
- 1, // mColSampling
- 1, // mRowSampling
- 8, // allocatedDepth
- 8, // bitDepth
- 0, // rightShift
- C2PlaneInfo::NATIVE, // endianness
- C2PlanarLayout::PLANE_R, // rootIx
- 1, // offset
- };
- layout->planes[C2PlanarLayout::PLANE_B] = {
- C2PlaneInfo::CHANNEL_B, // channel
- 4, // colInc
- 4 * (int32_t)mInfo.stride, // rowInc
- 1, // mColSampling
- 1, // mRowSampling
- 8, // allocatedDepth
- 8, // bitDepth
- 0, // rightShift
- C2PlaneInfo::NATIVE, // endianness
- C2PlanarLayout::PLANE_R, // rootIx
- 2, // offset
- };
- break;
- }
- default: {
- ALOGD("unsupported pixel format: %d", mInfo.mapperInfo.format);
- return C2_OMITTED;
- }
}
mLocked = true;
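The RGBA_1010102 case above maps one packed little-endian 32-bit word per pixel onto four YUVA planes that all share a single root plane; the rightShift/bitDepth pairs (Y 10/10, U 0/10, V 20/10, A 30/2) say where each component lives in the word. A minimal sketch of unpacking one pixel under that layout (the helper is illustrative, not part of the allocator API):

// Unpack the 10-bit Y/Cb/Cr and 2-bit alpha from one RGBA_1010102 word treated
// as YUVA, matching the rightShift values declared in the plane layout above.
static void unpackYuva1010102(uint32_t word,
                              uint32_t* y, uint32_t* u, uint32_t* v, uint32_t* a) {
    *u = word & 0x3ff;          // bits  0..9  (rightShift 0)
    *y = (word >> 10) & 0x3ff;  // bits 10..19 (rightShift 10)
    *v = (word >> 20) & 0x3ff;  // bits 20..29 (rightShift 20)
    *a = (word >> 30) & 0x3;    // bits 30..31 (rightShift 30)
}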
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index 9384ebf..4e9ac6e 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -31,7 +31,7 @@
namespace android {
-class AACSource : public MediaTrackHelperV3 {
+class AACSource : public MediaTrackHelper {
public:
AACSource(
DataSourceHelper *source,
@@ -45,7 +45,7 @@
virtual media_status_t getFormat(AMediaFormat*);
virtual media_status_t read(
- MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
protected:
virtual ~AACSource();
@@ -195,7 +195,7 @@
return mInitCheck == OK ? 1 : 0;
}
-MediaTrackHelperV3 *AACExtractor::getTrack(size_t index) {
+MediaTrackHelper *AACExtractor::getTrack(size_t index) {
if (mInitCheck != OK || index != 0) {
return NULL;
}
@@ -264,7 +264,7 @@
}
media_status_t AACSource::read(
- MediaBufferHelperV3 **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
*out = NULL;
int64_t seekTimeUs;
@@ -287,7 +287,7 @@
return AMEDIA_ERROR_END_OF_STREAM;
}
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
status_t err = mBufferGroup->acquire_buffer(&buffer);
if (err != OK) {
return AMEDIA_ERROR_UNKNOWN;
@@ -316,14 +316,14 @@
////////////////////////////////////////////////////////////////////////////////
-static CMediaExtractorV3* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
CDataSource *source,
void *meta) {
off64_t offset = *static_cast<off64_t*>(meta);
- return wrapV3(new AACExtractor(new DataSourceHelper(source), offset));
+ return wrap(new AACExtractor(new DataSourceHelper(source), offset));
}
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
CDataSource *source, float *confidence, void **meta,
FreeMetaFunc *freeMeta) {
off64_t pos = 0;
@@ -383,11 +383,11 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT + 1,
+ EXTRACTORDEF_VERSION,
UUID("4fd80eae-03d2-4d72-9eb9-48fa6bb54613"),
1, // version
"AAC Extractor",
- { .v3 = Sniff }
+ { .v2 = Sniff }
};
}
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/AACExtractor.h
index be33bf5..643d3f4 100644
--- a/media/extractors/aac/AACExtractor.h
+++ b/media/extractors/aac/AACExtractor.h
@@ -29,12 +29,12 @@
struct AMessage;
class String8;
-class AACExtractor : public MediaExtractorPluginHelperV3 {
+class AACExtractor : public MediaExtractorPluginHelper {
public:
AACExtractor(DataSourceHelper *source, off64_t offset);
virtual size_t countTracks();
- virtual MediaTrackHelperV3 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
index 7fd2a41..00d2a92 100644
--- a/media/extractors/amr/AMRExtractor.cpp
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -29,7 +29,7 @@
namespace android {
-class AMRSource : public MediaTrackHelperV3 {
+class AMRSource : public MediaTrackHelper {
public:
AMRSource(
DataSourceHelper *source,
@@ -44,7 +44,7 @@
virtual media_status_t getFormat(AMediaFormat *);
virtual media_status_t read(
- MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
protected:
virtual ~AMRSource();
@@ -209,7 +209,7 @@
return mInitCheck == OK ? 1 : 0;
}
-MediaTrackHelperV3 *AMRExtractor::getTrack(size_t index) {
+MediaTrackHelper *AMRExtractor::getTrack(size_t index) {
if (mInitCheck != OK || index != 0) {
return NULL;
}
@@ -273,7 +273,7 @@
}
media_status_t AMRSource::read(
- MediaBufferHelperV3 **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
*out = NULL;
int64_t seekTimeUs;
@@ -322,7 +322,7 @@
return AMEDIA_ERROR_MALFORMED;
}
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
status_t err = mBufferGroup->acquire_buffer(&buffer);
if (err != OK) {
return AMEDIA_ERROR_UNKNOWN;
@@ -363,22 +363,22 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT + 1,
+ EXTRACTORDEF_VERSION,
UUID("c86639c9-2f31-40ac-a715-fa01b4493aaf"),
1,
"AMR Extractor",
{
- .v3 = [](
+ .v2 = [](
CDataSource *source,
float *confidence,
void **,
- FreeMetaFunc *) -> CreatorFuncV3 {
+ FreeMetaFunc *) -> CreatorFunc {
DataSourceHelper helper(source);
if (SniffAMR(&helper, nullptr, confidence)) {
return [](
CDataSource *source,
- void *) -> CMediaExtractorV3* {
- return wrapV3(new AMRExtractor(new DataSourceHelper(source)));};
+ void *) -> CMediaExtractor* {
+ return wrap(new AMRExtractor(new DataSourceHelper(source)));};
}
return NULL;
}
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/AMRExtractor.h
index b50ce81..b76ee9c 100644
--- a/media/extractors/amr/AMRExtractor.h
+++ b/media/extractors/amr/AMRExtractor.h
@@ -29,12 +29,12 @@
class String8;
#define OFFSET_TABLE_LEN 300
-class AMRExtractor : public MediaExtractorPluginHelperV3 {
+class AMRExtractor : public MediaExtractorPluginHelper {
public:
explicit AMRExtractor(DataSourceHelper *source);
virtual size_t countTracks();
- virtual MediaTrackHelperV3 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index 4e04605..b5eaf9b 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -52,7 +52,7 @@
class FLACParser;
-class FLACSource : public MediaTrackHelperV3 {
+class FLACSource : public MediaTrackHelper {
public:
FLACSource(
@@ -65,7 +65,7 @@
virtual media_status_t getFormat(AMediaFormat *meta);
virtual media_status_t read(
- MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
protected:
virtual ~FLACSource();
@@ -124,12 +124,12 @@
}
// media buffers
- void allocateBuffers(MediaBufferGroupHelperV3 *group);
+ void allocateBuffers(MediaBufferGroupHelper *group);
void releaseBuffers();
- MediaBufferHelperV3 *readBuffer() {
+ MediaBufferHelper *readBuffer() {
return readBuffer(false, 0LL);
}
- MediaBufferHelperV3 *readBuffer(FLAC__uint64 sample) {
+ MediaBufferHelper *readBuffer(FLAC__uint64 sample) {
return readBuffer(true, sample);
}
@@ -142,7 +142,7 @@
// media buffers
size_t mMaxBufferSize;
- MediaBufferGroupHelperV3 *mGroup;
+ MediaBufferGroupHelper *mGroup;
void (*mCopy)(int16_t *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
// handle to underlying libFLAC parser
@@ -166,7 +166,7 @@
FLAC__StreamDecoderErrorStatus mErrorStatus;
status_t init();
- MediaBufferHelperV3 *readBuffer(bool doSeek, FLAC__uint64 sample);
+ MediaBufferHelper *readBuffer(bool doSeek, FLAC__uint64 sample);
// no copy constructor or assignment
FLACParser(const FLACParser &);
@@ -576,7 +576,7 @@
return OK;
}
-void FLACParser::allocateBuffers(MediaBufferGroupHelperV3 *group)
+void FLACParser::allocateBuffers(MediaBufferGroupHelper *group)
{
CHECK(mGroup == NULL);
mGroup = group;
@@ -588,7 +588,7 @@
{
}
-MediaBufferHelperV3 *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
+MediaBufferHelper *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
{
mWriteRequested = true;
mWriteCompleted = false;
@@ -625,7 +625,7 @@
}
// acquire a media buffer
CHECK(mGroup != NULL);
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
status_t err = mGroup->acquire_buffer(&buffer);
if (err != OK) {
return NULL;
@@ -716,9 +716,9 @@
}
media_status_t FLACSource::read(
- MediaBufferHelperV3 **outBuffer, const ReadOptions *options)
+ MediaBufferHelper **outBuffer, const ReadOptions *options)
{
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
// process an optional seek request
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
@@ -772,7 +772,7 @@
return mInitCheck == OK ? 1 : 0;
}
-MediaTrackHelperV3 *FLACExtractor::getTrack(size_t index)
+MediaTrackHelper *FLACExtractor::getTrack(size_t index)
{
if (mInitCheck != OK || index > 0) {
return NULL;
@@ -828,22 +828,22 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT + 1,
+ EXTRACTORDEF_VERSION,
UUID("1364b048-cc45-4fda-9934-327d0ebf9829"),
1,
"FLAC Extractor",
{
- .v3 = [](
+ .v2 = [](
CDataSource *source,
float *confidence,
void **,
- FreeMetaFunc *) -> CreatorFuncV3 {
+ FreeMetaFunc *) -> CreatorFunc {
DataSourceHelper helper(source);
if (SniffFLAC(&helper, confidence)) {
return [](
CDataSource *source,
- void *) -> CMediaExtractorV3* {
- return wrapV3(new FLACExtractor(new DataSourceHelper(source)));};
+ void *) -> CMediaExtractor* {
+ return wrap(new FLACExtractor(new DataSourceHelper(source)));};
}
return NULL;
}
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/FLACExtractor.h
index 9604e4a..5a73d20 100644
--- a/media/extractors/flac/FLACExtractor.h
+++ b/media/extractors/flac/FLACExtractor.h
@@ -27,13 +27,13 @@
class FLACParser;
-class FLACExtractor : public MediaExtractorPluginHelperV3 {
+class FLACExtractor : public MediaExtractorPluginHelper {
public:
explicit FLACExtractor(DataSourceHelper *source);
virtual size_t countTracks();
- virtual MediaTrackHelperV3 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/midi/MidiExtractor.cpp b/media/extractors/midi/MidiExtractor.cpp
index 43f394c..0c74376 100644
--- a/media/extractors/midi/MidiExtractor.cpp
+++ b/media/extractors/midi/MidiExtractor.cpp
@@ -32,7 +32,7 @@
// how many Sonivox output buffers to aggregate into one MediaBuffer
static const int NUM_COMBINE_BUFFERS = 4;
-class MidiSource : public MediaTrackHelperV3 {
+class MidiSource : public MediaTrackHelper {
public:
MidiSource(
@@ -44,7 +44,7 @@
virtual media_status_t getFormat(AMediaFormat *);
virtual media_status_t read(
- MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
protected:
virtual ~MidiSource();
@@ -113,10 +113,10 @@
}
media_status_t MidiSource::read(
- MediaBufferHelperV3 **outBuffer, const ReadOptions *options)
+ MediaBufferHelper **outBuffer, const ReadOptions *options)
{
ALOGV("MidiSource::read");
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
// process an optional seek request
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
@@ -199,7 +199,7 @@
return mIsInitialized ? OK : UNKNOWN_ERROR;
}
-status_t MidiEngine::allocateBuffers(MediaBufferGroupHelperV3 *group) {
+status_t MidiEngine::allocateBuffers(MediaBufferGroupHelper *group) {
// select reverb preset and enable
EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_PRESET, EAS_PARAM_REVERB_CHAMBER);
EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_BYPASS, EAS_FALSE);
@@ -222,13 +222,13 @@
return result == EAS_SUCCESS ? OK : UNKNOWN_ERROR;
}
-MediaBufferHelperV3* MidiEngine::readBuffer() {
+MediaBufferHelper* MidiEngine::readBuffer() {
EAS_STATE state;
EAS_State(mEasData, mEasHandle, &state);
if ((state == EAS_STATE_STOPPED) || (state == EAS_STATE_ERROR)) {
return NULL;
}
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
status_t err = mGroup->acquire_buffer(&buffer);
if (err != OK) {
ALOGE("readBuffer: no buffer");
@@ -279,6 +279,7 @@
ALOGV("MidiExtractor dtor");
AMediaFormat_delete(mFileMetadata);
AMediaFormat_delete(mTrackMetadata);
+ delete mEngine;
}
size_t MidiExtractor::countTracks()
@@ -286,7 +287,7 @@
return mInitCheck == OK ? 1 : 0;
}
-MediaTrackHelperV3 *MidiExtractor::getTrack(size_t index)
+MediaTrackHelper *MidiExtractor::getTrack(size_t index)
{
if (mInitCheck != OK || index > 0) {
return NULL;
@@ -331,21 +332,21 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT + 1,
+ EXTRACTORDEF_VERSION,
UUID("ef6cca0a-f8a2-43e6-ba5f-dfcd7c9a7ef2"),
1,
"MIDI Extractor",
{
- .v3 = [](
+ .v2 = [](
CDataSource *source,
float *confidence,
void **,
- FreeMetaFunc *) -> CreatorFuncV3 {
+ FreeMetaFunc *) -> CreatorFunc {
if (SniffMidi(source, confidence)) {
return [](
CDataSource *source,
- void *) -> CMediaExtractorV3* {
- return wrapV3(new MidiExtractor(source));};
+ void *) -> CMediaExtractor* {
+ return wrap(new MidiExtractor(source));};
}
return NULL;
}
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/MidiExtractor.h
index ad345b8..2e78086 100644
--- a/media/extractors/midi/MidiExtractor.h
+++ b/media/extractors/midi/MidiExtractor.h
@@ -38,26 +38,26 @@
status_t initCheck();
- status_t allocateBuffers(MediaBufferGroupHelperV3 *group);
+ status_t allocateBuffers(MediaBufferGroupHelper *group);
status_t releaseBuffers();
status_t seekTo(int64_t positionUs);
- MediaBufferHelperV3* readBuffer();
+ MediaBufferHelper* readBuffer();
private:
MidiIoWrapper *mIoWrapper;
- MediaBufferGroupHelperV3 *mGroup;
+ MediaBufferGroupHelper *mGroup;
EAS_DATA_HANDLE mEasData;
EAS_HANDLE mEasHandle;
const S_EAS_LIB_CONFIG* mEasConfig;
bool mIsInitialized;
};
-class MidiExtractor : public MediaExtractorPluginHelperV3 {
+class MidiExtractor : public MediaExtractorPluginHelper {
public:
explicit MidiExtractor(CDataSource *source);
virtual size_t countTracks();
- virtual MediaTrackHelperV3 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 8d028e1..1744d3d 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -11,7 +11,6 @@
shared_libs: [
"liblog",
- "libmediaextractor",
"libmediandk",
],
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 89d5a9f..9f197b0 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -29,10 +29,8 @@
#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaBufferBase.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
#include <media/stagefright/MetaDataUtils.h>
#include <utils/String8.h>
@@ -127,7 +125,7 @@
BlockIterator &operator=(const BlockIterator &);
};
-struct MatroskaSource : public MediaTrackHelperV2 {
+struct MatroskaSource : public MediaTrackHelper {
MatroskaSource(MatroskaExtractor *extractor, size_t index);
virtual media_status_t start();
@@ -136,7 +134,7 @@
virtual media_status_t getFormat(AMediaFormat *);
virtual media_status_t read(
- MediaBufferBase **buffer, const ReadOptions *options);
+ MediaBufferHelper **buffer, const ReadOptions *options);
protected:
virtual ~MatroskaSource();
@@ -156,11 +154,11 @@
BlockIterator mBlockIter;
ssize_t mNALSizeLen; // for type AVC or HEVC
- List<MediaBufferBase *> mPendingFrames;
+ List<MediaBufferHelper *> mPendingFrames;
status_t advance();
- status_t setWebmBlockCryptoInfo(MediaBufferBase *mbuf);
+ status_t setWebmBlockCryptoInfo(MediaBufferHelper *mbuf);
media_status_t readBlock();
void clearPendingFrames();
@@ -265,6 +263,8 @@
return AMEDIA_ERROR_MALFORMED;
}
+ // allocate one small initial buffer, but leave plenty of room to grow
+ mBufferGroup->init(1 /* number of buffers */, 1024 /* buffer size */, 64 /* growth limit */);
mBlockIter.reset();
return AMEDIA_OK;
@@ -569,7 +569,7 @@
void MatroskaSource::clearPendingFrames() {
while (!mPendingFrames.empty()) {
- MediaBufferBase *frame = *mPendingFrames.begin();
+ MediaBufferHelper *frame = *mPendingFrames.begin();
mPendingFrames.erase(mPendingFrames.begin());
frame->release();
@@ -577,7 +577,7 @@
}
}
-status_t MatroskaSource::setWebmBlockCryptoInfo(MediaBufferBase *mbuf) {
+status_t MatroskaSource::setWebmBlockCryptoInfo(MediaBufferHelper *mbuf) {
if (mbuf->range_length() < 1 || mbuf->range_length() - 1 > INT32_MAX) {
// 1-byte signal
return ERROR_MALFORMED;
@@ -591,7 +591,7 @@
return ERROR_MALFORMED;
}
- MetaDataBase &meta = mbuf->meta_data();
+ AMediaFormat *meta = mbuf->meta_data();
if (encrypted) {
uint8_t ctrCounter[16] = { 0 };
const uint8_t *keyId;
@@ -599,9 +599,9 @@
AMediaFormat *trackMeta = mExtractor->mTracks.itemAt(mTrackIndex).mMeta;
AMediaFormat_getBuffer(trackMeta, AMEDIAFORMAT_KEY_CRYPTO_KEY,
(void**)&keyId, &keyIdSize);
- meta.setData(kKeyCryptoKey, 0, keyId, keyIdSize);
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_KEY, keyId, keyIdSize);
memcpy(ctrCounter, data + 1, 8);
- meta.setData(kKeyCryptoIV, 0, ctrCounter, 16);
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_IV, ctrCounter, 16);
if (partitioned) {
/* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -654,8 +654,10 @@
}
uint32_t sizeofPlainSizes = sizeof(uint32_t) * plainSizes.size();
uint32_t sizeofEncryptedSizes = sizeof(uint32_t) * encryptedSizes.size();
- meta.setData(kKeyPlainSizes, 0, plainSizes.data(), sizeofPlainSizes);
- meta.setData(kKeyEncryptedSizes, 0, encryptedSizes.data(), sizeofEncryptedSizes);
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES,
+ plainSizes.data(), sizeofPlainSizes);
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES,
+ encryptedSizes.data(), sizeofEncryptedSizes);
mbuf->set_range(frameOffset, mbuf->range_length() - frameOffset);
} else {
/*
@@ -675,8 +677,10 @@
*/
int32_t plainSizes[] = { 0 };
int32_t encryptedSizes[] = { static_cast<int32_t>(mbuf->range_length() - 9) };
- meta.setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
- meta.setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES,
+ plainSizes, sizeof(plainSizes));
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES,
+ encryptedSizes, sizeof(encryptedSizes));
mbuf->set_range(9, mbuf->range_length() - 9);
}
} else {
@@ -693,8 +697,10 @@
*/
int32_t plainSizes[] = { static_cast<int32_t>(mbuf->range_length() - 1) };
int32_t encryptedSizes[] = { 0 };
- meta.setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
- meta.setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES,
+ plainSizes, sizeof(plainSizes));
+ AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES,
+ encryptedSizes, sizeof(encryptedSizes));
mbuf->set_range(1, mbuf->range_length() - 1);
}
@@ -721,14 +727,17 @@
}
len += trackInfo->mHeaderLen;
- MediaBufferBase *mbuf = MediaBufferBase::Create(len);
+ MediaBufferHelper *mbuf;
+ mBufferGroup->acquire_buffer(&mbuf, false /* nonblocking */, len /* requested size */);
+ mbuf->set_range(0, len);
uint8_t *data = static_cast<uint8_t *>(mbuf->data());
if (trackInfo->mHeader) {
memcpy(data, trackInfo->mHeader, trackInfo->mHeaderLen);
}
- mbuf->meta_data().setInt64(kKeyTime, timeUs);
- mbuf->meta_data().setInt32(kKeyIsSyncFrame, block->IsKey());
+ AMediaFormat *meta = mbuf->meta_data();
+ AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, block->IsKey());
status_t err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
if (err == OK
@@ -754,7 +763,7 @@
}
media_status_t MatroskaSource::read(
- MediaBufferBase **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
*out = NULL;
int64_t targetSampleTimeUs = -1ll;
@@ -790,13 +799,13 @@
}
}
- MediaBufferBase *frame = *mPendingFrames.begin();
+ MediaBufferHelper *frame = *mPendingFrames.begin();
mPendingFrames.erase(mPendingFrames.begin());
if ((mType != AVC && mType != HEVC) || mNALSizeLen == 0) {
if (targetSampleTimeUs >= 0ll) {
- frame->meta_data().setInt64(
- kKeyTargetTime, targetSampleTimeUs);
+ AMediaFormat_setInt64(frame->meta_data(),
+ AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
}
*out = frame;
@@ -819,7 +828,7 @@
size_t srcSize = frame->range_length();
size_t dstSize = 0;
- MediaBufferBase *buffer = NULL;
+ MediaBufferHelper *buffer = NULL;
uint8_t *dstPtr = NULL;
for (int32_t pass = 0; pass < 2; ++pass) {
@@ -879,16 +888,20 @@
// each 4-byte nal size with a 4-byte start code
buffer = frame;
} else {
- buffer = MediaBufferBase::Create(dstSize);
+ mBufferGroup->acquire_buffer(
+ &buffer, false /* nonblocking */, dstSize /* requested size */);
+ buffer->set_range(0, dstSize);
}
+ AMediaFormat *frameMeta = frame->meta_data();
int64_t timeUs;
- CHECK(frame->meta_data().findInt64(kKeyTime, &timeUs));
+ CHECK(AMediaFormat_getInt64(frameMeta, AMEDIAFORMAT_KEY_TIME_US, &timeUs));
int32_t isSync;
- CHECK(frame->meta_data().findInt32(kKeyIsSyncFrame, &isSync));
+ CHECK(AMediaFormat_getInt32(frameMeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, &isSync));
- buffer->meta_data().setInt64(kKeyTime, timeUs);
- buffer->meta_data().setInt32(kKeyIsSyncFrame, isSync);
+ AMediaFormat *bufMeta = buffer->meta_data();
+ AMediaFormat_setInt64(bufMeta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
+ AMediaFormat_setInt32(bufMeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, isSync);
dstPtr = (uint8_t *)buffer->data();
}
@@ -900,8 +913,8 @@
}
if (targetSampleTimeUs >= 0ll) {
- buffer->meta_data().setInt64(
- kKeyTargetTime, targetSampleTimeUs);
+ AMediaFormat_setInt64(buffer->meta_data(),
+ AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
}
*out = buffer;
@@ -992,7 +1005,7 @@
return mTracks.size();
}
-MediaTrackHelperV2 *MatroskaExtractor::getTrack(size_t index) {
+MediaTrackHelper *MatroskaExtractor::getTrack(size_t index) {
if (index >= mTracks.size()) {
return NULL;
}
@@ -1662,7 +1675,7 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT,
+ EXTRACTORDEF_VERSION,
UUID("abbedd92-38c4-4904-a4c1-b3f45f899980"),
1,
"Matroska Extractor",
@@ -1671,13 +1684,13 @@
CDataSource *source,
float *confidence,
void **,
- FreeMetaFunc *) -> CreatorFuncV2 {
+ FreeMetaFunc *) -> CreatorFunc {
DataSourceHelper helper(source);
if (SniffMatroska(&helper, confidence)) {
return [](
CDataSource *source,
- void *) -> CMediaExtractorV2* {
- return wrapV2(new MatroskaExtractor(new DataSourceHelper(source)));};
+ void *) -> CMediaExtractor* {
+ return wrap(new MatroskaExtractor(new DataSourceHelper(source)));};
}
return NULL;
}
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/MatroskaExtractor.h
index 2fa8881..3871bdf 100644
--- a/media/extractors/mkv/MatroskaExtractor.h
+++ b/media/extractors/mkv/MatroskaExtractor.h
@@ -35,12 +35,12 @@
struct DataSourceBaseReader;
struct MatroskaSource;
-struct MatroskaExtractor : public MediaExtractorPluginHelperV2 {
+struct MatroskaExtractor : public MediaExtractorPluginHelper {
explicit MatroskaExtractor(DataSourceHelper *source);
virtual size_t countTracks();
- virtual MediaTrackHelperV2 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 7abec54..20bcda8 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -207,7 +207,7 @@
return valid;
}
-class MP3Source : public MediaTrackHelperV3 {
+class MP3Source : public MediaTrackHelper {
public:
MP3Source(
AMediaFormat *meta, DataSourceHelper *source,
@@ -220,7 +220,7 @@
virtual media_status_t getFormat(AMediaFormat *meta);
virtual media_status_t read(
- MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
protected:
virtual ~MP3Source();
@@ -413,7 +413,7 @@
return mInitCheck != OK ? 0 : 1;
}
-MediaTrackHelperV3 *MP3Extractor::getTrack(size_t index) {
+MediaTrackHelper *MP3Extractor::getTrack(size_t index) {
if (mInitCheck != OK || index != 0) {
return NULL;
}
@@ -493,7 +493,7 @@
}
media_status_t MP3Source::read(
- MediaBufferHelperV3 **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
*out = NULL;
int64_t seekTimeUs;
@@ -523,7 +523,7 @@
mSamplesRead = 0;
}
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
status_t err = mBufferGroup->acquire_buffer(&buffer);
if (err != OK) {
return AMEDIA_ERROR_UNKNOWN;
@@ -668,14 +668,14 @@
return AMEDIA_OK;
}
-static CMediaExtractorV3* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
CDataSource *source,
void *meta) {
Mp3Meta *metaData = static_cast<Mp3Meta *>(meta);
- return wrapV3(new MP3Extractor(new DataSourceHelper(source), metaData));
+ return wrap(new MP3Extractor(new DataSourceHelper(source), metaData));
}
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
CDataSource *source, float *confidence, void **meta,
FreeMetaFunc *freeMeta) {
off64_t pos = 0;
@@ -712,11 +712,11 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT + 1,
+ EXTRACTORDEF_VERSION,
UUID("812a3f6c-c8cf-46de-b529-3774b14103d4"),
1, // version
"MP3 Extractor",
- { .v3 = Sniff }
+ { .v2 = Sniff }
};
}
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/MP3Extractor.h
index fe72cff..1e38ab7 100644
--- a/media/extractors/mp3/MP3Extractor.h
+++ b/media/extractors/mp3/MP3Extractor.h
@@ -32,13 +32,13 @@
class String8;
struct Mp3Meta;
-class MP3Extractor : public MediaExtractorPluginHelperV3 {
+class MP3Extractor : public MediaExtractorPluginHelper {
public:
MP3Extractor(DataSourceHelper *source, Mp3Meta *meta);
~MP3Extractor();
virtual size_t countTracks();
- virtual MediaTrackHelperV3 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 91de353..1b308aa 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -15,7 +15,6 @@
shared_libs: [
"liblog",
- "libmediaextractor",
"libmediandk"
],
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 0579ff0..524db4e 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -44,10 +44,9 @@
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaBufferBase.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MetaDataBase.h>
#include <utils/String8.h>
#include <byteswap.h>
@@ -70,7 +69,7 @@
kMaxAtomSize = 64 * 1024 * 1024,
};
-class MPEG4Source : public MediaTrackHelperV2 {
+class MPEG4Source : public MediaTrackHelper {
static const size_t kMaxPcmFrameSize = 8192;
public:
// Caller retains ownership of both "dataSource" and "sampleTable".
@@ -81,7 +80,8 @@
Vector<SidxEntry> &sidx,
const Trex *trex,
off64_t firstMoofOffset,
- const sp<ItemTable> &itemTable);
+ const sp<ItemTable> &itemTable,
+ int32_t elstShiftStartTicks);
virtual status_t init();
virtual media_status_t start();
@@ -89,10 +89,10 @@
virtual media_status_t getFormat(AMediaFormat *);
- virtual media_status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
+ virtual media_status_t read(MediaBufferHelper **buffer, const ReadOptions *options = NULL);
virtual bool supportNonblockingRead() { return true; }
virtual media_status_t fragmentedRead(
- MediaBufferBase **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
virtual ~MPEG4Source();
@@ -110,7 +110,7 @@
off64_t mFirstMoofOffset;
off64_t mCurrentMoofOffset;
off64_t mNextMoofOffset;
- uint32_t mCurrentTime;
+ uint32_t mCurrentTime; // in media timescale ticks
int32_t mLastParsedTrackId;
int32_t mTrackId;
@@ -137,15 +137,17 @@
bool mStarted;
- MediaBufferGroup *mGroup;
-
- MediaBufferBase *mBuffer;
+ MediaBufferHelper *mBuffer;
uint8_t *mSrcBuffer;
bool mIsHeif;
sp<ItemTable> mItemTable;
+ // Start offset from composition time to presentation time.
+ // Support shift only for video tracks through mElstShiftStartTicks for now.
+ int32_t mElstShiftStartTicks;
+
size_t parseNALSize(const uint8_t *data) const;
status_t parseChunk(off64_t *offset);
status_t parseTrackFragmentHeader(off64_t offset, off64_t size);
@@ -464,11 +466,12 @@
[=] {
int64_t duration;
int32_t samplerate;
+ // Only for audio track.
if (track->has_elst && mHeaderTimescale != 0 &&
AMediaFormat_getInt64(track->meta, AMEDIAFORMAT_KEY_DURATION, &duration) &&
AMediaFormat_getInt32(track->meta, AMEDIAFORMAT_KEY_SAMPLE_RATE, &samplerate)) {
- // elst has to be processed only the first time this function is called
+ // Elst has to be processed only the first time this function is called.
track->has_elst = false;
if (track->elst_segment_duration > INT64_MAX) {
@@ -484,67 +487,72 @@
halfscale, mHeaderTimescale, track->timescale);
if ((uint32_t)samplerate != track->timescale){
- ALOGV("samplerate:%" PRId32 ", track->timescale and samplerate are different!", samplerate);
+ ALOGV("samplerate:%" PRId32 ", track->timescale and samplerate are different!",
+ samplerate);
}
-
- int64_t delay;
- // delay = ((media_time * samplerate) + halfscale) / track->timescale;
- if (__builtin_mul_overflow(media_time, samplerate, &delay) ||
- __builtin_add_overflow(delay, halfscale, &delay) ||
- (delay /= track->timescale, false) ||
- delay > INT32_MAX ||
- delay < INT32_MIN) {
- ALOGW("ignoring edit list with bogus values");
- return;
+ // Both delay and paddingsamples have to be set in order for either to be
+ // effective in the lower layers.
+ int64_t delay = 0;
+ if (media_time > 0) { // Gapless playback
+ // delay = ((media_time * samplerate) + halfscale) / track->timescale;
+ if (__builtin_mul_overflow(media_time, samplerate, &delay) ||
+ __builtin_add_overflow(delay, halfscale, &delay) ||
+ (delay /= track->timescale, false) ||
+ delay > INT32_MAX ||
+ delay < INT32_MIN) {
+ ALOGW("ignoring edit list with bogus values");
+ return;
+ }
}
ALOGV("delay = %" PRId64, delay);
AMediaFormat_setInt32(track->meta, AMEDIAFORMAT_KEY_ENCODER_DELAY, delay);
- int64_t scaled_duration;
- // scaled_duration = duration * mHeaderTimescale;
- if (__builtin_mul_overflow(duration, mHeaderTimescale, &scaled_duration)) {
- return;
- }
- ALOGV("scaled_duration = %" PRId64, scaled_duration);
-
- int64_t segment_end;
- int64_t padding;
- int64_t segment_duration_e6;
- int64_t media_time_scaled_e6;
- int64_t media_time_scaled;
- // padding = scaled_duration - ((segment_duration * 1000000) +
- // ((media_time * mHeaderTimeScale * 1000000)/track->timescale) )
- // segment_duration is based on timescale in movie header box(mdhd)
- // media_time is based on timescale track header/media timescale
- if (__builtin_mul_overflow(segment_duration, 1000000, &segment_duration_e6) ||
- __builtin_mul_overflow(media_time, mHeaderTimescale, &media_time_scaled) ||
- __builtin_mul_overflow(media_time_scaled, 1000000, &media_time_scaled_e6)) {
- return;
- }
- media_time_scaled_e6 /= track->timescale;
- if(__builtin_add_overflow(segment_duration_e6, media_time_scaled_e6, &segment_end) ||
- __builtin_sub_overflow(scaled_duration, segment_end, &padding)) {
- return;
- }
- ALOGV("segment_end = %" PRId64 ", padding = %" PRId64, segment_end, padding);
int64_t paddingsamples = 0;
- if (padding < 0) {
+ if (segment_duration > 0) {
+ int64_t scaled_duration;
+ // scaled_duration = duration * mHeaderTimescale;
+ if (__builtin_mul_overflow(duration, mHeaderTimescale, &scaled_duration)) {
+ return;
+ }
+ ALOGV("scaled_duration = %" PRId64, scaled_duration);
+
+ int64_t segment_end;
+ int64_t padding;
+ int64_t segment_duration_e6;
+ int64_t media_time_scaled_e6;
+ int64_t media_time_scaled;
+ // padding = scaled_duration - ((segment_duration * 1000000) +
+ // ((media_time * mHeaderTimescale * 1000000)/track->timescale) )
+ // segment_duration is based on timescale in movie header box(mdhd)
+ // media_time is based on timescale track header/media timescale
+ if (__builtin_mul_overflow(segment_duration, 1000000, &segment_duration_e6) ||
+ __builtin_mul_overflow(media_time, mHeaderTimescale, &media_time_scaled) ||
+ __builtin_mul_overflow(media_time_scaled, 1000000, &media_time_scaled_e6)) {
+ return;
+ }
+ media_time_scaled_e6 /= track->timescale;
+ if (__builtin_add_overflow(segment_duration_e6, media_time_scaled_e6, &segment_end)
+ || __builtin_sub_overflow(scaled_duration, segment_end, &padding)) {
+ return;
+ }
+ ALOGV("segment_end = %" PRId64 ", padding = %" PRId64, segment_end, padding);
// track duration from media header (which is what AMEDIAFORMAT_KEY_DURATION is)
// might be slightly shorter than the segment duration, which would make the
// padding negative. Clamp to zero.
- padding = 0;
- } else {
- int64_t halfscale_e6;
- int64_t timescale_e6;
- // paddingsamples = ((padding * samplerate) + (halfscale * 1000000))
- // / (mHeaderTimescale * 1000000);
- if (__builtin_mul_overflow(padding, samplerate, &paddingsamples) ||
- __builtin_mul_overflow(halfscale, 1000000, &halfscale_e6) ||
- __builtin_mul_overflow(mHeaderTimescale, 1000000, &timescale_e6) ||
- __builtin_add_overflow(paddingsamples, halfscale_e6, &paddingsamples) ||
- (paddingsamples /= timescale_e6, false) ||
- paddingsamples > INT32_MAX) {
- return;
+ if (padding > 0) {
+ int64_t halfscale_mht = mHeaderTimescale / 2;
+ int64_t halfscale_e6;
+ int64_t timescale_e6;
+ // paddingsamples = ((padding * samplerate) + (halfscale_mht * 1000000))
+ // / (mHeaderTimescale * 1000000);
+ if (__builtin_mul_overflow(padding, samplerate, &paddingsamples) ||
+ __builtin_mul_overflow(halfscale_mht, 1000000, &halfscale_e6) ||
+ __builtin_mul_overflow(mHeaderTimescale, 1000000, &timescale_e6) ||
+ __builtin_add_overflow(paddingsamples, halfscale_e6, &paddingsamples) ||
+ (paddingsamples /= timescale_e6, false) ||
+ paddingsamples > INT32_MAX) {
+ return;
+ }
}
}
ALOGV("paddingsamples = %" PRId64, paddingsamples);
@@ -673,6 +681,7 @@
track->includes_expensive_metadata = false;
track->skipTrack = false;
track->timescale = 1000000;
+ track->elstShiftStartTicks = 0;
}
}
@@ -970,6 +979,7 @@
AMEDIAFORMAT_KEY_MIME, "application/octet-stream");
track->has_elst = false;
track->subsample_encryption = false;
+ track->elstShiftStartTicks = 0;
}
off64_t stop_offset = *offset + chunk_size;
@@ -1097,6 +1107,7 @@
if (entry_count != 1) {
// we only support a single entry at the moment, for gapless playback
+ // or start offset
ALOGW("ignoring edit list with %d entries", entry_count);
} else {
off64_t entriesoffset = data_offset + 8;
@@ -1558,9 +1569,40 @@
return ERROR_IO;
}
- String8 mimeFormat((const char *)(buffer.get()), chunk_data_size);
- AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+ // Prior to API 29, the metadata track was not compliant with ISO/IEC
+ // 14496-12-2015. This led to some ISO-compliant parsers failing to read the
+ // metadata track. As of API 29, a change was made to the metadata track to
+ // make it compliant with the standard. The workaround is to write the
+ // null-terminated mime_format string twice. This allows compliant parsers to
+ // read the missing reserved, data_reference_index, and content_encoding fields
+ // from the first mime_format string. The actual mime_format field would then be
+ // read correctly from the second string. The non-compliant Android frameworks
+ // from API 28 and earlier would still be able to read the mime_format correctly
+ // as it would only read the first null-terminated mime_format string. To enable
+ // reading metadata tracks generated from both the non-compliant and compliant
+ // formats, a check needs to be done to see which format is used.
+ int null_pos = 0;
+ const unsigned char *str = buffer.get();
+ while (null_pos < chunk_data_size) {
+ if (*(str + null_pos) == '\0') {
+ break;
+ }
+ ++null_pos;
+ }
+ if (null_pos == chunk_data_size - 1) {
+ // This is not a standard compliant metadata track.
+ String8 mimeFormat((const char *)(buffer.get()), chunk_data_size);
+ AMediaFormat_setString(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+ } else {
+ // This is a standard compliant metadata track.
+ String8 contentEncoding((const char *)(buffer.get() + 8));
+ String8 mimeFormat((const char *)(buffer.get() + 8 + contentEncoding.size() + 1),
+ chunk_data_size - 8 - contentEncoding.size() - 1);
+ AMediaFormat_setString(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+ }
break;
}
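A hedged illustration of the two 'mett' payload shapes that the detection above distinguishes, using "application/id3" as an example mime; the helper is hypothetical and only restates the null-position check:

// Pre-API 29 payload:   "application/id3\0"
//   -> the only null is the last byte, so the whole chunk is the mime_format.
// API 29+ workaround:   "application/id3\0application/id3\0"
//   -> a compliant reader takes bytes [0..5] as reserved, [6..7] as
//      data_reference_index, the remainder of the first copy as the
//      null-terminated content_encoding, and the second copy as mime_format.
static bool isCompliantMettPayload(const uint8_t* buf, size_t size) {
    size_t firstNull = 0;
    while (firstNull < size && buf[firstNull] != '\0') ++firstNull;
    return firstNull != size - 1;   // null before the last byte => compliant layout
}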
@@ -3830,7 +3872,7 @@
}
}
-MediaTrackHelperV2 *MPEG4Extractor::getTrack(size_t index) {
+MediaTrackHelper *MPEG4Extractor::getTrack(size_t index) {
status_t err;
if ((err = readMetaData()) != OK) {
return NULL;
@@ -3904,9 +3946,15 @@
}
}
+ if (track->has_elst and !strncasecmp("video/", mime, 6) and track->elst_media_time > 0) {
+ track->elstShiftStartTicks = track->elst_media_time;
+ ALOGV("video track->elstShiftStartTicks :%" PRId64, track->elst_media_time);
+ }
+
MPEG4Source *source = new MPEG4Source(
track->meta, mDataSource, track->timescale, track->sampleTable,
- mSidxEntries, trex, mMoofOffset, itemTable);
+ mSidxEntries, trex, mMoofOffset, itemTable,
+ track->elstShiftStartTicks);
if (source->init() != OK) {
delete source;
return NULL;
@@ -4307,7 +4355,8 @@
Vector<SidxEntry> &sidx,
const Trex *trex,
off64_t firstMoofOffset,
- const sp<ItemTable> &itemTable)
+ const sp<ItemTable> &itemTable,
+ int32_t elstShiftStartTicks)
: mFormat(format),
mDataSource(dataSource),
mTimescale(timeScale),
@@ -4332,11 +4381,11 @@
mIsPcm(false),
mNALLengthSize(0),
mStarted(false),
- mGroup(NULL),
mBuffer(NULL),
mSrcBuffer(NULL),
mIsHeif(itemTable != NULL),
- mItemTable(itemTable) {
+ mItemTable(itemTable),
+ mElstShiftStartTicks(elstShiftStartTicks) {
memset(&mTrackFragmentHeaderInfo, 0, sizeof(mTrackFragmentHeaderInfo));
@@ -4421,11 +4470,31 @@
}
status_t MPEG4Source::init() {
+ status_t err = OK;
+ const char *mime;
+ CHECK(AMediaFormat_getString(mFormat, AMEDIAFORMAT_KEY_MIME, &mime));
if (mFirstMoofOffset != 0) {
off64_t offset = mFirstMoofOffset;
- return parseChunk(&offset);
+ err = parseChunk(&offset);
+ if (err == OK && !strncasecmp("video/", mime, 6)
+ && !mCurrentSamples.isEmpty()) {
+ // Start offset should be less than or equal to the composition time of the first sample.
+ // ISO : sample_composition_time_offset, version 0 (unsigned) for major brands.
+ mElstShiftStartTicks = std::min(mElstShiftStartTicks,
+ (*mCurrentSamples.begin()).compositionOffset);
+ }
+ return err;
}
- return OK;
+
+ if (!strncasecmp("video/", mime, 6)) {
+ uint32_t firstSampleCTS = 0;
+ err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
+ // Start offset should be less than or equal to the composition time of the first sample.
+ // The composition timestamp of the first sample cannot be negative.
+ mElstShiftStartTicks = std::min(mElstShiftStartTicks, (int32_t)firstSampleCTS);
+ }
+
+ return err;
}
MPEG4Source::~MPEG4Source() {
@@ -4461,12 +4530,10 @@
const size_t kInitialBuffers = 2;
const size_t kMaxBuffers = 8;
const size_t realMaxBuffers = min(kMaxBufferSize / max_size, kMaxBuffers);
- mGroup = new MediaBufferGroup(kInitialBuffers, max_size, realMaxBuffers);
+ mBufferGroup->init(kInitialBuffers, max_size, realMaxBuffers);
mSrcBuffer = new (std::nothrow) uint8_t[max_size];
if (mSrcBuffer == NULL) {
// file probably specified a bad max size
- delete mGroup;
- mGroup = NULL;
return AMEDIA_ERROR_MALFORMED;
}
@@ -4488,9 +4555,6 @@
delete[] mSrcBuffer;
mSrcBuffer = NULL;
- delete mGroup;
- mGroup = NULL;
-
mStarted = false;
mCurrentSampleIndex = 0;
@@ -4971,7 +5035,7 @@
status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
- ALOGV("MPEG4Extractor::parseTrackFragmentRun");
+ ALOGV("MPEG4Source::parseTrackFragmentRun");
if (size < 8) {
return -EINVAL;
}
@@ -5113,10 +5177,10 @@
}
ALOGV("adding sample %d at offset 0x%08" PRIx64 ", size %u, duration %u, "
- " flags 0x%08x", i + 1,
+ " flags 0x%08x ctsOffset %" PRIu32, i + 1,
dataOffset, sampleSize, sampleDuration,
(flags & kFirstSampleFlagsPresent) && i == 0
- ? firstSampleFlags : sampleFlags);
+ ? firstSampleFlags : sampleFlags, sampleCtsOffset);
tmp.offset = dataOffset;
tmp.size = sampleSize;
tmp.duration = sampleDuration;
@@ -5187,12 +5251,12 @@
}
media_status_t MPEG4Source::read(
- MediaBufferBase **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
Mutex::Autolock autoLock(mLock);
CHECK(mStarted);
- if (options != nullptr && options->getNonBlocking() && !mGroup->has_buffers()) {
+ if (options != nullptr && options->getNonBlocking() && !mBufferGroup->has_buffers()) {
*out = nullptr;
return AMEDIA_ERROR_WOULD_BLOCK;
}
@@ -5208,6 +5272,7 @@
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+
if (mIsHeif) {
CHECK(mSampleTable == NULL);
CHECK(mItemTable != NULL);
@@ -5245,6 +5310,9 @@
CHECK(!"Should not be here.");
break;
}
+ if (mode != ReadOptions::SEEK_FRAME_INDEX) {
+ seekTimeUs += ((int64_t)mElstShiftStartTicks * 1000000) / mTimescale;
+ }
uint32_t sampleIndex;
status_t err = mSampleTable->findSampleAtTime(
@@ -5286,6 +5354,7 @@
if (mode == ReadOptions::SEEK_CLOSEST
|| mode == ReadOptions::SEEK_FRAME_INDEX) {
+ sampleTime -= mElstShiftStartTicks;
targetSampleTimeUs = (sampleTime * 1000000ll) / mTimescale;
}
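The seek path above works in media-timescale ticks; a small worked example with made-up numbers (a 90 kHz video track whose edit list starts 3003 ticks into the media timeline) showing how the shift is added to the requested seek time and later subtracted from composition times:

// Illustrative only; mirrors the mElstShiftStartTicks adjustments above.
int64_t timescale = 90000;
int32_t elstShiftStartTicks = 3003;        // from elst media_time, clamped to the first CTS

// The app seeks to 1.0 s of presentation time; internally that becomes
// 1.0 s + 33366 us of media time.
int64_t seekTimeUs = 1000000;
seekTimeUs += (int64_t)elstShiftStartTicks * 1000000 / timescale;    // 1033366 us

// A returned sample's composition time is shifted the other way before it is
// converted to AMEDIAFORMAT_KEY_TIME_US.
uint32_t cts = 93003;                      // media-timescale ticks
cts -= elstShiftStartTicks;                // 90000 ticks
int64_t timeUs = (int64_t)cts * 1000000 / timescale;                 // back to 1000000 us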
@@ -5324,6 +5393,10 @@
if (!mIsHeif) {
err = mSampleTable->getMetaDataForSample(
mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
+ if (err == OK) {
+ cts -= mElstShiftStartTicks;
+ }
+
} else {
err = mItemTable->getImageOffsetAndSize(
options && options->getSeekTo(&seekTimeUs, &mode) ?
@@ -5341,7 +5414,7 @@
return AMEDIA_ERROR_UNKNOWN;
}
- err = mGroup->acquire_buffer(&mBuffer);
+ err = mBufferGroup->acquire_buffer(&mBuffer);
if (err != OK) {
CHECK(mBuffer == NULL);
@@ -5380,9 +5453,11 @@
return AMEDIA_ERROR_IO;
}
- mBuffer->meta_data().clear();
- mBuffer->meta_data().setInt64(kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ AMediaFormat *meta = mBuffer->meta_data();
+ AMediaFormat_clear(meta);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
int32_t byteOrder;
AMediaFormat_getInt32(mFormat,
@@ -5413,19 +5488,20 @@
CHECK(mBuffer != NULL);
mBuffer->set_range(0, size);
- mBuffer->meta_data().clear();
- mBuffer->meta_data().setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data().setInt64(
- kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+ AMediaFormat *meta = mBuffer->meta_data();
+ AMediaFormat_clear(meta);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_DURATION, ((int64_t)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data().setInt64(
- kKeyTargetTime, targetSampleTimeUs);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
}
if (isSyncSample) {
- mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
++mCurrentSampleIndex;
@@ -5468,19 +5544,20 @@
}
mBuffer->set_range(0, dstOffset + size);
- mBuffer->meta_data().clear();
- mBuffer->meta_data().setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data().setInt64(
- kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+ AMediaFormat *meta = mBuffer->meta_data();
+ AMediaFormat_clear(meta);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_DURATION, ((int64_t)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data().setInt64(
- kKeyTargetTime, targetSampleTimeUs);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
}
if (isSyncSample) {
- mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
++mCurrentSampleIndex;
@@ -5548,31 +5625,32 @@
CHECK(mBuffer != NULL);
mBuffer->set_range(0, dstOffset);
- mBuffer->meta_data().clear();
- mBuffer->meta_data().setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data().setInt64(
- kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+ AMediaFormat *meta = mBuffer->meta_data();
+ AMediaFormat_clear(meta);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_DURATION, ((int64_t)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data().setInt64(
- kKeyTargetTime, targetSampleTimeUs);
+ AMediaFormat_setInt64(
+ meta, AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
}
if (mIsAVC) {
uint32_t layerId = FindAVCLayerId(
(const uint8_t *)mBuffer->data(), mBuffer->range_length());
- mBuffer->meta_data().setInt32(kKeyTemporalLayerId, layerId);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID, layerId);
} else if (mIsHEVC) {
int32_t layerId = parseHEVCLayerId(
(const uint8_t *)mBuffer->data(), mBuffer->range_length());
if (layerId >= 0) {
- mBuffer->meta_data().setInt32(kKeyTemporalLayerId, layerId);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID, layerId);
}
}
if (isSyncSample) {
- mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
++mCurrentSampleIndex;
@@ -5585,7 +5663,7 @@
}
media_status_t MPEG4Source::fragmentedRead(
- MediaBufferBase **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
ALOGV("MPEG4Source::fragmentedRead");
@@ -5599,6 +5677,10 @@
ReadOptions::SeekMode mode;
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ seekTimeUs += ((int64_t)mElstShiftStartTicks * 1000000) / mTimescale;
+ ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRId32, seekTimeUs,
+ mElstShiftStartTicks);
+
int numSidxEntries = mSegments.size();
if (numSidxEntries != 0) {
int64_t totalTime = 0;
@@ -5685,10 +5767,12 @@
offset = smpl->offset;
size = smpl->size;
cts = mCurrentTime + smpl->compositionOffset;
+ cts -= mElstShiftStartTicks;
+
mCurrentTime += smpl->duration;
isSyncSample = (mCurrentSampleIndex == 0);
- status_t err = mGroup->acquire_buffer(&mBuffer);
+ status_t err = mBufferGroup->acquire_buffer(&mBuffer);
if (err != OK) {
CHECK(mBuffer == NULL);
@@ -5704,19 +5788,21 @@
}
const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
- MetaDataBase &bufmeta = mBuffer->meta_data();
- bufmeta.clear();
+ AMediaFormat *bufmeta = mBuffer->meta_data();
+ AMediaFormat_clear(bufmeta);
if (smpl->encryptedsizes.size()) {
// store clear/encrypted lengths in metadata
- bufmeta.setData(kKeyPlainSizes, 0,
+ AMediaFormat_setBuffer(bufmeta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES,
smpl->clearsizes.array(), smpl->clearsizes.size() * 4);
- bufmeta.setData(kKeyEncryptedSizes, 0,
+ AMediaFormat_setBuffer(bufmeta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES,
smpl->encryptedsizes.array(), smpl->encryptedsizes.size() * 4);
- bufmeta.setInt32(kKeyCryptoDefaultIVSize, mDefaultIVSize);
- bufmeta.setInt32(kKeyCryptoMode, mCryptoMode);
- bufmeta.setData(kKeyCryptoKey, 0, mCryptoKey, 16);
- bufmeta.setInt32(kKeyEncryptedByteBlock, mDefaultEncryptedByteBlock);
- bufmeta.setInt32(kKeySkipByteBlock, mDefaultSkipByteBlock);
+ AMediaFormat_setInt32(bufmeta, AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE, mDefaultIVSize);
+ AMediaFormat_setInt32(bufmeta, AMEDIAFORMAT_KEY_CRYPTO_MODE, mCryptoMode);
+ AMediaFormat_setBuffer(bufmeta, AMEDIAFORMAT_KEY_CRYPTO_KEY, mCryptoKey, 16);
+ AMediaFormat_setInt32(bufmeta,
+ AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK, mDefaultEncryptedByteBlock);
+ AMediaFormat_setInt32(bufmeta,
+ AMEDIAFORMAT_KEY_CRYPTO_SKIP_BYTE_BLOCK, mDefaultSkipByteBlock);
void *iv = NULL;
size_t ivlength = 0;
@@ -5725,8 +5811,7 @@
iv = (void *) smpl->iv;
ivlength = 16; // use 16 or the actual size?
}
- bufmeta.setData(kKeyCryptoIV, 0, iv, ivlength);
-
+ AMediaFormat_setBuffer(bufmeta, AMEDIAFORMAT_KEY_CRYPTO_IV, iv, ivlength);
}
if (!mIsAVC && !mIsHEVC) {
@@ -5752,30 +5837,29 @@
CHECK(mBuffer != NULL);
mBuffer->set_range(0, size);
- mBuffer->meta_data().setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data().setInt64(
- kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
+ AMediaFormat_setInt64(bufmeta,
+ AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMediaFormat_setInt64(bufmeta,
+ AMEDIAFORMAT_KEY_DURATION, ((int64_t)smpl->duration * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data().setInt64(
- kKeyTargetTime, targetSampleTimeUs);
+ AMediaFormat_setInt64(bufmeta, AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
}
if (mIsAVC) {
uint32_t layerId = FindAVCLayerId(
(const uint8_t *)mBuffer->data(), mBuffer->range_length());
- mBuffer->meta_data().setInt32(kKeyTemporalLayerId, layerId);
+ AMediaFormat_setInt32(bufmeta, AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID, layerId);
} else if (mIsHEVC) {
int32_t layerId = parseHEVCLayerId(
(const uint8_t *)mBuffer->data(), mBuffer->range_length());
if (layerId >= 0) {
- mBuffer->meta_data().setInt32(kKeyTemporalLayerId, layerId);
+ AMediaFormat_setInt32(bufmeta, AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID, layerId);
}
}
if (isSyncSample) {
- mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ AMediaFormat_setInt32(bufmeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
++mCurrentSampleIndex;
@@ -5867,18 +5951,18 @@
CHECK(mBuffer != NULL);
mBuffer->set_range(0, dstOffset);
- mBuffer->meta_data().setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data().setInt64(
- kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
+ AMediaFormat *bufmeta = mBuffer->meta_data();
+ AMediaFormat_setInt64(bufmeta,
+ AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMediaFormat_setInt64(bufmeta,
+ AMEDIAFORMAT_KEY_DURATION, ((int64_t)smpl->duration * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data().setInt64(
- kKeyTargetTime, targetSampleTimeUs);
+ AMediaFormat_setInt64(bufmeta, AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
}
if (isSyncSample) {
- mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ AMediaFormat_setInt32(bufmeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
++mCurrentSampleIndex;
@@ -6079,11 +6163,11 @@
return true;
}
-static CMediaExtractorV2* CreateExtractor(CDataSource *source, void *) {
- return wrapV2(new MPEG4Extractor(new DataSourceHelper(source)));
+static CMediaExtractor* CreateExtractor(CDataSource *source, void *) {
+ return wrap(new MPEG4Extractor(new DataSourceHelper(source)));
}
-static CreatorFuncV2 Sniff(
+static CreatorFunc Sniff(
CDataSource *source, float *confidence, void **,
FreeMetaFunc *) {
DataSourceHelper helper(source);
@@ -6104,7 +6188,7 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT,
+ EXTRACTORDEF_VERSION,
UUID("27575c67-4417-4c54-8d3d-8e626985a164"),
2, // version
"MP4 Extractor",
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 56b641d..fadfb50 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -53,12 +53,12 @@
uint32_t default_sample_flags;
};
-class MPEG4Extractor : public MediaExtractorPluginHelperV2 {
+class MPEG4Extractor : public MediaExtractorPluginHelper {
public:
explicit MPEG4Extractor(DataSourceHelper *source, const char *mime = NULL);
virtual size_t countTracks();
- virtual MediaTrackHelperV2 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
@@ -85,6 +85,7 @@
bool has_elst;
int64_t elst_media_time;
uint64_t elst_segment_duration;
+ int32_t elstShiftStartTicks;
bool subsample_encryption;
};
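
Context for the edit-list handling above: the new elstShiftStartTicks field records the edit-list start offset in the track's media timescale, so seeks into the track are shifted forward by that amount and composition times read back from the sample table are shifted back by it. A minimal sketch of the conversion, illustrative only; the helper below is not part of the patch:

#include <cstdint>

// Illustrative only: an edit-list start offset in media-timescale ticks
// becomes the microsecond shift applied around seeks and to each sample's
// composition time in MPEG4Source::read()/fragmentedRead().
static int64_t elstShiftUs(int32_t elstShiftStartTicks, uint32_t timescale) {
    // e.g. 9000 ticks at a 90000 Hz timescale -> 100000 us trimmed from
    // the start of the presentation timeline
    return ((int64_t)elstShiftStartTicks * 1000000) / timescale;
}
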
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 38c86eb..b816093 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -15,14 +15,11 @@
"android.hardware.cas@1.0",
"android.hardware.cas.native@1.0",
"android.hidl.token@1.0-utils",
- "libbinder",
- "libcrypto",
- "libcutils",
- "libhidlallocatorutils",
+ "android.hidl.allocator@1.0",
+ "libhidlmemory",
"libhidlbase",
"liblog",
- "libmediaextractor",
- "libstagefright_foundation",
+ "libmediandk",
],
header_libs: [
@@ -30,8 +27,13 @@
],
static_libs: [
+ "libcrypto",
+ "libstagefright_foundation",
"libstagefright_mpeg2support",
+ "libmediaextractor",
"libutils",
+ "libstagefright",
+ "libstagefright_esds",
],
name: "libmpeg2extractor",
diff --git a/media/extractors/mpeg2/ExtractorBundle.cpp b/media/extractors/mpeg2/ExtractorBundle.cpp
index 366aa59..2f4196c 100644
--- a/media/extractors/mpeg2/ExtractorBundle.cpp
+++ b/media/extractors/mpeg2/ExtractorBundle.cpp
@@ -36,7 +36,7 @@
1,
"MPEG2-PS/TS Extractor",
{
- [](
+ .v2 = [](
CDataSource *source,
float *confidence,
void **,
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
index fc13d2c..554d252 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -33,22 +33,23 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
#include <utils/String8.h>
#include <inttypes.h>
namespace android {
-struct MPEG2PSExtractor::Track : public MediaTrackHelper, public RefBase {
+struct MPEG2PSExtractor::Track : public MediaTrackHelper {
Track(MPEG2PSExtractor *extractor,
unsigned stream_id, unsigned stream_type);
- virtual status_t start();
- virtual status_t stop();
- virtual status_t getFormat(MetaDataBase &);
+ virtual media_status_t start();
+ virtual media_status_t stop();
+ virtual media_status_t getFormat(AMediaFormat *);
- virtual status_t read(
- MediaBufferBase **buffer, const ReadOptions *options);
+ virtual media_status_t read(
+ MediaBufferHelper **buffer, const ReadOptions *options);
protected:
virtual ~Track();
@@ -72,21 +73,21 @@
};
struct MPEG2PSExtractor::WrappedTrack : public MediaTrackHelper {
- WrappedTrack(MPEG2PSExtractor *extractor, const sp<Track> &track);
+ WrappedTrack(MPEG2PSExtractor *extractor, Track *track);
- virtual status_t start();
- virtual status_t stop();
- virtual status_t getFormat(MetaDataBase &);
+ virtual media_status_t start();
+ virtual media_status_t stop();
+ virtual media_status_t getFormat(AMediaFormat *);
- virtual status_t read(
- MediaBufferBase **buffer, const ReadOptions *options);
+ virtual media_status_t read(
+ MediaBufferHelper **buffer, const ReadOptions *options);
protected:
virtual ~WrappedTrack();
private:
MPEG2PSExtractor *mExtractor;
- sp<MPEG2PSExtractor::Track> mTrack;
+ MPEG2PSExtractor::Track *mTrack;
DISALLOW_EVIL_CONSTRUCTORS(WrappedTrack);
};
@@ -107,13 +108,14 @@
}
// Remove all tracks that were unable to determine their format.
- MetaDataBase meta;
+ AMediaFormat *meta = AMediaFormat_new();
for (size_t i = mTracks.size(); i > 0;) {
i--;
- if (mTracks.valueAt(i)->getFormat(meta) != OK) {
+ if (mTracks.valueAt(i)->getFormat(meta) != AMEDIA_OK) {
mTracks.removeItemsAt(i);
}
}
+ AMediaFormat_delete(meta);
mScanning = false;
}
@@ -134,20 +136,20 @@
return new WrappedTrack(this, mTracks.valueAt(index));
}
-status_t MPEG2PSExtractor::getTrackMetaData(
- MetaDataBase &meta,
+media_status_t MPEG2PSExtractor::getTrackMetaData(
+ AMediaFormat *meta,
size_t index, uint32_t /* flags */) {
if (index >= mTracks.size()) {
- return UNKNOWN_ERROR;
+ return AMEDIA_ERROR_UNKNOWN;
}
return mTracks.valueAt(index)->getFormat(meta);
}
-status_t MPEG2PSExtractor::getMetaData(MetaDataBase &meta) {
- meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2PS);
+media_status_t MPEG2PSExtractor::getMetaData(AMediaFormat *meta) {
+ AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_CONTAINER_MPEG2PS);
- return OK;
+ return AMEDIA_OK;
}
uint32_t MPEG2PSExtractor::flags() const {
@@ -635,42 +637,55 @@
mQueue = NULL;
}
-status_t MPEG2PSExtractor::Track::start() {
+media_status_t MPEG2PSExtractor::Track::start() {
if (mSource == NULL) {
- return NO_INIT;
+ return AMEDIA_ERROR_UNKNOWN;
}
- return mSource->start(NULL); // AnotherPacketSource::start doesn't use its argument
+ // initialize with one small buffer, but allow growth
+ mBufferGroup->init(1 /* one buffer */, 256 /* buffer size */, 64 /* max number of buffers */);
+
+ if (mSource->start(NULL) == OK) { // AnotherPacketSource::start doesn't use its argument
+ return AMEDIA_OK;
+ }
+ return AMEDIA_ERROR_UNKNOWN;
}
-status_t MPEG2PSExtractor::Track::stop() {
+media_status_t MPEG2PSExtractor::Track::stop() {
if (mSource == NULL) {
- return NO_INIT;
+ return AMEDIA_ERROR_UNKNOWN;
}
- return mSource->stop();
+ if (mSource->stop() == OK) {
+ return AMEDIA_OK;
+ }
+ return AMEDIA_ERROR_UNKNOWN;
}
-status_t MPEG2PSExtractor::Track::getFormat(MetaDataBase &meta) {
+void copyAMessageToAMediaFormat(AMediaFormat *format, sp<AMessage> msg);
+
+media_status_t MPEG2PSExtractor::Track::getFormat(AMediaFormat *meta) {
if (mSource == NULL) {
- return NO_INIT;
+ return AMEDIA_ERROR_UNKNOWN;
}
sp<MetaData> sourceMeta = mSource->getFormat();
- meta = *sourceMeta;
- return OK;
+ sp<AMessage> msg;
+ convertMetaDataToMessage(sourceMeta, &msg);
+ copyAMessageToAMediaFormat(meta, msg);
+ return AMEDIA_OK;
}
-status_t MPEG2PSExtractor::Track::read(
- MediaBufferBase **buffer, const ReadOptions *options) {
+media_status_t MPEG2PSExtractor::Track::read(
+ MediaBufferHelper **buffer, const ReadOptions *options) {
if (mSource == NULL) {
- return NO_INIT;
+ return AMEDIA_ERROR_UNKNOWN;
}
status_t finalResult;
while (!mSource->hasBufferAvailable(&finalResult)) {
if (finalResult != OK) {
- return ERROR_END_OF_STREAM;
+ return AMEDIA_ERROR_END_OF_STREAM;
}
status_t err = mExtractor->feedMore();
@@ -680,7 +695,47 @@
}
}
- return mSource->read(buffer, (MediaSource::ReadOptions*)options);
+ MediaBufferBase *mbuf;
+ mSource->read(&mbuf, (MediaTrack::ReadOptions*) options);
+ size_t length = mbuf->range_length();
+ MediaBufferHelper *outbuf;
+ mBufferGroup->acquire_buffer(&outbuf, false, length);
+ memcpy(outbuf->data(), mbuf->data(), length);
+ outbuf->set_range(0, length);
+ *buffer = outbuf;
+ MetaDataBase &inMeta = mbuf->meta_data();
+ AMediaFormat *outMeta = outbuf->meta_data();
+ int64_t val64;
+ if (inMeta.findInt64(kKeyTime, &val64)) {
+ AMediaFormat_setInt64(outMeta, AMEDIAFORMAT_KEY_TIME_US, val64);
+ }
+ int32_t val32;
+ if (inMeta.findInt32(kKeyIsSyncFrame, &val32)) {
+ AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, val32);
+ }
+ if (inMeta.findInt32(kKeyCryptoMode, &val32)) {
+ AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_CRYPTO_MODE, val32);
+ }
+ uint32_t bufType;
+ const void *bufData;
+ size_t bufSize;
+ if (inMeta.findData(kKeyCryptoIV, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_IV, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeyCryptoKey, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_KEY, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeyPlainSizes, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeyEncryptedSizes, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeySEI, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_SEI, bufData, bufSize);
+ }
+ mbuf->release();
+ return AMEDIA_OK;
}
status_t MPEG2PSExtractor::Track::appendPESData(
@@ -726,7 +781,7 @@
////////////////////////////////////////////////////////////////////////////////
MPEG2PSExtractor::WrappedTrack::WrappedTrack(
- MPEG2PSExtractor *extractor, const sp<Track> &track)
+ MPEG2PSExtractor *extractor, Track *track)
: mExtractor(extractor),
mTrack(track) {
}
@@ -734,20 +789,20 @@
MPEG2PSExtractor::WrappedTrack::~WrappedTrack() {
}
-status_t MPEG2PSExtractor::WrappedTrack::start() {
+media_status_t MPEG2PSExtractor::WrappedTrack::start() {
return mTrack->start();
}
-status_t MPEG2PSExtractor::WrappedTrack::stop() {
+media_status_t MPEG2PSExtractor::WrappedTrack::stop() {
return mTrack->stop();
}
-status_t MPEG2PSExtractor::WrappedTrack::getFormat(MetaDataBase &meta) {
+media_status_t MPEG2PSExtractor::WrappedTrack::getFormat(AMediaFormat *meta) {
return mTrack->getFormat(meta);
}
-status_t MPEG2PSExtractor::WrappedTrack::read(
- MediaBufferBase **buffer, const ReadOptions *options) {
+media_status_t MPEG2PSExtractor::WrappedTrack::read(
+ MediaBufferHelper **buffer, const ReadOptions *options) {
return mTrack->read(buffer, options);
}
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/MPEG2PSExtractor.h
index c4082ef..e5d591f 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.h
@@ -37,9 +37,9 @@
virtual size_t countTracks();
virtual MediaTrackHelper *getTrack(size_t index);
- virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+ virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
- virtual status_t getMetaData(MetaDataBase& meta);
+ virtual media_status_t getMetaData(AMediaFormat *meta);
virtual uint32_t flags() const;
virtual const char * name() { return "MPEG2PSExtractor"; }
@@ -57,7 +57,7 @@
off64_t mOffset;
status_t mFinalResult;
sp<ABuffer> mBuffer;
- KeyedVector<unsigned, sp<Track> > mTracks;
+ KeyedVector<unsigned, Track* > mTracks;
bool mScanning;
bool mProgramStreamMapValid;
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index 605b13a..3bb2af7 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -34,6 +34,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
#include <utils/String8.h>
#include "mpeg2ts/AnotherPacketSource.h"
@@ -57,12 +58,12 @@
bool doesSeek);
virtual ~MPEG2TSSource();
- virtual status_t start();
- virtual status_t stop();
- virtual status_t getFormat(MetaDataBase &);
+ virtual media_status_t start();
+ virtual media_status_t stop();
+ virtual media_status_t getFormat(AMediaFormat *);
- virtual status_t read(
- MediaBufferBase **buffer, const ReadOptions *options = NULL);
+ virtual media_status_t read(
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
private:
MPEG2TSExtractor *mExtractor;
@@ -87,22 +88,84 @@
MPEG2TSSource::~MPEG2TSSource() {
}
-status_t MPEG2TSSource::start() {
- return mImpl->start(NULL); // AnotherPacketSource::start() doesn't use its argument
+media_status_t MPEG2TSSource::start() {
+ // initialize with one small buffer, but allow growth
+ mBufferGroup->init(1 /* one buffer */, 256 /* buffer size */, 64 /* max number of buffers */);
+
+ if (mImpl->start(NULL) == OK) { // AnotherPacketSource::start() doesn't use its argument
+ return AMEDIA_OK;
+ }
+ return AMEDIA_ERROR_UNKNOWN;
}
-status_t MPEG2TSSource::stop() {
- return mImpl->stop();
+media_status_t MPEG2TSSource::stop() {
+ if (mImpl->stop() == OK) {
+ return AMEDIA_OK;
+ }
+ return AMEDIA_ERROR_UNKNOWN;
}
-status_t MPEG2TSSource::getFormat(MetaDataBase &meta) {
+void copyAMessageToAMediaFormat(AMediaFormat *format, sp<AMessage> msg) {
+ size_t numEntries = msg->countEntries();
+ for (size_t i = 0; i < numEntries; i++) {
+ AMessage::Type type;
+ const char *name = msg->getEntryNameAt(i, &type);
+ AMessage::ItemData id = msg->getEntryAt(i);
+
+ switch (type) {
+ case AMessage::kTypeInt32:
+ int32_t val32;
+ if (id.find(&val32)) {
+ AMediaFormat_setInt32(format, name, val32);
+ }
+ break;
+ case AMessage::kTypeInt64:
+ int64_t val64;
+ if (id.find(&val64)) {
+ AMediaFormat_setInt64(format, name, val64);
+ }
+ break;
+ case AMessage::kTypeFloat:
+ float valfloat;
+ if (id.find(&valfloat)) {
+ AMediaFormat_setFloat(format, name, valfloat);
+ }
+ break;
+ case AMessage::kTypeDouble:
+ double valdouble;
+ if (id.find(&valdouble)) {
+ AMediaFormat_setDouble(format, name, valdouble);
+ }
+ break;
+ case AMessage::kTypeString:
+ if (AString s; id.find(&s)) {
+ AMediaFormat_setString(format, name, s.c_str());
+ }
+ break;
+ case AMessage::kTypeBuffer:
+ {
+ sp<ABuffer> buffer;
+ if (id.find(&buffer)) {
+ AMediaFormat_setBuffer(format, name, buffer->data(), buffer->size());
+ }
+ break;
+ }
+ default:
+ ALOGW("ignoring unsupported type %d '%s'", type, name);
+ }
+ }
+}
+
+media_status_t MPEG2TSSource::getFormat(AMediaFormat *meta) {
sp<MetaData> implMeta = mImpl->getFormat();
- meta = *implMeta;
- return OK;
+ sp<AMessage> msg;
+ convertMetaDataToMessage(implMeta, &msg);
+ copyAMessageToAMediaFormat(meta, msg);
+ return AMEDIA_OK;
}
-status_t MPEG2TSSource::read(
- MediaBufferBase **out, const ReadOptions *options) {
+media_status_t MPEG2TSSource::read(
+ MediaBufferHelper **out, const ReadOptions *options) {
*out = NULL;
int64_t seekTimeUs;
@@ -110,16 +173,59 @@
if (mDoesSeek && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
// seek is needed
status_t err = mExtractor->seek(seekTimeUs, (ReadOptions::SeekMode)seekMode);
- if (err != OK) {
- return err;
+ if (err == ERROR_END_OF_STREAM) {
+ return AMEDIA_ERROR_END_OF_STREAM;
+ } else if (err != OK) {
+ return AMEDIA_ERROR_UNKNOWN;
}
}
if (mExtractor->feedUntilBufferAvailable(mImpl) != OK) {
- return ERROR_END_OF_STREAM;
+ return AMEDIA_ERROR_END_OF_STREAM;
}
- return mImpl->read(out, (MediaSource::ReadOptions*) options);
+ MediaBufferBase *mbuf;
+ mImpl->read(&mbuf, (MediaTrack::ReadOptions*) options);
+ size_t length = mbuf->range_length();
+ MediaBufferHelper *outbuf;
+ mBufferGroup->acquire_buffer(&outbuf, false, length);
+ memcpy(outbuf->data(), mbuf->data(), length);
+ outbuf->set_range(0, length);
+ *out = outbuf;
+ MetaDataBase &inMeta = mbuf->meta_data();
+ AMediaFormat *outMeta = outbuf->meta_data();
+ AMediaFormat_clear(outMeta);
+ int64_t val64;
+ if (inMeta.findInt64(kKeyTime, &val64)) {
+ AMediaFormat_setInt64(outMeta, AMEDIAFORMAT_KEY_TIME_US, val64);
+ }
+ int32_t val32;
+ if (inMeta.findInt32(kKeyIsSyncFrame, &val32)) {
+ AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, val32);
+ }
+ if (inMeta.findInt32(kKeyCryptoMode, &val32)) {
+ AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_CRYPTO_MODE, val32);
+ }
+ uint32_t bufType;
+ const void *bufData;
+ size_t bufSize;
+ if (inMeta.findData(kKeyCryptoIV, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_IV, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeyCryptoKey, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_KEY, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeyPlainSizes, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeyEncryptedSizes, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES, bufData, bufSize);
+ }
+ if (inMeta.findData(kKeySEI, &bufType, &bufData, &bufSize)) {
+ AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_SEI, bufData, bufSize);
+ }
+ mbuf->release();
+ return AMEDIA_OK;
}
////////////////////////////////////////////////////////////////////////////////
@@ -151,22 +257,23 @@
(mSeekSyncPoints == &mSyncPoints.editItemAt(index)));
}
-status_t MPEG2TSExtractor::getTrackMetaData(
- MetaDataBase &meta,
+media_status_t MPEG2TSExtractor::getTrackMetaData(
+ AMediaFormat *meta,
size_t index, uint32_t /* flags */) {
sp<MetaData> implMeta = index < mSourceImpls.size()
? mSourceImpls.editItemAt(index)->getFormat() : NULL;
if (implMeta == NULL) {
- return UNKNOWN_ERROR;
+ return AMEDIA_ERROR_UNKNOWN;
}
- meta = *implMeta;
- return OK;
+ sp<AMessage> msg = new AMessage;
+ convertMetaDataToMessage(implMeta, &msg);
+ copyAMessageToAMediaFormat(meta, msg);
+ return AMEDIA_OK;
}
-status_t MPEG2TSExtractor::getMetaData(MetaDataBase &meta) {
- meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
-
- return OK;
+media_status_t MPEG2TSExtractor::getMetaData(AMediaFormat *meta) {
+ AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
+ return AMEDIA_OK;
}
//static
@@ -177,7 +284,7 @@
|| !strcasecmp(MEDIA_MIMETYPE_AUDIO_SCRAMBLED, mime));
}
-status_t MPEG2TSExtractor::setMediaCas(const uint8_t* casToken, size_t size) {
+media_status_t MPEG2TSExtractor::setMediaCas(const uint8_t* casToken, size_t size) {
HalToken halToken;
halToken.setToExternal((uint8_t*)casToken, size);
sp<ICas> cas = ICas::castFrom(retrieveHalInterface(halToken));
@@ -187,8 +294,9 @@
if (err == OK) {
ALOGI("All tracks now have descramblers");
init();
+ return AMEDIA_OK;
}
- return err;
+ return AMEDIA_ERROR_UNKNOWN;
}
void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
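
The PS and TS changes above route all track and file metadata through the NDK AMediaFormat API (via convertMetaDataToMessage and the new copyAMessageToAMediaFormat helper) instead of MetaDataBase. A small standalone illustration of that target API, assuming linkage against libmediandk; the key/value choices here are examples, not taken from the patch:

#include <media/NdkMediaFormat.h>
#include <cstdio>

int main() {
    AMediaFormat *fmt = AMediaFormat_new();
    // The extractors populate entries like these from the legacy metadata.
    AMediaFormat_setString(fmt, AMEDIAFORMAT_KEY_MIME, "video/mp2ts");
    AMediaFormat_setInt64(fmt, AMEDIAFORMAT_KEY_DURATION, 5000000);  // 5 s in us
    int64_t durationUs = 0;
    if (AMediaFormat_getInt64(fmt, AMEDIAFORMAT_KEY_DURATION, &durationUs)) {
        printf("duration = %lld us\n", (long long)durationUs);
    }
    AMediaFormat_delete(fmt);
    return 0;
}
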
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/MPEG2TSExtractor.h
index 4013442..e425d23 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.h
@@ -43,11 +43,11 @@
virtual size_t countTracks();
virtual MediaTrackHelper *getTrack(size_t index);
- virtual status_t getTrackMetaData(MetaDataBase &meta, size_t index, uint32_t flags);
+ virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
- virtual status_t getMetaData(MetaDataBase& meta);
+ virtual media_status_t getMetaData(AMediaFormat *meta);
- virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) override;
+ virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) override;
virtual uint32_t flags() const;
virtual const char * name() { return "MPEG2TSExtractor"; }
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index b28877d..604ec59 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -13,7 +13,6 @@
shared_libs: [
"liblog",
- "libmediaextractor",
"libmediandk",
],
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index cc2c792..c3914f1 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -28,11 +28,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/foundation/ByteUtils.h>
-#include <media/stagefright/MediaBufferBase.h>
-#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaDataBase.h>
#include <media/stagefright/MetaDataUtils.h>
#include <system/audio.h>
#include <utils/String8.h>
@@ -48,7 +45,7 @@
namespace android {
-struct OggSource : public MediaTrackHelperV2 {
+struct OggSource : public MediaTrackHelper {
explicit OggSource(OggExtractor *extractor);
virtual media_status_t getFormat(AMediaFormat *);
@@ -57,7 +54,7 @@
virtual media_status_t stop();
virtual media_status_t read(
- MediaBufferBase **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
protected:
virtual ~OggSource();
@@ -85,7 +82,7 @@
status_t seekToTime(int64_t timeUs);
status_t seekToOffset(off64_t offset);
- virtual media_status_t readNextPacket(MediaBufferBase **buffer) = 0;
+ virtual media_status_t readNextPacket(MediaBufferHelper **buffer) = 0;
status_t init();
@@ -93,6 +90,9 @@
return AMediaFormat_copy(meta, mFileMeta);
}
+ void setBufferGroup(MediaBufferGroupHelper *group) {
+ mBufferGroup = group;
+ }
protected:
struct Page {
uint64_t mGranulePosition;
@@ -110,6 +110,7 @@
int64_t mTimeUs;
};
+ MediaBufferGroupHelper *mBufferGroup;
DataSourceHelper *mSource;
off64_t mOffset;
Page mCurrentPage;
@@ -148,7 +149,7 @@
// 1 - bitstream identification header
// 3 - comment header
// 5 - codec setup header (Vorbis only)
- virtual media_status_t verifyHeader(MediaBufferBase *buffer, uint8_t type) = 0;
+ virtual media_status_t verifyHeader(MediaBufferHelper *buffer, uint8_t type) = 0;
// Read the next ogg packet from the underlying data source; optionally
// calculate the timestamp for the output packet whilst pretending
@@ -156,9 +157,9 @@
//
// *buffer is NULL'ed out immediately upon entry, and if successful a new buffer is allocated;
// clients are responsible for releasing the original buffer.
- media_status_t _readNextPacket(MediaBufferBase **buffer, bool calcVorbisTimestamp);
+ media_status_t _readNextPacket(MediaBufferHelper **buffer, bool calcVorbisTimestamp);
- int32_t getPacketBlockSize(MediaBufferBase *buffer);
+ int32_t getPacketBlockSize(MediaBufferHelper *buffer);
void parseFileMetaData();
@@ -182,7 +183,7 @@
virtual uint64_t approxBitrate() const;
- virtual media_status_t readNextPacket(MediaBufferBase **buffer) {
+ virtual media_status_t readNextPacket(MediaBufferHelper **buffer) {
return _readNextPacket(buffer, /* calcVorbisTimestamp = */ true);
}
@@ -194,7 +195,7 @@
return granulePos * 1000000ll / mVi.rate;
}
- virtual media_status_t verifyHeader(MediaBufferBase *buffer, uint8_t type);
+ virtual media_status_t verifyHeader(MediaBufferHelper *buffer, uint8_t type);
};
struct MyOpusExtractor : public MyOggExtractor {
@@ -212,16 +213,16 @@
return 0;
}
- virtual media_status_t readNextPacket(MediaBufferBase **buffer);
+ virtual media_status_t readNextPacket(MediaBufferHelper **buffer);
protected:
virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const;
- virtual media_status_t verifyHeader(MediaBufferBase *buffer, uint8_t type);
+ virtual media_status_t verifyHeader(MediaBufferHelper *buffer, uint8_t type);
private:
- media_status_t verifyOpusHeader(MediaBufferBase *buffer);
- media_status_t verifyOpusComments(MediaBufferBase *buffer);
- uint32_t getNumSamplesInPacket(MediaBufferBase *buffer) const;
+ media_status_t verifyOpusHeader(MediaBufferHelper *buffer);
+ media_status_t verifyOpusComments(MediaBufferHelper *buffer);
+ uint32_t getNumSamplesInPacket(MediaBufferHelper *buffer) const;
uint8_t mChannelCount;
uint16_t mCodecDelay;
@@ -249,7 +250,9 @@
if (mStarted) {
return AMEDIA_ERROR_INVALID_OPERATION;
}
-
+ // initialize buffer group with a single small buffer, but a generous upper limit
+ mBufferGroup->init(1 /* number of buffers */, 128 /* size */, 64 /* max number of buffers */);
+ mExtractor->mImpl->setBufferGroup(mBufferGroup);
mStarted = true;
return AMEDIA_OK;
@@ -262,7 +265,7 @@
}
media_status_t OggSource::read(
- MediaBufferBase **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
*out = NULL;
int64_t seekTimeUs;
@@ -274,26 +277,27 @@
}
}
- MediaBufferBase *packet;
+ MediaBufferHelper *packet;
media_status_t err = mExtractor->mImpl->readNextPacket(&packet);
if (err != AMEDIA_OK) {
return err;
}
+ AMediaFormat *meta = packet->meta_data();
#if 0
int64_t timeUs;
- if (packet->meta_data().findInt64(kKeyTime, &timeUs)) {
+ if (AMediaFormat_getInt64(meta, AMEDIAFORMAT_KEY_TIME_US, &timeUs)) {
ALOGI("found time = %lld us", timeUs);
} else {
ALOGI("NO time");
}
#endif
- packet->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
*out = packet;
-
+ ALOGV("returning buffer %p", packet);
return AMEDIA_OK;
}
@@ -304,7 +308,8 @@
const char *mimeType,
size_t numHeaders,
int64_t seekPreRollUs)
- : mSource(source),
+ : mBufferGroup(NULL),
+ mSource(source),
mOffset(0),
mCurGranulePosition(0),
mPrevGranulePosition(0),
@@ -573,13 +578,13 @@
return sizeof(header) + page->mNumSegments + totalSize;
}
-media_status_t MyOpusExtractor::readNextPacket(MediaBufferBase **out) {
+media_status_t MyOpusExtractor::readNextPacket(MediaBufferHelper **out) {
if (mOffset <= mFirstDataOffset && mStartGranulePosition < 0) {
// The first sample might not start at time 0; find out where by subtracting
// the number of samples on the first page from the granule position
// (position of last complete sample) of the first page. This happens
// the first time before we attempt to read a packet from the first page.
- MediaBufferBase *mBuf;
+ MediaBufferHelper *mBuf;
uint32_t numSamples = 0;
uint64_t curGranulePosition = 0;
while (true) {
@@ -617,24 +622,25 @@
int32_t currentPageSamples;
// Calculate timestamps by accumulating durations starting from the first sample of a page;
// We assume that we only seek to page boundaries.
- if ((*out)->meta_data().findInt32(kKeyValidSamples, &currentPageSamples)) {
+ AMediaFormat *meta = (*out)->meta_data();
+ if (AMediaFormat_getInt32(meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, &currentPageSamples)) {
// first packet in page
if (mOffset == mFirstDataOffset) {
currentPageSamples -= mStartGranulePosition;
- (*out)->meta_data().setInt32(kKeyValidSamples, currentPageSamples);
+ AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, currentPageSamples);
}
mCurGranulePosition = mCurrentPage.mGranulePosition - currentPageSamples;
}
int64_t timeUs = getTimeUsOfGranule(mCurGranulePosition);
- (*out)->meta_data().setInt64(kKeyTime, timeUs);
+ AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
uint32_t frames = getNumSamplesInPacket(*out);
mCurGranulePosition += frames;
return AMEDIA_OK;
}
-uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBufferBase *buffer) const {
+uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBufferHelper *buffer) const {
if (buffer == NULL || buffer->range_length() < 1) {
return 0;
}
@@ -680,10 +686,66 @@
return numSamples;
}
-media_status_t MyOggExtractor::_readNextPacket(MediaBufferBase **out, bool calcVorbisTimestamp) {
+/*
+ * basic mediabuffer implementation used during initial parsing of the
+ * header packets, which happens before we have a buffer group
+ */
+class StandAloneMediaBuffer : public MediaBufferHelper {
+private:
+ void *mData;
+ size_t mSize;
+ size_t mOffset;
+ size_t mLength;
+ AMediaFormat *mFormat;
+public:
+ StandAloneMediaBuffer(size_t size) : MediaBufferHelper(NULL) {
+ mSize = size;
+ mData = malloc(mSize);
+ mOffset = 0;
+ mLength = mSize;
+ mFormat = AMediaFormat_new();
+ ALOGV("created standalone media buffer %p of size %zu", this, mSize);
+ }
+
+ ~StandAloneMediaBuffer() override {
+ free(mData);
+ AMediaFormat_delete(mFormat);
+ ALOGV("deleted standalone media buffer %p of size %zu", this, mSize);
+ }
+
+ void release() override {
+ delete this;
+ }
+
+ void* data() override {
+ return mData;
+ }
+
+ size_t size() override {
+ return mSize;
+ }
+
+ size_t range_offset() override {
+ return mOffset;
+ }
+
+ size_t range_length() override {
+ return mLength;
+ }
+
+ void set_range(size_t offset, size_t length) override {
+ mOffset = offset;
+ mLength = length;
+ }
+ AMediaFormat *meta_data() override {
+ return mFormat;
+ }
+};
+
+media_status_t MyOggExtractor::_readNextPacket(MediaBufferHelper **out, bool calcVorbisTimestamp) {
*out = NULL;
- MediaBufferBase *buffer = NULL;
+ MediaBufferHelper *buffer = NULL;
int64_t timeUs = -1;
for (;;) {
@@ -719,7 +781,13 @@
ALOGE("b/36592202");
return AMEDIA_ERROR_MALFORMED;
}
- MediaBufferBase *tmp = MediaBufferBase::Create(fullSize);
+ MediaBufferHelper *tmp;
+ if (mBufferGroup) {
+ mBufferGroup->acquire_buffer(&tmp, false, fullSize);
+ ALOGV("acquired buffer %p from group", tmp);
+ } else {
+ tmp = new StandAloneMediaBuffer(fullSize);
+ }
if (tmp == NULL) {
if (buffer != NULL) {
buffer->release();
@@ -727,6 +795,7 @@
ALOGE("b/36592202");
return AMEDIA_ERROR_MALFORMED;
}
+ AMediaFormat_clear(tmp->meta_data());
if (buffer != NULL) {
memcpy(tmp->data(), buffer->data(), buffer->range_length());
tmp->set_range(0, buffer->range_length());
@@ -756,8 +825,9 @@
// We've just read the entire packet.
if (mFirstPacketInPage) {
- buffer->meta_data().setInt32(
- kKeyValidSamples, mCurrentPageSamples);
+ AMediaFormat *meta = buffer->meta_data();
+ AMediaFormat_setInt32(
+ meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, mCurrentPageSamples);
mFirstPacketInPage = false;
}
@@ -778,7 +848,8 @@
mCurrentPage.mPrevPacketPos += actualBlockSize / 2;
mCurrentPage.mPrevPacketSize = curBlockSize;
}
- buffer->meta_data().setInt64(kKeyTime, timeUs);
+ AMediaFormat *meta = buffer->meta_data();
+ AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
}
*out = buffer;
@@ -824,11 +895,13 @@
// is already complete.
if (timeUs >= 0) {
- buffer->meta_data().setInt64(kKeyTime, timeUs);
+ AMediaFormat *meta = buffer->meta_data();
+ AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
}
- buffer->meta_data().setInt32(
- kKeyValidSamples, mCurrentPageSamples);
+ AMediaFormat *meta = buffer->meta_data();
+ AMediaFormat_setInt32(
+ meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, mCurrentPageSamples);
mFirstPacketInPage = false;
*out = buffer;
@@ -843,7 +916,7 @@
AMediaFormat_setString(mMeta, AMEDIAFORMAT_KEY_MIME, mMimeType);
media_status_t err;
- MediaBufferBase *packet;
+ MediaBufferHelper *packet;
for (size_t i = 0; i < mNumHeaders; ++i) {
// ignore timestamp for configuration packets
if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != AMEDIA_OK) {
@@ -920,7 +993,7 @@
}
}
-int32_t MyOggExtractor::getPacketBlockSize(MediaBufferBase *buffer) {
+int32_t MyOggExtractor::getPacketBlockSize(MediaBufferHelper *buffer) {
const uint8_t *data =
(const uint8_t *)buffer->data() + buffer->range_offset();
@@ -960,7 +1033,7 @@
return pcmSamplePosition * 1000000ll / kOpusSampleRate;
}
-media_status_t MyOpusExtractor::verifyHeader(MediaBufferBase *buffer, uint8_t type) {
+media_status_t MyOpusExtractor::verifyHeader(MediaBufferHelper *buffer, uint8_t type) {
switch (type) {
// there are actually no header types defined in the Opus spec; we choose 1 and 3 to mean
// header and comments such that we can share code with MyVorbisExtractor.
@@ -973,7 +1046,7 @@
}
}
-media_status_t MyOpusExtractor::verifyOpusHeader(MediaBufferBase *buffer) {
+media_status_t MyOpusExtractor::verifyOpusHeader(MediaBufferHelper *buffer) {
const size_t kOpusHeaderSize = 19;
const uint8_t *data =
(const uint8_t *)buffer->data() + buffer->range_offset();
@@ -1001,7 +1074,7 @@
return AMEDIA_OK;
}
-media_status_t MyOpusExtractor::verifyOpusComments(MediaBufferBase *buffer) {
+media_status_t MyOpusExtractor::verifyOpusComments(MediaBufferHelper *buffer) {
// add artificial framing bit so we can reuse _vorbis_unpack_comment
int32_t commentSize = buffer->range_length() + 1;
auto tmp = heapbuffer<uint8_t>(commentSize);
@@ -1094,7 +1167,7 @@
}
media_status_t MyVorbisExtractor::verifyHeader(
- MediaBufferBase *buffer, uint8_t type) {
+ MediaBufferHelper *buffer, uint8_t type) {
const uint8_t *data =
(const uint8_t *)buffer->data() + buffer->range_offset();
@@ -1262,7 +1335,7 @@
return mInitCheck != OK ? 0 : 1;
}
-MediaTrackHelperV2 *OggExtractor::getTrack(size_t index) {
+MediaTrackHelper *OggExtractor::getTrack(size_t index) {
if (index >= 1) {
return NULL;
}
@@ -1284,13 +1357,13 @@
return mImpl->getFileMetaData(meta);
}
-static CMediaExtractorV2* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
CDataSource *source,
void *) {
- return wrapV2(new OggExtractor(new DataSourceHelper(source)));
+ return wrap(new OggExtractor(new DataSourceHelper(source)));
}
-static CreatorFuncV2 Sniff(
+static CreatorFunc Sniff(
CDataSource *source,
float *confidence,
void **,
@@ -1311,7 +1384,7 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT,
+ EXTRACTORDEF_VERSION,
UUID("8cc5cd06-f772-495e-8a62-cba9649374e9"),
1, // version
"Ogg Extractor",
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/OggExtractor.h
index cd674f3..c75dfa9 100644
--- a/media/extractors/ogg/OggExtractor.h
+++ b/media/extractors/ogg/OggExtractor.h
@@ -31,11 +31,11 @@
struct MyOggExtractor;
struct OggSource;
-struct OggExtractor : public MediaExtractorPluginHelperV2 {
+struct OggExtractor : public MediaExtractorPluginHelper {
explicit OggExtractor(DataSourceHelper *source);
virtual size_t countTracks();
- virtual MediaTrackHelperV2 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
index 1f0aae5..6f9f689 100644
--- a/media/extractors/wav/WAVExtractor.cpp
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -66,7 +66,7 @@
return ptr[1] << 8 | ptr[0];
}
-struct WAVSource : public MediaTrackHelperV3 {
+struct WAVSource : public MediaTrackHelper {
WAVSource(
DataSourceHelper *dataSource,
AMediaFormat *meta,
@@ -79,7 +79,7 @@
virtual media_status_t getFormat(AMediaFormat *meta);
virtual media_status_t read(
- MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+ MediaBufferHelper **buffer, const ReadOptions *options = NULL);
virtual bool supportNonblockingRead() { return true; }
@@ -131,7 +131,7 @@
return mInitCheck == OK ? 1 : 0;
}
-MediaTrackHelperV3 *WAVExtractor::getTrack(size_t index) {
+MediaTrackHelper *WAVExtractor::getTrack(size_t index) {
if (mInitCheck != OK || index > 0) {
return NULL;
}
@@ -428,7 +428,7 @@
}
media_status_t WAVSource::read(
- MediaBufferHelperV3 **out, const ReadOptions *options) {
+ MediaBufferHelper **out, const ReadOptions *options) {
*out = NULL;
if (options != nullptr && options->getNonBlocking() && !mBufferGroup->has_buffers()) {
@@ -454,7 +454,7 @@
mCurrentPos = pos + mOffset;
}
- MediaBufferHelperV3 *buffer;
+ MediaBufferHelper *buffer;
media_status_t err = mBufferGroup->acquire_buffer(&buffer);
if (err != OK) {
return err;
@@ -581,13 +581,13 @@
////////////////////////////////////////////////////////////////////////////////
-static CMediaExtractorV3* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
CDataSource *source,
void *) {
- return wrapV3(new WAVExtractor(new DataSourceHelper(source)));
+ return wrap(new WAVExtractor(new DataSourceHelper(source)));
}
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
CDataSource *source,
float *confidence,
void **,
@@ -621,11 +621,11 @@
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
return {
- EXTRACTORDEF_VERSION_CURRENT + 1,
+ EXTRACTORDEF_VERSION,
UUID("7d613858-5837-4a38-84c5-332d1cddee27"),
1, // version
"WAV Extractor",
- { .v3 = Sniff }
+ { .v2 = Sniff }
};
}
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/WAVExtractor.h
index 9b7dfde..b514196 100644
--- a/media/extractors/wav/WAVExtractor.h
+++ b/media/extractors/wav/WAVExtractor.h
@@ -29,12 +29,12 @@
struct CDataSource;
class String8;
-class WAVExtractor : public MediaExtractorPluginHelperV3 {
+class WAVExtractor : public MediaExtractorPluginHelper {
public:
explicit WAVExtractor(DataSourceHelper *source);
virtual size_t countTracks();
- virtual MediaTrackHelperV3 *getTrack(size_t index);
+ virtual MediaTrackHelper *getTrack(size_t index);
virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 4a0e6da..58ef7b1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -258,7 +258,7 @@
callbackResult = maybeCallDataCallback(mCallbackBuffer, mCallbackFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
- ALOGD("callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
break;
}
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 2ae37a5..9af47b2 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -293,7 +293,7 @@
break;
}
} else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
- ALOGV("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+ ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
break;
}
}
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 319467e..cb243a0 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -184,3 +184,15 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_return_stop",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_return_stop.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_return_stop.cpp b/media/libaaudio/tests/test_return_stop.cpp
new file mode 100644
index 0000000..9a9e00c
--- /dev/null
+++ b/media/libaaudio/tests/test_return_stop.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Return stop from the callback.
+ * Expect the callback to cease.
+ * Check the logcat for bad behavior.
+ */
+
+#include <atomic>
+#include <stdio.h>
+#include <thread>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+
+#define DEFAULT_TIMEOUT_NANOS ((int64_t)1000000000)
+#define STOP_AT_MSEC 1000
+#define LOOP_DURATION_MSEC 4000
+#define SLEEP_DURATION_MSEC 200
+
+static void s_myErrorCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ aaudio_result_t error);
+
+struct AudioEngine {
+ AAudioStreamBuilder *builder = nullptr;
+ AAudioStream *stream = nullptr;
+ std::thread *thread = nullptr;
+ int32_t stopAtFrame = 0;
+ bool stopped = false;
+ // These counters are read and written by the callback and the main thread.
+ std::atomic<int32_t> framesRead{};
+ std::atomic<int32_t> startingFramesRead{};
+ std::atomic<int32_t> framesCalled{};
+ std::atomic<int32_t> callbackCount{};
+ std::atomic<int32_t> callbackCountAfterStop{};
+
+ void reset() {
+ framesRead.store(0);
+ startingFramesRead.store(0);
+ framesCalled.store(0);
+ callbackCount.store(0);
+ callbackCountAfterStop.store(0);
+ stopped = false;
+ }
+};
+
+// Callback function that fills the audio output buffer.
+static aaudio_data_callback_result_t s_myDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ (void) audioData;
+ (void) numFrames;
+ AudioEngine *engine = (struct AudioEngine *)userData;
+ engine->callbackCount++;
+ if (engine->stopped) {
+ engine->callbackCountAfterStop++;
+ }
+
+ engine->framesRead = (int32_t)AAudioStream_getFramesRead(stream);
+ if (engine->startingFramesRead == 0) {
+ engine->startingFramesRead.store(engine->framesRead.load());
+ }
+ engine->framesCalled += numFrames;
+ if (engine->framesCalled >= engine->stopAtFrame) {
+ engine->stopped = true;
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ } else {
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+ }
+}
+
+static aaudio_result_t s_OpenAudioStream(struct AudioEngine *engine,
+ aaudio_direction_t direction,
+ aaudio_sharing_mode_t sharingMode,
+ aaudio_performance_mode_t perfMode) {
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ aaudio_result_t result = AAudio_createStreamBuilder(&engine->builder);
+ if (result != AAUDIO_OK) {
+ printf("AAudio_createStreamBuilder returned %s",
+ AAudio_convertResultToText(result));
+ return result;
+ }
+
+ // Request stream properties.
+ AAudioStreamBuilder_setFormat(engine->builder, AAUDIO_FORMAT_PCM_FLOAT);
+ AAudioStreamBuilder_setPerformanceMode(engine->builder, perfMode);
+ AAudioStreamBuilder_setSharingMode(engine->builder, sharingMode);
+ AAudioStreamBuilder_setDirection(engine->builder, direction);
+ AAudioStreamBuilder_setDataCallback(engine->builder, s_myDataCallbackProc, engine);
+ AAudioStreamBuilder_setErrorCallback(engine->builder, s_myErrorCallbackProc, engine);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(engine->builder, &engine->stream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStreamBuilder_openStream returned %s",
+ AAudio_convertResultToText(result));
+ return result;
+ }
+
+ return result;
+}
+
+static aaudio_result_t s_CloseAudioStream(struct AudioEngine *engine) {
+ aaudio_result_t result = AAUDIO_OK;
+ if (engine->stream != nullptr) {
+ result = AAudioStream_close(engine->stream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_close returned %s\n",
+ AAudio_convertResultToText(result));
+ }
+ engine->stream = nullptr;
+ }
+ AAudioStreamBuilder_delete(engine->builder);
+ engine->builder = nullptr;
+ return result;
+}
+
+static void s_myErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error) {
+ printf("%s() - error = %d\n", __func__, error);
+}
+
+void usage() {
+ printf("test_return_stop [-i] [-x] [-n] [-c]\n");
+ printf(" -i direction INPUT, otherwise OUTPUT\n");
+ printf(" -x sharing mode EXCLUSIVE, otherwise SHARED\n");
+ printf(" -n performance mode NONE, otherwise LOW_LATENCY\n");
+ printf(" -c always return CONTINUE from callback, not STOP\n");
+}
+
+int main(int argc, char **argv) {
+ (void) argc;
+ (void) argv;
+ struct AudioEngine engine;
+ aaudio_sharing_mode_t sharingMode = AAUDIO_SHARING_MODE_SHARED;
+ aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT;
+ aaudio_result_t result = AAUDIO_OK;
+ bool alwaysContinue = false;
+ int errorCount = 0;
+ int callbackResult = EXIT_SUCCESS;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("Test Return Stop V1.0\n");
+ printf("Wait for a few seconds.\n");
+ printf("You should see callbackCount and framesRead stop advancing\n");
+ printf("when callbackCount reaches %d msec\n", STOP_AT_MSEC);
+ printf("\n");
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'c':
+ alwaysContinue = true;
+ break;
+ case 'i':
+ direction = AAUDIO_DIRECTION_INPUT;
+ break;
+ case 'n':
+ perfMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ break;
+ case 'x':
+ sharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ }
+
+ result = s_OpenAudioStream(&engine, direction, sharingMode, perfMode);
+ if (result != AAUDIO_OK) {
+ printf("s_OpenAudioStream returned %s",
+ AAudio_convertResultToText(result));
+ errorCount++;
+ }
+
+ int32_t framesPerBurst = AAudioStream_getFramesPerBurst(engine.stream);
+ // Check to see what kind of stream we actually got.
+ int32_t deviceId = AAudioStream_getDeviceId(engine.stream);
+ aaudio_performance_mode_t actualPerfMode = AAudioStream_getPerformanceMode(engine.stream);
+ printf("-------- opened: deviceId = %3d, framesPerBurst = %3d, perfMode = %d\n",
+ deviceId, framesPerBurst, actualPerfMode);
+
+ // Calculate how many callbacks needed.
+ if (alwaysContinue) {
+ engine.stopAtFrame = INT32_MAX;
+ } else {
+ int32_t sampleRate = AAudioStream_getSampleRate(engine.stream);
+ engine.stopAtFrame = STOP_AT_MSEC * sampleRate / 1000;
+ }
+
+ for (int loops = 0; loops < 2 && result == AAUDIO_OK; loops++) {
+ engine.reset();
+
+ // Start stream.
+ result = AAudioStream_requestStart(engine.stream);
+ printf("AAudioStream_requestStart() returned %d >>>>>>>>>>>>>>>>>>>>>>\n", result);
+ if (result != AAUDIO_OK) {
+ errorCount++;
+ break;
+ }
+
+ if (result == AAUDIO_OK) {
+ const int watchLoops = LOOP_DURATION_MSEC / SLEEP_DURATION_MSEC;
+ for (int i = watchLoops; i > 0; i--) {
+ printf("playing silence #%02d, framesRead = %7d, framesWritten = %7d,"
+ " framesCalled = %6d, callbackCount = %4d\n",
+ i,
+ (int32_t) AAudioStream_getFramesRead(engine.stream),
+ (int32_t) AAudioStream_getFramesWritten(engine.stream),
+ engine.framesCalled.load(),
+ engine.callbackCount.load()
+ );
+ usleep(SLEEP_DURATION_MSEC * 1000);
+ }
+ }
+
+ if (engine.stopAtFrame != INT32_MAX) {
+ callbackResult = (engine.callbackCountAfterStop == 0) ? EXIT_SUCCESS
+ : EXIT_FAILURE;
+ if (callbackResult) {
+ printf("ERROR - Callback count after STOP = %d\n",
+ engine.callbackCountAfterStop.load());
+ errorCount++;
+ }
+ }
+
+ if (engine.startingFramesRead.load() == engine.framesRead.load()) {
+ printf("ERROR - framesRead did not advance across callbacks\n");
+ errorCount++;
+ }
+
+ result = AAudioStream_requestStop(engine.stream);
+ printf("AAudioStream_requestStop() returned %d <<<<<<<<<<<<<<<<<<<<<\n", result);
+ if (result != AAUDIO_OK) {
+ errorCount++;
+ }
+ usleep(SLEEP_DURATION_MSEC * 1000);
+ printf("getFramesRead() = %d, getFramesWritten() = %d\n",
+ (int32_t) AAudioStream_getFramesRead(engine.stream),
+ (int32_t) AAudioStream_getFramesWritten(engine.stream));
+ }
+
+ s_CloseAudioStream(&engine);
+
+ printf("aaudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ printf("test %s\n", errorCount ? "FAILED" : "PASSED");
+
+ return errorCount ? EXIT_FAILURE : EXIT_SUCCESS;
+}
diff --git a/media/libaudioclient/AudioPolicy.cpp b/media/libaudioclient/AudioPolicy.cpp
index d1f7525..9601d6d 100644
--- a/media/libaudioclient/AudioPolicy.cpp
+++ b/media/libaudioclient/AudioPolicy.cpp
@@ -22,6 +22,22 @@
namespace android {
//
+// AudioDeviceTypeAddr implementation
+//
+status_t AudioDeviceTypeAddr::readFromParcel(Parcel *parcel) {
+ mType = (audio_devices_t) parcel->readInt32();
+ mAddress = parcel->readString8();
+ return NO_ERROR;
+}
+
+status_t AudioDeviceTypeAddr::writeToParcel(Parcel *parcel) const {
+ parcel->writeInt32((int32_t) mType);
+ parcel->writeString8(mAddress);
+ return NO_ERROR;
+}
+
+
+//
// AudioMixMatchCriterion implementation
//
AudioMixMatchCriterion::AudioMixMatchCriterion(audio_usage_t usage,
@@ -40,11 +56,22 @@
status_t AudioMixMatchCriterion::readFromParcel(Parcel *parcel)
{
mRule = parcel->readInt32();
- if (mRule == RULE_MATCH_ATTRIBUTE_USAGE ||
- mRule == RULE_EXCLUDE_ATTRIBUTE_USAGE) {
- mValue.mUsage = (audio_usage_t)parcel->readInt32();
- } else {
- mValue.mSource = (audio_source_t)parcel->readInt32();
+ switch (mRule) {
+ case RULE_MATCH_ATTRIBUTE_USAGE:
+ case RULE_EXCLUDE_ATTRIBUTE_USAGE:
+ mValue.mUsage = (audio_usage_t) parcel->readInt32();
+ break;
+ case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
+ case RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET:
+ mValue.mSource = (audio_source_t) parcel->readInt32();
+ break;
+ case RULE_MATCH_UID:
+ case RULE_EXCLUDE_UID:
+ mValue.mUid = (uid_t) parcel->readInt32();
+ break;
+ default:
+ ALOGE("Trying to build AudioMixMatchCriterion from unknown rule %d", mRule);
+ return BAD_VALUE;
}
return NO_ERROR;
}
@@ -116,4 +143,11 @@
return NO_ERROR;
}
+void AudioMix::excludeUid(uid_t uid) const {
+ AudioMixMatchCriterion crit;
+ crit.mRule = RULE_EXCLUDE_UID;
+ crit.mValue.mUid = uid;
+ mCriteria.add(crit);
+}
+
} // namespace android
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 3e91717..baeae8b 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1236,6 +1236,19 @@
return aps->registerPolicyMixes(mixes, registration);
}
+status_t AudioSystem::setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setUidDeviceAffinities(uid, devices);
+}
+
+status_t AudioSystem::removeUidDeviceAffinities(uid_t uid) {
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->removeUidDeviceAffinities(uid);
+}
+
status_t AudioSystem::startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
audio_port_handle_t *portId)
@@ -1388,9 +1401,14 @@
}
void AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
- int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle) {
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source) {
record_config_callback cb = NULL;
{
Mutex::Autolock _l(AudioSystem::gLock);
@@ -1398,7 +1416,8 @@
}
if (cb != NULL) {
- cb(event, clientInfo, clientConfig, deviceConfig, patchHandle);
+ cb(event, clientInfo, clientConfig, clientEffects,
+ deviceConfig, effects, patchHandle, source);
}
}
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 1f6dd60..b444d2d 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -952,7 +952,8 @@
if (rate == mSampleRate) {
return NO_ERROR;
}
- if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
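+ // Changing the sample rate is rejected for offloaded/direct, fast, and haptic-channel tracks.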
+ if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)
+ || (mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL)) {
return INVALID_OPERATION;
}
if (mOutput == AUDIO_IO_HANDLE_NONE) {
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 0ce8b16..272415c 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -90,6 +90,8 @@
SET_ASSISTANT_UID,
SET_A11Y_SERVICES_UIDS,
IS_HAPTIC_PLAYBACK_SUPPORTED,
+ SET_UID_DEVICE_AFFINITY,
+ REMOVE_UID_DEVICE_AFFINITY,
};
#define MAX_ITEMS_PER_LIST 1024
@@ -990,6 +992,50 @@
return reply.readBool();
}
+ virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+ data.writeInt32((int32_t) uid);
+ size_t size = devices.size();
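+ // Record the parcel position of the device count so it can be rewritten below
+ // if any entry fails to serialize.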
+ size_t sizePosition = data.dataPosition();
+ data.writeInt32((int32_t) size);
+ size_t finalSize = size;
+ for (size_t i = 0; i < size; i++) {
+ size_t position = data.dataPosition();
+ if (devices[i].writeToParcel(&data) != NO_ERROR) {
+ data.setDataPosition(position);
+ finalSize--;
+ }
+ }
+ if (size != finalSize) {
+ size_t position = data.dataPosition();
+ data.setDataPosition(sizePosition);
+ data.writeInt32(finalSize);
+ data.setDataPosition(position);
+ }
+
+ status_t status = remote()->transact(SET_UID_DEVICE_AFFINITY, data, &reply);
+ if (status == NO_ERROR) {
+ status = (status_t)reply.readInt32();
+ }
+ return status;
+ }
+
+ virtual status_t removeUidDeviceAffinities(uid_t uid)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+ data.writeInt32((int32_t) uid);
+
+ status_t status = remote()->transact(REMOVE_UID_DEVICE_AFFINITY, data, &reply);
+ if (status == NO_ERROR) {
+ status = (status_t)reply.readInt32();
+ }
+ return status;
+ }
};
IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1048,7 +1094,9 @@
case GET_SURROUND_FORMATS:
case SET_SURROUND_FORMAT_ENABLED:
case SET_ASSISTANT_UID:
- case SET_A11Y_SERVICES_UIDS: {
+ case SET_A11Y_SERVICES_UIDS:
+ case SET_UID_DEVICE_AFFINITY:
+ case REMOVE_UID_DEVICE_AFFINITY: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1811,6 +1859,30 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
bool isSupported = isHapticPlaybackSupported();
reply->writeBool(isSupported);
+ return NO_ERROR;
+ }
+
+ case SET_UID_DEVICE_AFFINITY: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ const uid_t uid = (uid_t) data.readInt32();
+ Vector<AudioDeviceTypeAddr> devices;
+ size_t size = (size_t)data.readInt32();
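+ // Only entries that unparcel successfully are added and passed to setUidDeviceAffinities().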
+ for (size_t i = 0; i < size; i++) {
+ AudioDeviceTypeAddr device;
+ if (device.readFromParcel((Parcel*)&data) == NO_ERROR) {
+ devices.add(device);
+ }
+ }
+ status_t status = setUidDeviceAffinities(uid, devices);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case REMOVE_UID_DEVICE_AFFINITY: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ const uid_t uid = (uid_t) data.readInt32();
+ status_t status = removeUidDeviceAffinities(uid);
+ reply->writeInt32(status);
return NO_ERROR;
}
diff --git a/media/libaudioclient/IAudioPolicyServiceClient.cpp b/media/libaudioclient/IAudioPolicyServiceClient.cpp
index ad7f1de..1f9eab7 100644
--- a/media/libaudioclient/IAudioPolicyServiceClient.cpp
+++ b/media/libaudioclient/IAudioPolicyServiceClient.cpp
@@ -52,12 +52,37 @@
clientInfo->uid = (uid_t) data.readUint32();
clientInfo->session = (audio_session_t) data.readInt32();
clientInfo->source = (audio_source_t) data.readInt32();
+ data.read(&clientInfo->port_id, sizeof(audio_port_handle_t));
+ clientInfo->silenced = data.readBool();
}
-inline void writeRecordClientInfoFromParcel(Parcel& data, const record_client_info_t *clientInfo) {
+inline void writeRecordClientInfoToParcel(Parcel& data, const record_client_info_t *clientInfo) {
data.writeUint32((uint32_t) clientInfo->uid);
data.writeInt32((int32_t) clientInfo->session);
data.writeInt32((int32_t) clientInfo->source);
+ data.write(&clientInfo->port_id, sizeof(audio_port_handle_t));
+ data.writeBool(clientInfo->silenced);
+}
+
+inline void readEffectVectorFromParcel(const Parcel& data,
+ std::vector<effect_descriptor_t> *effects) {
+ int32_t numEffects = data.readInt32();
+ for (int32_t i = 0; i < numEffects; i++) {
+ effect_descriptor_t effect;
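+ // Stop at the first failed read; the vector keeps whatever was parsed so far.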
+ if (data.read(&effect, sizeof(effect_descriptor_t)) != NO_ERROR) {
+ break;
+ }
+ (*effects).push_back(effect);
+ }
+}
+
+inline void writeEffectVectorToParcel(Parcel& data, std::vector<effect_descriptor_t> effects) {
+ data.writeUint32((uint32_t) effects.size());
+ for (const auto& effect : effects) {
+ if (data.write(&effect, sizeof(effect_descriptor_t)) != NO_ERROR) {
+ break;
+ }
+ }
}
// ----------------------------------------------------------------------
@@ -92,16 +117,24 @@
remote()->transact(MIX_STATE_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
}
- void onRecordingConfigurationUpdate(int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle) {
+ void onRecordingConfigurationUpdate(int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source) {
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
data.writeInt32(event);
- writeRecordClientInfoFromParcel(data, clientInfo);
+ writeRecordClientInfoToParcel(data, clientInfo);
writeAudioConfigBaseToParcel(data, clientConfig);
+ writeEffectVectorToParcel(data, clientEffects);
writeAudioConfigBaseToParcel(data, deviceConfig);
+ writeEffectVectorToParcel(data, effects);
data.writeInt32(patchHandle);
+ data.writeInt32((int32_t) source);
remote()->transact(RECORDING_CONFIGURATION_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
}
};
@@ -139,10 +172,15 @@
audio_config_base_t deviceConfig;
readRecordClientInfoFromParcel(data, &clientInfo);
readAudioConfigBaseFromParcel(data, &clientConfig);
+ std::vector<effect_descriptor_t> clientEffects;
+ readEffectVectorFromParcel(data, &clientEffects);
readAudioConfigBaseFromParcel(data, &deviceConfig);
+ std::vector<effect_descriptor_t> effects;
+ readEffectVectorFromParcel(data, &effects);
audio_patch_handle_t patchHandle = (audio_patch_handle_t) data.readInt32();
- onRecordingConfigurationUpdate(event, &clientInfo, &clientConfig, &deviceConfig,
- patchHandle);
+ audio_source_t source = (audio_source_t) data.readInt32();
+ onRecordingConfigurationUpdate(event, &clientInfo, &clientConfig, clientEffects,
+ &deviceConfig, effects, patchHandle, source);
return NO_ERROR;
} break;
default:
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index 8da0069..96e1235 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -56,6 +56,19 @@
#define MAX_MIXES_PER_POLICY 10
#define MAX_CRITERIA_PER_MIX 20
+class AudioDeviceTypeAddr {
+public:
+ AudioDeviceTypeAddr() {}
+ AudioDeviceTypeAddr(audio_devices_t type, String8 address) :
+ mType(type), mAddress(address) {}
+
+ status_t readFromParcel(Parcel *parcel);
+ status_t writeToParcel(Parcel *parcel) const;
+
+ audio_devices_t mType;
+ String8 mAddress;
+};
+
class AudioMixMatchCriterion {
public:
AudioMixMatchCriterion() {}
@@ -87,7 +100,9 @@
status_t readFromParcel(Parcel *parcel);
status_t writeToParcel(Parcel *parcel) const;
- Vector<AudioMixMatchCriterion> mCriteria;
+ void excludeUid(uid_t uid) const;
+
+ mutable Vector<AudioMixMatchCriterion> mCriteria;
uint32_t mMixType;
audio_config_t mFormat;
uint32_t mRouteFlags;
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 74156ca..781e9df 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -35,9 +35,14 @@
typedef void (*audio_error_callback)(status_t err);
typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
-typedef void (*record_config_callback)(int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle);
+typedef void (*record_config_callback)(int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source);
class IAudioFlinger;
class IAudioPolicyService;
@@ -320,6 +325,10 @@
static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
+ static status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
+
+ static status_t removeUidDeviceAffinities(uid_t uid);
+
static status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
audio_port_handle_t *portId);
@@ -448,9 +457,13 @@
virtual void onAudioPatchListUpdate();
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
virtual void onRecordingConfigurationUpdate(int event,
- const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source);
private:
Mutex mLock;
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 61f3b27..fb4fe93 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -167,6 +167,11 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration) = 0;
+ virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+ = 0;
+
+ virtual status_t removeUidDeviceAffinities(uid_t uid) = 0;
+
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
audio_port_handle_t *portId) = 0;
diff --git a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
index e0d2495..b3c0381 100644
--- a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
+++ b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
@@ -17,10 +17,12 @@
#ifndef ANDROID_IAUDIOPOLICYSERVICECLIENT_H
#define ANDROID_IAUDIOPOLICYSERVICECLIENT_H
+#include <vector>
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <system/audio.h>
+#include <system/audio_effect.h>
namespace android {
@@ -30,6 +32,8 @@
uid_t uid;
audio_session_t session;
audio_source_t source;
+ audio_port_handle_t port_id;
+ bool silenced;
};
typedef struct record_client_info record_client_info_t;
@@ -51,8 +55,11 @@
virtual void onRecordingConfigurationUpdate(int event,
const record_client_info_t *clientInfo,
const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle) = 0;
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source) = 0;
};
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index e21c235..bd3ef61 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -25,13 +25,13 @@
// static
sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
if (hardware::audio::effect::V5_0::IEffectsFactory::getService() != nullptr) {
- return V5_0::createEffectsFactoryHal();
+ return effect::V5_0::createEffectsFactoryHal();
}
if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
- return V4_0::createEffectsFactoryHal();
+ return effect::V4_0::createEffectsFactoryHal();
}
if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
- return V2_0::createEffectsFactoryHal();
+ return effect::V2_0::createEffectsFactoryHal();
}
return nullptr;
}
diff --git a/media/libaudiohal/HalDeathHandlerHidl.cpp b/media/libaudiohal/HalDeathHandlerHidl.cpp
index 1e3ab58..6e33523 100644
--- a/media/libaudiohal/HalDeathHandlerHidl.cpp
+++ b/media/libaudiohal/HalDeathHandlerHidl.cpp
@@ -54,7 +54,7 @@
handler.second();
}
ALOGE("HAL server crashed, audio server is restarting");
- exit(1);
+ _exit(1); // Avoid calling atexit handlers, as this code runs on a thread from the RPC thread pool.
}
} // namespace android
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index 9747859..9f8a520 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -22,19 +22,12 @@
#include "ConversionHelperHidl.h"
-using ::android::hardware::audio::CPP_VERSION::Result;
-
-#if MAJOR_VERSION >= 4
-using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneChannelMapping;
-using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneDirectionality;
-using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneLocation;
-using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
-using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
-#endif
-
namespace android {
namespace CPP_VERSION {
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+using namespace ::android::hardware::audio::CPP_VERSION;
+
// static
status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
AudioParameter halKeys(keys);
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 8537608..7a9e843 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -277,18 +277,18 @@
HidlUtils::audioConfigFromHal(*config, &hidlConfig);
Result retval = Result::NOT_INITIALIZED;
#if MAJOR_VERSION == 2
- auto sourceMetadata = AudioSource(source);
+ auto sinkMetadata = AudioSource(source);
#elif MAJOR_VERSION >= 4
// TODO: correctly propagate the tracks sources and volume
// for now, only send the main source at 1dbfs
- SinkMetadata sourceMetadata = {{{AudioSource(source), 1}}};
+ SinkMetadata sinkMetadata = {{{ .source = AudioSource(source), .gain = 1 }}};
#endif
Return<void> ret = mDevice->openInputStream(
handle,
hidlDevice,
hidlConfig,
EnumBitfield<AudioInputFlag>(flags),
- sourceMetadata,
+ sinkMetadata,
[&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
diff --git a/media/libaudiohal/impl/EffectBufferHalHidl.cpp b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
index 6ef4e8a..5367972 100644
--- a/media/libaudiohal/impl/EffectBufferHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
@@ -30,6 +30,7 @@
using ::android::hidl::allocator::V1_0::IAllocator;
namespace android {
+namespace effect {
namespace CPP_VERSION {
// static
@@ -142,5 +143,6 @@
memcpy(mExternalData, mAudioBuffer.raw, size);
}
+} // namespace effect
} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/EffectBufferHalHidl.h b/media/libaudiohal/impl/EffectBufferHalHidl.h
index 0c99a02..4826813 100644
--- a/media/libaudiohal/impl/EffectBufferHalHidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.h
@@ -23,13 +23,15 @@
#include <media/audiohal/EffectBufferHalInterface.h>
#include <system/audio_effect.h>
-using android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
using android::hardware::hidl_memory;
using android::hidl::memory::V1_0::IMemory;
namespace android {
+namespace effect {
namespace CPP_VERSION {
+using namespace ::android::hardware::audio::effect::CPP_VERSION;
+
class EffectBufferHalHidl : public EffectBufferHalInterface
{
public:
@@ -73,6 +75,7 @@
};
} // namespace CPP_VERSION
+} // namespace effect
} // namespace android
#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index df79b95..b0597b3 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -26,11 +26,6 @@
#include "EffectHalHidl.h"
#include "HidlUtils.h"
-using ::android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
-using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferAccess;
-using ::android::hardware::audio::effect::CPP_VERSION::EffectConfigParameters;
-using ::android::hardware::audio::effect::CPP_VERSION::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::audio::common::utils::EnumBitfield;
using ::android::hardware::hidl_vec;
@@ -38,9 +33,11 @@
using ::android::hardware::Return;
namespace android {
+namespace effect {
namespace CPP_VERSION {
using namespace ::android::hardware::audio::common::CPP_VERSION;
+using namespace ::android::hardware::audio::effect::CPP_VERSION;
EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
: mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
@@ -338,4 +335,5 @@
}
} // namespace CPP_VERSION
+} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index cd447ff..9d9f707 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -23,17 +23,15 @@
#include <fmq/MessageQueue.h>
#include <system/audio_effect.h>
-using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferConfig;
-using ::android::hardware::audio::effect::CPP_VERSION::EffectConfig;
-using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
-using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
-using EffectResult = ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::EventFlag;
using ::android::hardware::MessageQueue;
namespace android {
+namespace effect {
namespace CPP_VERSION {
+using namespace ::android::hardware::audio::effect::CPP_VERSION;
+
class EffectHalHidl : public EffectHalInterface
{
public:
@@ -70,7 +68,7 @@
private:
friend class EffectsFactoryHalHidl;
- typedef MessageQueue<EffectResult, hardware::kSynchronizedReadWrite> StatusMQ;
+ typedef MessageQueue<Result, hardware::kSynchronizedReadWrite> StatusMQ;
sp<IEffect> mEffect;
const uint64_t mEffectId;
@@ -80,7 +78,7 @@
std::unique_ptr<StatusMQ> mStatusMQ;
EventFlag* mEfGroup;
- static status_t analyzeResult(const EffectResult& result);
+ static status_t analyzeResult(const Result& result);
static void effectBufferConfigFromHal(
const buffer_config_t& halConfig, EffectBufferConfig* config);
static void effectBufferConfigToHal(
@@ -105,6 +103,7 @@
};
} // namespace CPP_VERSION
+} // namespace effect
} // namespace android
#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 7fea466..7fd6bde 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -26,14 +26,14 @@
#include "HidlUtils.h"
using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
-using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
-using ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::Return;
namespace android {
+namespace effect {
namespace CPP_VERSION {
using namespace ::android::hardware::audio::common::CPP_VERSION;
+using namespace ::android::hardware::audio::effect::CPP_VERSION;
EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
mEffectsFactory = IEffectsFactory::getService();
@@ -145,6 +145,6 @@
return EffectBufferHalHidl::mirror(external, size, buffer);
}
-
} // namespace CPP_VERSION
+} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 7027153..01178ff 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -24,11 +24,12 @@
#include "ConversionHelperHidl.h"
namespace android {
+namespace effect {
namespace CPP_VERSION {
-using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
-using ::android::hardware::audio::effect::CPP_VERSION::IEffectsFactory;
using ::android::hardware::hidl_vec;
+using ::android::CPP_VERSION::ConversionHelperHidl;
+using namespace ::android::hardware::audio::effect::CPP_VERSION;
class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
{
@@ -70,6 +71,7 @@
}
} // namespace CPP_VERSION
+} // namespace effect
} // namespace android
#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 9765f1e..c12b362 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -35,6 +35,7 @@
namespace android {
namespace CPP_VERSION {
+using EffectHalHidl = ::android::effect::CPP_VERSION::EffectHalHidl;
using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
using namespace ::android::hardware::audio::common::CPP_VERSION;
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
index 1d912a0..c7319d0 100644
--- a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -25,18 +25,29 @@
namespace android {
+namespace effect {
namespace V2_0 {
sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
} // namespace V2_0
namespace V4_0 {
sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
} // namespace V4_0
namespace V5_0 {
sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+} // namespace V5_0
+} // namespace effect
+
+namespace V2_0 {
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V2_0
+
+namespace V4_0 {
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V4_0
+
+namespace V5_0 {
sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
} // namespace V5_0
diff --git a/media/libaudioprocessing/Android.bp b/media/libaudioprocessing/Android.bp
new file mode 100644
index 0000000..817fb0b
--- /dev/null
+++ b/media/libaudioprocessing/Android.bp
@@ -0,0 +1,54 @@
+cc_defaults {
+ name: "libaudioprocessing_defaults",
+
+ export_include_dirs: ["include"],
+
+ shared_libs: [
+ "libaudiohal",
+ "libaudioutils",
+ "libcutils",
+ "liblog",
+ "libnbaio",
+ "libnblog",
+ "libsonic",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+
+ // uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
+ // "-DUSE_NEON=false",
+ ],
+}
+
+cc_library_shared {
+ name: "libaudioprocessing",
+ defaults: ["libaudioprocessing_defaults"],
+
+ srcs: [
+ "BufferProviders.cpp",
+ "RecordBufferConverter.cpp",
+ ],
+ whole_static_libs: ["libaudioprocessing_arm"],
+}
+
+cc_library_static {
+ name: "libaudioprocessing_arm",
+ defaults: ["libaudioprocessing_defaults"],
+
+ srcs: [
+ "AudioMixer.cpp",
+ "AudioResampler.cpp",
+ "AudioResamplerCubic.cpp",
+ "AudioResamplerSinc.cpp",
+ "AudioResamplerDyn.cpp",
+ ],
+
+ arch: {
+ arm: {
+ instruction_set: "arm",
+ },
+ },
+}
diff --git a/media/libaudioprocessing/Android.mk b/media/libaudioprocessing/Android.mk
deleted file mode 100644
index da1ecc2..0000000
--- a/media/libaudioprocessing/Android.mk
+++ /dev/null
@@ -1,40 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- AudioMixer.cpp.arm \
- AudioResampler.cpp.arm \
- AudioResamplerCubic.cpp.arm \
- AudioResamplerSinc.cpp.arm \
- AudioResamplerDyn.cpp.arm \
- BufferProviders.cpp \
- RecordBufferConverter.cpp \
-
-LOCAL_C_INCLUDES := \
- $(TOP) \
- $(call include-path-for, audio-utils) \
- $(LOCAL_PATH)/include \
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
-
-LOCAL_SHARED_LIBRARIES := \
- libaudiohal \
- libaudioutils \
- libcutils \
- liblog \
- libnbaio \
- libnblog \
- libsonic \
- libutils \
-
-LOCAL_MODULE := libaudioprocessing
-
-LOCAL_CFLAGS := -Werror -Wall
-
-# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
-#LOCAL_CFLAGS += -DUSE_NEON=false
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 2567b3b..86711de 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -302,14 +302,19 @@
if (audio_channel_mask_get_representation(channelMask)
== AUDIO_CHANNEL_REPRESENTATION_POSITION
&& DownmixerBufferProvider::isMultichannelCapable()) {
- mDownmixerBufferProvider.reset(new DownmixerBufferProvider(channelMask,
- mMixerChannelMask,
- AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
- sampleRate, sessionId, kCopyBufferFrameCount));
- if (static_cast<DownmixerBufferProvider *>(mDownmixerBufferProvider.get())->isValid()) {
- mDownmixRequiresFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
- reconfigureBufferProviders();
- return NO_ERROR;
+
+ // Check if we have a float or int16 downmixer, in that order.
+ for (const audio_format_t format : { AUDIO_FORMAT_PCM_FLOAT, AUDIO_FORMAT_PCM_16_BIT }) {
+ mDownmixerBufferProvider.reset(new DownmixerBufferProvider(
+ channelMask, mMixerChannelMask,
+ format,
+ sampleRate, sessionId, kCopyBufferFrameCount));
+ if (static_cast<DownmixerBufferProvider *>(mDownmixerBufferProvider.get())
+ ->isValid()) {
+ mDownmixRequiresFormat = format;
+ reconfigureBufferProviders();
+ return NO_ERROR;
+ }
}
// mDownmixerBufferProvider reset below.
}
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index a1a1a0d..b764ccb 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -17,10 +17,12 @@
#define LOG_TAG "BufferProvider"
//#define LOG_NDEBUG 0
+#include <algorithm>
+
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
#include <audio_utils/channels.h>
-#include <external/sonic/sonic.h>
+#include <sonic.h>
#include <media/audiohal/EffectBufferHalInterface.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
@@ -36,13 +38,6 @@
namespace android {
// ----------------------------------------------------------------------------
-
-template <typename T>
-static inline T min(const T& a, const T& b)
-{
- return a < b ? a : b;
-}
-
CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
size_t outputFrameSize, size_t bufferFrameCount) :
mInputFrameSize(inputFrameSize),
@@ -100,8 +95,8 @@
mConsumed = 0;
}
ALOG_ASSERT(mConsumed < mBuffer.frameCount);
- size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
- count = min(count, pBuffer->frameCount);
+ size_t count = std::min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
+ count = std::min(count, pBuffer->frameCount);
pBuffer->raw = mLocalBufferData;
pBuffer->frameCount = count;
copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
@@ -491,7 +486,7 @@
}
// time-stretch the data
- dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired);
+ dstAvailable = std::min(mLocalBufferFrameCount - mRemaining, outputDesired);
size_t srcAvailable = mBuffer.frameCount;
processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable,
mBuffer.raw, &srcAvailable);
@@ -589,7 +584,7 @@
} else {
// cyclically repeat the source.
for (size_t count = 0; count < *dstFrames; count += *srcFrames) {
- size_t remaining = min(*srcFrames, *dstFrames - count);
+ size_t remaining = std::min(*srcFrames, *dstFrames - count);
memcpy((uint8_t*)dstBuffer + mFrameSize * count,
srcBuffer, mFrameSize * remaining);
}
@@ -657,9 +652,9 @@
audio_format_t format, size_t inChannelCount, size_t outChannelCount,
audio_format_t contractedFormat, size_t contractedFrameCount, void* contractedBuffer) :
CopyBufferProvider(
- audio_bytes_per_frame(inChannelCount, format),
- audio_bytes_per_frame(outChannelCount, format),
- 0 /*bufferFrameCount*/),
+ audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
+ audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
+ contractedFrameCount),
mFormat(format),
mInChannelCount(inChannelCount),
mOutChannelCount(outChannelCount),
diff --git a/media/libaudioprocessing/audio-resampler/Android.bp b/media/libaudioprocessing/audio-resampler/Android.bp
new file mode 100644
index 0000000..dc70310
--- /dev/null
+++ b/media/libaudioprocessing/audio-resampler/Android.bp
@@ -0,0 +1,15 @@
+cc_library_shared {
+ name: "libaudio-resampler",
+
+ srcs: ["AudioResamplerCoefficients.cpp"],
+
+ shared_libs: [
+ "libutils",
+ "liblog",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
diff --git a/media/libaudioprocessing/audio-resampler/Android.mk b/media/libaudioprocessing/audio-resampler/Android.mk
deleted file mode 100644
index bb2807c..0000000
--- a/media/libaudioprocessing/audio-resampler/Android.mk
+++ /dev/null
@@ -1,16 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- AudioResamplerCoefficients.cpp
-
-LOCAL_MODULE := libaudio-resampler
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SHARED_LIBRARIES := libutils liblog
-
-LOCAL_CFLAGS += -Werror -Wall
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
new file mode 100644
index 0000000..811c16b
--- /dev/null
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -0,0 +1,51 @@
+// Build the unit tests for libaudioprocessing
+
+cc_defaults {
+ name: "libaudioprocessing_test_defaults",
+
+ header_libs: ["libbase_headers"],
+ shared_libs: [
+ "libaudioutils",
+ "libaudioprocessing",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
+
+//
+// resampler unit test
+//
+cc_test {
+ name: "resampler_tests",
+ defaults: ["libaudioprocessing_test_defaults"],
+
+ srcs: ["resampler_tests.cpp"],
+}
+
+//
+// audio mixer test tool
+//
+cc_binary {
+ name: "test-mixer",
+ defaults: ["libaudioprocessing_test_defaults"],
+
+ srcs: ["test-mixer.cpp"],
+ static_libs: ["libsndfile"],
+}
+
+//
+// build audio resampler test tool
+//
+cc_binary {
+ name: "test-resampler",
+ defaults: ["libaudioprocessing_test_defaults"],
+
+ srcs: ["test-resampler.cpp"],
+ static_libs: ["libsndfile"],
+}
diff --git a/media/libaudioprocessing/tests/Android.mk b/media/libaudioprocessing/tests/Android.mk
deleted file mode 100644
index 31ffbdc..0000000
--- a/media/libaudioprocessing/tests/Android.mk
+++ /dev/null
@@ -1,93 +0,0 @@
-# Build the unit tests for libaudioprocessing
-
-LOCAL_PATH := $(call my-dir)
-
-#
-# resampler unit test
-#
-include $(CLEAR_VARS)
-
-LOCAL_SHARED_LIBRARIES := \
- libaudioutils \
- libaudioprocessing \
- libcutils \
- liblog \
- libutils \
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
-
-LOCAL_SRC_FILES := \
- resampler_tests.cpp
-
-LOCAL_HEADER_LIBRARIES := libbase_headers
-
-LOCAL_MODULE := resampler_tests
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_NATIVE_TEST)
-
-#
-# audio mixer test tool
-#
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- test-mixer.cpp \
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
-
-LOCAL_STATIC_LIBRARIES := \
- libsndfile \
-
-LOCAL_SHARED_LIBRARIES := \
- libaudioprocessing \
- libaudioutils \
- libcutils \
- liblog \
- libutils \
-
-LOCAL_HEADER_LIBRARIES := libbase_headers
-
-LOCAL_MODULE := test-mixer
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
-
-#
-# build audio resampler test tool
-#
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- test-resampler.cpp \
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
-
-LOCAL_STATIC_LIBRARIES := \
- libsndfile \
-
-LOCAL_SHARED_LIBRARIES := \
- libaudioprocessing \
- libaudioutils \
- libcutils \
- liblog \
- libutils \
-
-LOCAL_HEADER_LIBRARIES := libbase_headers
-
-LOCAL_MODULE := test-resampler
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index 351b1ee..76b4adc 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -305,7 +305,7 @@
return parseWithPath(path);
}
- for (std::string location : DEFAULT_LOCATIONS) {
+ for (const std::string& location : DEFAULT_LOCATIONS) {
std::string defaultPath = location + '/' + DEFAULT_NAME;
if (access(defaultPath.c_str(), R_OK) != 0) {
continue;
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index 227f2a1..9c82b1d 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -13,7 +13,7 @@
relative_install_path: "soundfx",
cflags: [
- //"-DBUILD_FLOAT",
+ "-DBUILD_FLOAT",
"-fvisibility=hidden",
"-Wall",
"-Werror",
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index b4a1d77..99ac4f5 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -31,10 +31,12 @@
// Do not submit with DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER defined, strictly for testing
//#define DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER 0
-#define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896
-
#ifdef BUILD_FLOAT
#define MINUS_3_DB_IN_FLOAT 0.70710678f // -3dB = 0.70710678f
+const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_FLOAT;
+#else
+#define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896
+const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_16_BIT;
#endif
// subset of possible audio_channel_mask_t values, and AUDIO_CHANNEL_OUT_* renamed to CHANNEL_MASK_*
@@ -703,7 +705,7 @@
memset(&pDwmModule->context, 0, sizeof(downmix_object_t));
pDwmModule->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
- pDwmModule->config.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pDwmModule->config.inputCfg.format = gTargetFormat;
pDwmModule->config.inputCfg.channels = AUDIO_CHANNEL_OUT_7POINT1;
pDwmModule->config.inputCfg.bufferProvider.getBuffer = NULL;
pDwmModule->config.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -715,7 +717,7 @@
// set a default value for the access mode, but should be overwritten by caller
pDwmModule->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
- pDwmModule->config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pDwmModule->config.outputCfg.format = gTargetFormat;
pDwmModule->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
pDwmModule->config.outputCfg.bufferProvider.getBuffer = NULL;
pDwmModule->config.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -762,8 +764,8 @@
// Check configuration compatibility with build options, and effect capabilities
if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate
|| pConfig->outputCfg.channels != DOWNMIX_OUTPUT_CHANNELS
- || pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT
- || pConfig->outputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
+ || pConfig->inputCfg.format != gTargetFormat
+ || pConfig->outputCfg.format != gTargetFormat) {
ALOGE("Downmix_Configure error: invalid config");
return -EINVAL;
}
@@ -1185,8 +1187,8 @@
if (accumulate) {
while (numFrames) {
// centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
+ centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
+ + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
// FL + centerPlusLfeContrib + SL + RL
lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
// FR + centerPlusLfeContrib + SR + RR
@@ -1427,4 +1429,4 @@
}
return true;
}
-#endif
\ No newline at end of file
+#endif
diff --git a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
index a1fa79a..cc066b0 100644
--- a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
+++ b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
@@ -199,8 +199,10 @@
#define LVDBE_CAP_FS_44100 128
#define LVDBE_CAP_FS_48000 256
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
-#define LVDBE_CAP_FS_96000 512
-#define LVDBE_CAP_FS_192000 1024
+#define LVDBE_CAP_FS_88200 512
+#define LVDBE_CAP_FS_96000 1024
+#define LVDBE_CAP_FS_176400 2048
+#define LVDBE_CAP_FS_192000 4096
#endif
typedef enum
@@ -215,8 +217,10 @@
LVDBE_FS_44100 = 7,
LVDBE_FS_48000 = 8,
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
- LVDBE_FS_96000 = 9,
- LVDBE_FS_192000 = 10,
+ LVDBE_FS_88200 = 9,
+ LVDBE_FS_96000 = 10,
+ LVDBE_FS_176400 = 11,
+ LVDBE_FS_192000 = 12,
#endif
LVDBE_FS_MAX = LVM_MAXINT_32
} LVDBE_Fs_en;
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h
index 4ecaf14..8f058e8 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h
@@ -580,12 +580,24 @@
#define HPF_Fs48000_Fc55_B2 0.989882f
#ifdef HIGHER_FS
+#define HPF_Fs88200_Fc55_A0 0.985818f
+#define HPF_Fs88200_Fc55_A1 (-1.971636f)
+#define HPF_Fs88200_Fc55_A2 0.985818f
+#define HPF_Fs88200_Fc55_B1 (-1.994466f)
+#define HPF_Fs88200_Fc55_B2 0.994481f
+
#define HPF_Fs96000_Fc55_A0 0.986040f
#define HPF_Fs96000_Fc55_A1 (-1.972080f)
#define HPF_Fs96000_Fc55_A2 0.986040f
#define HPF_Fs96000_Fc55_B1 (-1.994915f)
#define HPF_Fs96000_Fc55_B2 0.994928f
+#define HPF_Fs176400_Fc55_A0 0.987183f
+#define HPF_Fs176400_Fc55_A1 (-1.974366f)
+#define HPF_Fs176400_Fc55_A2 0.987183f
+#define HPF_Fs176400_Fc55_B1 (-1.997233f)
+#define HPF_Fs176400_Fc55_B2 0.997237f
+
#define HPF_Fs192000_Fc55_A0 0.987294f
#define HPF_Fs192000_Fc55_A1 (-1.974588f)
#define HPF_Fs192000_Fc55_A2 0.987294f
@@ -642,12 +654,24 @@
#define HPF_Fs48000_Fc66_B2 0.987871f
#ifdef HIGHER_FS
+#define HPF_Fs88200_Fc66_A0 0.985273f
+#define HPF_Fs88200_Fc66_A1 (-1.970546f)
+#define HPF_Fs88200_Fc66_A2 0.985273f
+#define HPF_Fs88200_Fc66_B1 (-1.993359f)
+#define HPF_Fs88200_Fc66_B2 0.993381f
+
#define HPF_Fs96000_Fc66_A0 0.985539f
#define HPF_Fs96000_Fc66_A1 (-1.971077f)
#define HPF_Fs96000_Fc66_A2 0.985539f
#define HPF_Fs96000_Fc66_B1 (-1.993898f)
#define HPF_Fs96000_Fc66_B2 0.993917f
+#define HPF_Fs176400_Fc66_A0 0.986910f
+#define HPF_Fs176400_Fc66_A1 (-1.973820f)
+#define HPF_Fs176400_Fc66_A2 0.986910f
+#define HPF_Fs176400_Fc66_B1 (-1.996679f)
+#define HPF_Fs176400_Fc66_B2 0.996685f
+
#define HPF_Fs192000_Fc66_A0 0.987043f
#define HPF_Fs192000_Fc66_A1 (-1.974086f)
#define HPF_Fs192000_Fc66_A2 0.987043f
@@ -703,12 +727,24 @@
#define HPF_Fs48000_Fc78_B2 0.985681f
#ifdef HIGHER_FS
+#define HPF_Fs88200_Fc78_A0 0.984678f
+#define HPF_Fs88200_Fc78_A1 (-1.969356f)
+#define HPF_Fs88200_Fc78_A2 0.984678f
+#define HPF_Fs88200_Fc78_B1 (-1.992151f)
+#define HPF_Fs88200_Fc78_B2 0.992182f
+
#define HPF_Fs96000_Fc78_A0 0.984992f
#define HPF_Fs96000_Fc78_A1 (-1.969984f)
#define HPF_Fs96000_Fc78_A2 0.984992f
#define HPF_Fs96000_Fc78_B1 (-1.992789f)
#define HPF_Fs96000_Fc78_B2 0.992815f
+#define HPF_Fs176400_Fc78_A0 0.986612f
+#define HPF_Fs176400_Fc78_A1 (-1.973224f)
+#define HPF_Fs176400_Fc78_A2 0.986612f
+#define HPF_Fs176400_Fc78_B1 (-1.996076f)
+#define HPF_Fs176400_Fc78_B2 0.996083f
+
#define HPF_Fs192000_Fc78_A0 0.986769f
#define HPF_Fs192000_Fc78_A1 (-1.973539f)
#define HPF_Fs192000_Fc78_A2 0.986769f
@@ -764,12 +800,24 @@
#define HPF_Fs48000_Fc90_B2 0.983497f
#ifdef HIGHER_FS
+#define HPF_Fs88200_Fc90_A0 0.984084f
+#define HPF_Fs88200_Fc90_A1 (-1.968168f)
+#define HPF_Fs88200_Fc90_A2 0.984084f
+#define HPF_Fs88200_Fc90_B1 (-1.990944f)
+#define HPF_Fs88200_Fc90_B2 0.990985f
+
#define HPF_Fs96000_Fc90_A0 0.984446f
#define HPF_Fs96000_Fc90_A1 (-1.968892f)
#define HPF_Fs96000_Fc90_A2 0.984446f
#define HPF_Fs96000_Fc90_B1 (-1.991680f)
#define HPF_Fs96000_Fc90_B2 0.991714f
+#define HPF_Fs176400_Fc90_A0 0.986314f
+#define HPF_Fs176400_Fc90_A1 (-1.972629f)
+#define HPF_Fs176400_Fc90_A2 0.986314f
+#define HPF_Fs176400_Fc90_B1 (-1.995472f)
+#define HPF_Fs176400_Fc90_B2 0.995482f
+
#define HPF_Fs192000_Fc90_A0 0.986496f
#define HPF_Fs192000_Fc90_A1 (-1.972992f)
#define HPF_Fs192000_Fc90_A2 0.986496f
@@ -831,12 +879,24 @@
#define BPF_Fs48000_Fc55_B2 0.996875f
#ifdef HIGHER_FS
+#define BPF_Fs88200_Fc55_A0 0.000831f
+#define BPF_Fs88200_Fc55_A1 0.000000f
+#define BPF_Fs88200_Fc55_A2 (-0.000831f)
+#define BPF_Fs88200_Fc55_B1 (-1.998321f)
+#define BPF_Fs88200_Fc55_B2 0.998338f
+
#define BPF_Fs96000_Fc55_A0 0.000762f
#define BPF_Fs96000_Fc55_A1 0.000000f
#define BPF_Fs96000_Fc55_A2 (-0.000762f)
#define BPF_Fs96000_Fc55_B1 (-1.998461f)
#define BPF_Fs96000_Fc55_B2 0.998477f
+#define BPF_Fs176400_Fc55_A0 0.000416f
+#define BPF_Fs176400_Fc55_A1 0.000000f
+#define BPF_Fs176400_Fc55_A2 (-0.000416f)
+#define BPF_Fs176400_Fc55_B1 (-1.999164f)
+#define BPF_Fs176400_Fc55_B2 0.999169f
+
#define BPF_Fs192000_Fc55_A0 0.000381f
#define BPF_Fs192000_Fc55_A1 0.000000f
#define BPF_Fs192000_Fc55_A2 (-0.000381f)
@@ -892,12 +952,24 @@
#define BPF_Fs48000_Fc66_B2 0.995690f
#ifdef HIGHER_FS
+#define BPF_Fs88200_Fc66_A0 0.001146f
+#define BPF_Fs88200_Fc66_A1 0.000000f
+#define BPF_Fs88200_Fc66_A2 (-0.001146f)
+#define BPF_Fs88200_Fc66_B1 (-1.997684f)
+#define BPF_Fs88200_Fc66_B2 0.997708f
+
#define BPF_Fs96000_Fc66_A0 0.001055f
#define BPF_Fs96000_Fc66_A1 0.000000f
#define BPF_Fs96000_Fc66_A2 (-0.001055f)
#define BPF_Fs96000_Fc66_B1 (-1.997868f)
#define BPF_Fs96000_Fc66_B2 0.997891f
+#define BPF_Fs176400_Fc66_A0 0.000573f
+#define BPF_Fs176400_Fc66_A1 0.000000f
+#define BPF_Fs176400_Fc66_A2 (-0.000573f)
+#define BPF_Fs176400_Fc66_B1 (-1.998847f)
+#define BPF_Fs176400_Fc66_B2 0.998853f
+
#define BPF_Fs192000_Fc66_A0 0.000528f
#define BPF_Fs192000_Fc66_A1 0.000000f
#define BPF_Fs192000_Fc66_A2 (-0.000528f)
@@ -953,12 +1025,24 @@
#define BPF_Fs48000_Fc78_B2 0.993639f
#ifdef HIGHER_FS
+#define BPF_Fs88200_Fc78_A0 0.001693f
+#define BPF_Fs88200_Fc78_A1 0.000000f
+#define BPF_Fs88200_Fc78_A2 (-0.001693f)
+#define BPF_Fs88200_Fc78_B1 (-1.996582f)
+#define BPF_Fs88200_Fc78_B2 0.996615f
+
#define BPF_Fs96000_Fc78_A0 0.001555f
#define BPF_Fs96000_Fc78_A1 0.000000f
#define BPF_Fs96000_Fc78_A2 (-0.0015555f)
#define BPF_Fs96000_Fc78_B1 (-1.996860f)
#define BPF_Fs96000_Fc78_B2 0.996891f
+#define BPF_Fs176400_Fc78_A0 0.000847f
+#define BPF_Fs176400_Fc78_A1 0.000000f
+#define BPF_Fs176400_Fc78_A2 (-0.000847f)
+#define BPF_Fs176400_Fc78_B1 (-1.998298f)
+#define BPF_Fs176400_Fc78_B2 0.998306f
+
#define BPF_Fs192000_Fc78_A0 0.000778f
#define BPF_Fs192000_Fc78_A1 0.000000f
#define BPF_Fs192000_Fc78_A2 (-0.000778f)
@@ -1014,12 +1098,24 @@
#define BPF_Fs48000_Fc90_B2 0.992177f
#ifdef HIGHER_FS
+#define BPF_Fs88200_Fc90_A0 0.002083f
+#define BPF_Fs88200_Fc90_A1 0.000000f
+#define BPF_Fs88200_Fc90_A2 (-0.002083f)
+#define BPF_Fs88200_Fc90_B1 (-1.995791f)
+#define BPF_Fs88200_Fc90_B2 0.995835f
+
#define BPF_Fs96000_Fc90_A0 0.001913f
#define BPF_Fs96000_Fc90_A1 0.000000f
#define BPF_Fs96000_Fc90_A2 (-0.001913f)
#define BPF_Fs96000_Fc90_B1 (-1.996134f)
#define BPF_Fs96000_Fc90_B2 0.996174f
+#define BPF_Fs176400_Fc90_A0 0.001042f
+#define BPF_Fs176400_Fc90_A1 0.000000f
+#define BPF_Fs176400_Fc90_A2 (-0.001042f)
+#define BPF_Fs176400_Fc90_B1 (-1.997904f)
+#define BPF_Fs176400_Fc90_B2 0.997915f
+
#define BPF_Fs192000_Fc90_A0 0.000958f
#define BPF_Fs192000_Fc90_A1 0.000000f
#define BPF_Fs192000_Fc90_A2 (-0.000958f)
@@ -1045,7 +1141,9 @@
#define AGC_ATTACK_Fs48000 0.971628f
#ifdef HIGHER_FS
+#define AGC_ATTACK_Fs88200 0.984458f
#define AGC_ATTACK_Fs96000 0.985712f
+#define AGC_ATTACK_Fs176400 0.992199f
#define AGC_ATTACK_Fs192000 0.992830f
#endif
@@ -1062,7 +1160,9 @@
#define AGC_DECAY_Fs48000 0.000007f
#ifdef HIGHER_FS
+#define AGC_DECAY_Fs88200 0.0000038f
#define AGC_DECAY_FS96000 0.0000035f
+#define AGC_DECAY_Fs176400 0.00000188f
#define AGC_DECAY_FS192000 0.00000175f
#endif
@@ -1125,7 +1225,9 @@
#define VOL_TC_Fs44100 0.004525f
#define VOL_TC_Fs48000 0.004158f
#ifdef HIGHER_FS
+#define VOL_TC_Fs88200 0.002263f
#define VOL_TC_Fs96000 0.002079f
+#define VOL_TC_Fs176400 0.001131f
#define VOL_TC_Fs192000 0.001039f
#endif
#define MIX_TC_Fs8000 29365 /* Floating point value 0.896151 */
@@ -1138,9 +1240,13 @@
#define MIX_TC_Fs44100 32097 /* Floating point value 0.979515 */
#define MIX_TC_Fs48000 32150 /* Floating point value 0.981150 */
#ifdef HIGHER_FS
+#define MIX_TC_Fs88200 32430 /* Floating point value 0.989704 */
#define MIX_TC_Fs96000 32456 /* Floating point value 0.990530 */
+#define MIX_TC_Fs176400 32598 /* Floating point value 0.994838 */
#define MIX_TC_Fs192000 32611 /* Floating point value 0.992524 */
#endif
#endif /*BUILD_FLOAT*/
-#endif
\ No newline at end of file
+#endif
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Tables.c b/media/libeffects/lvm/lib/Bass/src/LVDBE_Tables.c
index c4a9b14..a2ce404 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Tables.c
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Tables.c
@@ -88,11 +88,21 @@
-HPF_Fs48000_Fc55_B2,
-HPF_Fs48000_Fc55_B1},
#ifdef HIGHER_FS
+ {HPF_Fs88200_Fc55_A2, /* 88kS/s coefficients */
+ HPF_Fs88200_Fc55_A1,
+ HPF_Fs88200_Fc55_A0,
+ -HPF_Fs88200_Fc55_B2,
+ -HPF_Fs88200_Fc55_B1},
{HPF_Fs96000_Fc55_A2, /* 96kS/s coefficients */
HPF_Fs96000_Fc55_A1,
HPF_Fs96000_Fc55_A0,
-HPF_Fs96000_Fc55_B2,
-HPF_Fs96000_Fc55_B1},
+ {HPF_Fs176400_Fc55_A2, /* 176kS/s coefficients */
+ HPF_Fs176400_Fc55_A1,
+ HPF_Fs176400_Fc55_A0,
+ -HPF_Fs176400_Fc55_B2,
+ -HPF_Fs176400_Fc55_B1},
{HPF_Fs192000_Fc55_A2, /* 192kS/s coefficients */
HPF_Fs192000_Fc55_A1,
HPF_Fs192000_Fc55_A0,
@@ -147,11 +157,21 @@
-HPF_Fs48000_Fc66_B2,
-HPF_Fs48000_Fc66_B1},
#ifdef HIGHER_FS
+ {HPF_Fs88200_Fc66_A2, /* 88kS/s coefficients */
+ HPF_Fs88200_Fc66_A1,
+ HPF_Fs88200_Fc66_A0,
+ -HPF_Fs88200_Fc66_B2,
+ -HPF_Fs88200_Fc66_B1},
{HPF_Fs96000_Fc66_A2, /* 96kS/s coefficients */
HPF_Fs96000_Fc66_A1,
HPF_Fs96000_Fc66_A0,
-HPF_Fs96000_Fc66_B2,
-HPF_Fs96000_Fc66_B1},
+ {HPF_Fs176400_Fc66_A2, /* 176kS/s coefficients */
+ HPF_Fs176400_Fc66_A1,
+ HPF_Fs176400_Fc66_A0,
+ -HPF_Fs176400_Fc66_B2,
+ -HPF_Fs176400_Fc66_B1},
{HPF_Fs192000_Fc66_A2, /* 192kS/s coefficients */
HPF_Fs192000_Fc66_A1,
HPF_Fs192000_Fc66_A0,
@@ -207,11 +227,21 @@
-HPF_Fs48000_Fc78_B2,
-HPF_Fs48000_Fc78_B1},
#ifdef HIGHER_FS
+ {HPF_Fs88200_Fc78_A2, /* 88kS/s coefficients */
+ HPF_Fs88200_Fc78_A1,
+ HPF_Fs88200_Fc78_A0,
+ -HPF_Fs88200_Fc78_B2,
+ -HPF_Fs88200_Fc78_B1},
{HPF_Fs96000_Fc78_A2, /* 96kS/s coefficients */
HPF_Fs96000_Fc78_A1,
HPF_Fs96000_Fc78_A0,
-HPF_Fs96000_Fc78_B2,
-HPF_Fs96000_Fc78_B1},
+ {HPF_Fs176400_Fc78_A2, /* 176kS/s coefficients */
+ HPF_Fs176400_Fc78_A1,
+ HPF_Fs176400_Fc78_A0,
+ -HPF_Fs176400_Fc78_B2,
+ -HPF_Fs176400_Fc78_B1},
{HPF_Fs192000_Fc78_A2, /* 192kS/s coefficients */
HPF_Fs192000_Fc78_A1,
HPF_Fs192000_Fc78_A0,
@@ -269,11 +299,21 @@
#ifdef HIGHER_FS
,
+ {HPF_Fs88200_Fc90_A2, /* 88kS/s coefficients */
+ HPF_Fs88200_Fc90_A1,
+ HPF_Fs88200_Fc90_A0,
+ -HPF_Fs88200_Fc90_B2,
+ -HPF_Fs88200_Fc90_B1},
{HPF_Fs96000_Fc90_A2, /* 96kS/s coefficients */
HPF_Fs96000_Fc90_A1,
HPF_Fs96000_Fc90_A0,
-HPF_Fs96000_Fc90_B2,
-HPF_Fs96000_Fc90_B1},
+ {HPF_Fs176400_Fc90_A2, /* 176kS/s coefficients */
+ HPF_Fs176400_Fc90_A1,
+ HPF_Fs176400_Fc90_A0,
+ -HPF_Fs176400_Fc90_B2,
+ -HPF_Fs176400_Fc90_B1},
{HPF_Fs192000_Fc90_A2, /* 192kS/s coefficients */
HPF_Fs192000_Fc90_A1,
HPF_Fs192000_Fc90_A0,
@@ -320,9 +360,15 @@
-BPF_Fs48000_Fc55_B2,
-BPF_Fs48000_Fc55_B1},
#ifdef HIGHER_FS
+ {BPF_Fs88200_Fc55_A0, /* 88kS/s coefficients */
+ -BPF_Fs88200_Fc55_B2,
+ -BPF_Fs88200_Fc55_B1},
{BPF_Fs96000_Fc55_A0, /* 96kS/s coefficients */
-BPF_Fs96000_Fc55_B2,
-BPF_Fs96000_Fc55_B1},
+ {BPF_Fs176400_Fc55_A0, /* 176kS/s coefficients */
+ -BPF_Fs176400_Fc55_B2,
+ -BPF_Fs176400_Fc55_B1},
{BPF_Fs192000_Fc55_A0, /* 192kS/s coefficients */
-BPF_Fs192000_Fc55_B2,
-BPF_Fs192000_Fc55_B1},
@@ -357,9 +403,15 @@
-BPF_Fs48000_Fc66_B2,
-BPF_Fs48000_Fc66_B1},
#ifdef HIGHER_FS
+ {BPF_Fs88200_Fc66_A0, /* 88kS/s coefficients */
+ -BPF_Fs88200_Fc66_B2,
+ -BPF_Fs88200_Fc66_B1},
{BPF_Fs96000_Fc66_A0, /* 96kS/s coefficients */
-BPF_Fs96000_Fc66_B2,
-BPF_Fs96000_Fc66_B1},
+ {BPF_Fs176400_Fc66_A0, /* 176kS/s coefficients */
+ -BPF_Fs176400_Fc66_B2,
+ -BPF_Fs176400_Fc66_B1},
{BPF_Fs192000_Fc66_A0, /* 192kS/s coefficients */
-BPF_Fs192000_Fc66_B2,
-BPF_Fs192000_Fc66_B1},
@@ -394,9 +446,15 @@
-BPF_Fs48000_Fc78_B2,
-BPF_Fs48000_Fc78_B1},
#ifdef HIGHER_FS
+ {BPF_Fs88200_Fc78_A0, /* 88kS/s coefficients */
+ -BPF_Fs88200_Fc78_B2,
+ -BPF_Fs88200_Fc78_B1},
{BPF_Fs96000_Fc78_A0, /* 96kS/s coefficients */
-BPF_Fs96000_Fc78_B2,
-BPF_Fs96000_Fc78_B1},
+ {BPF_Fs176400_Fc78_A0, /* 176kS/s coefficients */
+ -BPF_Fs176400_Fc78_B2,
+ -BPF_Fs176400_Fc78_B1},
{BPF_Fs192000_Fc78_A0, /* 192kS/s coefficients */
-BPF_Fs192000_Fc78_B2,
-BPF_Fs192000_Fc78_B1},
@@ -432,9 +490,15 @@
-BPF_Fs48000_Fc90_B1}
#ifdef HIGHER_FS
,
+ {BPF_Fs88200_Fc90_A0, /* 88kS/s coefficients */
+ -BPF_Fs88200_Fc90_B2,
+ -BPF_Fs88200_Fc90_B1},
{BPF_Fs96000_Fc90_A0, /* 96kS/s coefficients */
-BPF_Fs96000_Fc90_B2,
-BPF_Fs96000_Fc90_B1},
+ {BPF_Fs176400_Fc90_A0, /* 176kS/s coefficients */
+ -BPF_Fs176400_Fc90_B2,
+ -BPF_Fs176400_Fc90_B1},
{BPF_Fs192000_Fc90_A0, /* 192kS/s coefficients */
-BPF_Fs192000_Fc90_B2,
-BPF_Fs192000_Fc90_B1}
@@ -466,7 +530,9 @@
AGC_ATTACK_Fs44100,
AGC_ATTACK_Fs48000
#ifdef HIGHER_FS
+ ,AGC_ATTACK_Fs88200
,AGC_ATTACK_Fs96000
+ ,AGC_ATTACK_Fs176400
,AGC_ATTACK_Fs192000
#endif
@@ -488,7 +554,9 @@
AGC_DECAY_Fs44100,
AGC_DECAY_Fs48000
#ifdef HIGHER_FS
+ ,AGC_DECAY_Fs88200
,AGC_DECAY_FS96000
+ ,AGC_DECAY_Fs176400
,AGC_DECAY_FS192000
#endif
@@ -583,7 +651,9 @@
VOL_TC_Fs44100,
VOL_TC_Fs48000
#ifdef HIGHER_FS
+ ,VOL_TC_Fs88200
,VOL_TC_Fs96000
+ ,VOL_TC_Fs176400
,VOL_TC_Fs192000
#endif
};
@@ -602,7 +672,9 @@
MIX_TC_Fs44100,
MIX_TC_Fs48000
#ifdef HIGHER_FS
+ ,MIX_TC_Fs88200
,MIX_TC_Fs96000
+ ,MIX_TC_Fs176400
,MIX_TC_Fs192000
#endif
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h b/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h
index 8c04847..bab4049 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h
@@ -487,6 +487,97 @@
#define HPF_Fs48000_Gain15_B2 0.000000
#ifdef HIGHER_FS
+/* Coefficients for sample rate 88200Hz */
+/* Gain = 1.000000 dB */
+#define HPF_Fs88200_Gain1_A0 1.094374f
+#define HPF_Fs88200_Gain1_A1 (-0.641256f)
+#define HPF_Fs88200_Gain1_A2 0.000000f
+#define HPF_Fs88200_Gain1_B1 (-0.546882f)
+#define HPF_Fs88200_Gain1_B2 0.000000f
+/* Gain = 2.000000 dB */
+#define HPF_Fs88200_Gain2_A0 1.200264f
+#define HPF_Fs88200_Gain2_A1 (-0.747146f)
+#define HPF_Fs88200_Gain2_A2 0.000000f
+#define HPF_Fs88200_Gain2_B1 (-0.546882f)
+#define HPF_Fs88200_Gain2_B2 0.000000f
+/* Gain = 3.000000 dB */
+#define HPF_Fs88200_Gain3_A0 1.319074f
+#define HPF_Fs88200_Gain3_A1 (-0.865956f)
+#define HPF_Fs88200_Gain3_A2 0.000000f
+#define HPF_Fs88200_Gain3_B1 (-0.546882f)
+#define HPF_Fs88200_Gain3_B2 0.000000f
+/* Gain = 4.000000 dB */
+#define HPF_Fs88200_Gain4_A0 1.452380f
+#define HPF_Fs88200_Gain4_A1 (-0.999263f)
+#define HPF_Fs88200_Gain4_A2 0.000000f
+#define HPF_Fs88200_Gain4_B1 (-0.546882f)
+#define HPF_Fs88200_Gain4_B2 0.000000f
+/* Gain = 5.000000 dB */
+#define HPF_Fs88200_Gain5_A0 1.601953f
+#define HPF_Fs88200_Gain5_A1 (-1.148836f)
+#define HPF_Fs88200_Gain5_A2 0.000000f
+#define HPF_Fs88200_Gain5_B1 (-0.546882f)
+#define HPF_Fs88200_Gain5_B2 0.000000f
+/* Gain = 6.000000 dB */
+#define HPF_Fs88200_Gain6_A0 1.769777f
+#define HPF_Fs88200_Gain6_A1 (-1.316659f)
+#define HPF_Fs88200_Gain6_A2 0.000000f
+#define HPF_Fs88200_Gain6_B1 (-0.546882f)
+#define HPF_Fs88200_Gain6_B2 0.000000f
+/* Gain = 7.000000 dB */
+#define HPF_Fs88200_Gain7_A0 1.958078f
+#define HPF_Fs88200_Gain7_A1 (-1.504960f)
+#define HPF_Fs88200_Gain7_A2 0.000000f
+#define HPF_Fs88200_Gain7_B1 (-0.546882f)
+#define HPF_Fs88200_Gain7_B2 0.000000f
+/* Gain = 8.000000 dB */
+#define HPF_Fs88200_Gain8_A0 2.169355f
+#define HPF_Fs88200_Gain8_A1 (-1.716238f)
+#define HPF_Fs88200_Gain8_A2 0.000000f
+#define HPF_Fs88200_Gain8_B1 (-0.546882f)
+#define HPF_Fs88200_Gain8_B2 0.000000f
+/* Gain = 9.000000 dB */
+#define HPF_Fs88200_Gain9_A0 2.406412f
+#define HPF_Fs88200_Gain9_A1 (-1.953295f)
+#define HPF_Fs88200_Gain9_A2 0.000000f
+#define HPF_Fs88200_Gain9_B1 (-0.546882f)
+#define HPF_Fs88200_Gain9_B2 0.000000f
+/* Gain = 10.000000 dB */
+#define HPF_Fs88200_Gain10_A0 2.672395f
+#define HPF_Fs88200_Gain10_A1 (-2.219277f)
+#define HPF_Fs88200_Gain10_A2 0.000000f
+#define HPF_Fs88200_Gain10_B1 (-0.546882f)
+#define HPF_Fs88200_Gain10_B2 0.000000f
+/* Gain = 11.000000 dB */
+#define HPF_Fs88200_Gain11_A0 2.970832f
+#define HPF_Fs88200_Gain11_A1 (-2.517714f)
+#define HPF_Fs88200_Gain11_A2 0.000000f
+#define HPF_Fs88200_Gain11_B1 (-0.546882f)
+#define HPF_Fs88200_Gain11_B2 0.000000f
+/* Gain = 12.000000 dB */
+#define HPF_Fs88200_Gain12_A0 3.305684f
+#define HPF_Fs88200_Gain12_A1 (-2.852566f)
+#define HPF_Fs88200_Gain12_A2 0.000000f
+#define HPF_Fs88200_Gain12_B1 (-0.546882f)
+#define HPF_Fs88200_Gain12_B2 0.000000f
+/* Gain = 13.000000 dB */
+#define HPF_Fs88200_Gain13_A0 3.681394f
+#define HPF_Fs88200_Gain13_A1 (-3.228276f)
+#define HPF_Fs88200_Gain13_A2 0.000000f
+#define HPF_Fs88200_Gain13_B1 (-0.546882f)
+#define HPF_Fs88200_Gain13_B2 0.000000f
+/* Gain = 14.000000 dB */
+#define HPF_Fs88200_Gain14_A0 4.102947f
+#define HPF_Fs88200_Gain14_A1 (-3.649830f)
+#define HPF_Fs88200_Gain14_A2 0.000000f
+#define HPF_Fs88200_Gain14_B1 (-0.546882f)
+#define HPF_Fs88200_Gain14_B2 0.000000f
+/* Gain = 15.000000 dB */
+#define HPF_Fs88200_Gain15_A0 4.575938f
+#define HPF_Fs88200_Gain15_A1 (-4.122820f)
+#define HPF_Fs88200_Gain15_A2 0.000000f
+#define HPF_Fs88200_Gain15_B1 (-0.546882f)
+#define HPF_Fs88200_Gain15_B2 0.000000f
/* Coefficients for sample rate 96000Hz */
/* Gain = 1.000000 dB */
@@ -580,6 +671,98 @@
#define HPF_Fs96000_Gain15_B1 (-0.577350)
#define HPF_Fs96000_Gain15_B2 0.000000
+/* Coefficients for sample rate 176400Hz */
+/* Gain = 1.000000 dB */
+#define HPF_Fs176400_Gain1_A0 1.106711f
+#define HPF_Fs176400_Gain1_A1 (-0.855807f)
+#define HPF_Fs176400_Gain1_A2 0.000000f
+#define HPF_Fs176400_Gain1_B1 (-0.749096f)
+#define HPF_Fs176400_Gain1_B2 0.000000f
+/* Gain = 2.000000 dB */
+#define HPF_Fs176400_Gain2_A0 1.226443f
+#define HPF_Fs176400_Gain2_A1 (-0.975539f)
+#define HPF_Fs176400_Gain2_A2 0.000000f
+#define HPF_Fs176400_Gain2_B1 (-0.749096f)
+#define HPF_Fs176400_Gain2_B2 0.000000f
+/* Gain = 3.000000 dB */
+#define HPF_Fs176400_Gain3_A0 1.360784f
+#define HPF_Fs176400_Gain3_A1 (-1.109880f)
+#define HPF_Fs176400_Gain3_A2 0.000000f
+#define HPF_Fs176400_Gain3_B1 (-0.749096f)
+#define HPF_Fs176400_Gain3_B2 0.000000f
+/* Gain = 4.000000 dB */
+#define HPF_Fs176400_Gain4_A0 1.511517f
+#define HPF_Fs176400_Gain4_A1 (-1.260613f)
+#define HPF_Fs176400_Gain4_A2 0.000000f
+#define HPF_Fs176400_Gain4_B1 (-0.749096f)
+#define HPF_Fs176400_Gain4_B2 0.000000f
+/* Gain = 5.000000 dB */
+#define HPF_Fs176400_Gain5_A0 1.680643f
+#define HPF_Fs176400_Gain5_A1 (-1.429739f)
+#define HPF_Fs176400_Gain5_A2 0.000000f
+#define HPF_Fs176400_Gain5_B1 (-0.749096f)
+#define HPF_Fs176400_Gain5_B2 0.000000f
+/* Gain = 6.000000 dB */
+#define HPF_Fs176400_Gain6_A0 1.870405f
+#define HPF_Fs176400_Gain6_A1 (-1.619501f)
+#define HPF_Fs176400_Gain6_A2 0.000000f
+#define HPF_Fs176400_Gain6_B1 (-0.749096f)
+#define HPF_Fs176400_Gain6_B2 0.000000f
+/* Gain = 7.000000 dB */
+#define HPF_Fs176400_Gain7_A0 2.083321f
+#define HPF_Fs176400_Gain7_A1 (-1.832417f)
+#define HPF_Fs176400_Gain7_A2 0.000000f
+#define HPF_Fs176400_Gain7_B1 (-0.749096f)
+#define HPF_Fs176400_Gain7_B2 0.000000f
+/* Gain = 8.000000 dB */
+#define HPF_Fs176400_Gain8_A0 2.322217f
+#define HPF_Fs176400_Gain8_A1 (-2.071313f)
+#define HPF_Fs176400_Gain8_A2 0.000000f
+#define HPF_Fs176400_Gain8_B1 (-0.749096f)
+#define HPF_Fs176400_Gain8_B2 0.000000f
+/* Gain = 9.000000 dB */
+#define HPF_Fs176400_Gain9_A0 2.590263f
+#define HPF_Fs176400_Gain9_A1 (-2.339359f)
+#define HPF_Fs176400_Gain9_A2 0.000000f
+#define HPF_Fs176400_Gain9_B1 (-0.749096f)
+#define HPF_Fs176400_Gain9_B2 0.000000f
+/* Gain = 10.000000 dB */
+#define HPF_Fs176400_Gain10_A0 2.891016f
+#define HPF_Fs176400_Gain10_A1 (-2.640112f)
+#define HPF_Fs176400_Gain10_A2 0.000000f
+#define HPF_Fs176400_Gain10_B1 (-0.749096f)
+#define HPF_Fs176400_Gain10_B2 0.000000f
+/* Gain = 11.000000 dB */
+#define HPF_Fs176400_Gain11_A0 3.228465f
+#define HPF_Fs176400_Gain11_A1 (-2.977561f)
+#define HPF_Fs176400_Gain11_A2 0.000000f
+#define HPF_Fs176400_Gain11_B1 (-0.749096f)
+#define HPF_Fs176400_Gain11_B2 0.000000f
+/* Gain = 12.000000 dB */
+#define HPF_Fs176400_Gain12_A0 3.607090f
+#define HPF_Fs176400_Gain12_A1 (-3.356186f)
+#define HPF_Fs176400_Gain12_A2 0.000000f
+#define HPF_Fs176400_Gain12_B1 (-0.749096f)
+#define HPF_Fs176400_Gain12_B2 0.000000f
+/* Gain = 13.000000 dB */
+#define HPF_Fs176400_Gain13_A0 4.031914f
+#define HPF_Fs176400_Gain13_A1 (-3.781010f)
+#define HPF_Fs176400_Gain13_A2 0.000000f
+#define HPF_Fs176400_Gain13_B1 (-0.749096f)
+#define HPF_Fs176400_Gain13_B2 0.000000f
+/* Gain = 14.000000 dB */
+#define HPF_Fs176400_Gain14_A0 4.508575f
+#define HPF_Fs176400_Gain14_A1 (-4.257671f)
+#define HPF_Fs176400_Gain14_A2 0.000000f
+#define HPF_Fs176400_Gain14_B1 (-0.749096f)
+#define HPF_Fs176400_Gain14_B2 0.000000f
+/* Gain = 15.000000 dB */
+#define HPF_Fs176400_Gain15_A0 5.043397f
+#define HPF_Fs176400_Gain15_A1 (-4.792493f)
+#define HPF_Fs176400_Gain15_A2 0.000000f
+#define HPF_Fs176400_Gain15_B1 (-0.749096f)
+#define HPF_Fs176400_Gain15_B2 0.000000f
+
/* Coefficients for sample rate 192000Hz */
/* Gain = 1.000000 dB */
#define HPF_Fs192000_Gain1_A0 1.107823
@@ -1216,4 +1399,4 @@
#endif
-#endif
\ No newline at end of file
+#endif
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
index 7b85f23..62b4c73 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
@@ -71,7 +71,8 @@
((pParams->SampleRate != LVM_FS_8000) && (pParams->SampleRate != LVM_FS_11025) && (pParams->SampleRate != LVM_FS_12000) &&
(pParams->SampleRate != LVM_FS_16000) && (pParams->SampleRate != LVM_FS_22050) && (pParams->SampleRate != LVM_FS_24000) &&
(pParams->SampleRate != LVM_FS_32000) && (pParams->SampleRate != LVM_FS_44100) && (pParams->SampleRate != LVM_FS_48000) &&
- (pParams->SampleRate != LVM_FS_96000) && (pParams->SampleRate != LVM_FS_192000)) ||
+ (pParams->SampleRate != LVM_FS_88200) && (pParams->SampleRate != LVM_FS_96000) &&
+ (pParams->SampleRate != LVM_FS_176400) && (pParams->SampleRate != LVM_FS_192000)) ||
#else
((pParams->SampleRate != LVM_FS_8000) && (pParams->SampleRate != LVM_FS_11025) && (pParams->SampleRate != LVM_FS_12000) &&
(pParams->SampleRate != LVM_FS_16000) && (pParams->SampleRate != LVM_FS_22050) && (pParams->SampleRate != LVM_FS_24000) &&
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
index ade329b..0669a81 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
@@ -233,7 +233,13 @@
* Set the capabilities
*/
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
- DBE_Capabilities.SampleRate = LVDBE_CAP_FS_8000 | LVDBE_CAP_FS_11025 | LVDBE_CAP_FS_12000 | LVDBE_CAP_FS_16000 | LVDBE_CAP_FS_22050 | LVDBE_CAP_FS_24000 | LVDBE_CAP_FS_32000 | LVDBE_CAP_FS_44100 | LVDBE_CAP_FS_48000 | LVDBE_CAP_FS_96000 | LVDBE_CAP_FS_192000;
+ DBE_Capabilities.SampleRate = LVDBE_CAP_FS_8000 | LVDBE_CAP_FS_11025 |
+ LVDBE_CAP_FS_12000 | LVDBE_CAP_FS_16000 |
+ LVDBE_CAP_FS_22050 | LVDBE_CAP_FS_24000 |
+ LVDBE_CAP_FS_32000 | LVDBE_CAP_FS_44100 |
+ LVDBE_CAP_FS_48000 | LVDBE_CAP_FS_88200 |
+ LVDBE_CAP_FS_96000 | LVDBE_CAP_FS_176400 |
+ LVDBE_CAP_FS_192000;
#else
DBE_Capabilities.SampleRate = LVDBE_CAP_FS_8000 | LVDBE_CAP_FS_11025 | LVDBE_CAP_FS_12000 | LVDBE_CAP_FS_16000 | LVDBE_CAP_FS_22050 | LVDBE_CAP_FS_24000 | LVDBE_CAP_FS_32000 | LVDBE_CAP_FS_44100 | LVDBE_CAP_FS_48000;
#endif
@@ -270,7 +276,13 @@
* Set the capabilities
*/
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
- EQNB_Capabilities.SampleRate = LVEQNB_CAP_FS_8000 | LVEQNB_CAP_FS_11025 | LVEQNB_CAP_FS_12000 | LVEQNB_CAP_FS_16000 | LVEQNB_CAP_FS_22050 | LVEQNB_CAP_FS_24000 | LVEQNB_CAP_FS_32000 | LVEQNB_CAP_FS_44100 | LVEQNB_CAP_FS_48000 | LVEQNB_CAP_FS_96000 | LVEQNB_CAP_FS_192000;
+ EQNB_Capabilities.SampleRate = LVEQNB_CAP_FS_8000 | LVEQNB_CAP_FS_11025 |
+ LVEQNB_CAP_FS_12000 | LVEQNB_CAP_FS_16000 |
+ LVEQNB_CAP_FS_22050 | LVEQNB_CAP_FS_24000 |
+ LVEQNB_CAP_FS_32000 | LVEQNB_CAP_FS_44100 |
+ LVEQNB_CAP_FS_48000 | LVEQNB_CAP_FS_88200 |
+ LVEQNB_CAP_FS_96000 | LVEQNB_CAP_FS_176400 |
+ LVEQNB_CAP_FS_192000;
#else
EQNB_Capabilities.SampleRate = LVEQNB_CAP_FS_8000 | LVEQNB_CAP_FS_11025 | LVEQNB_CAP_FS_12000 | LVEQNB_CAP_FS_16000 | LVEQNB_CAP_FS_22050 | LVEQNB_CAP_FS_24000 | LVEQNB_CAP_FS_32000 | LVEQNB_CAP_FS_44100 | LVEQNB_CAP_FS_48000;
#endif
@@ -747,7 +759,13 @@
* Set the initialisation capabilities
*/
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
- DBE_Capabilities.SampleRate = LVDBE_CAP_FS_8000 | LVDBE_CAP_FS_11025 | LVDBE_CAP_FS_12000 | LVDBE_CAP_FS_16000 | LVDBE_CAP_FS_22050 | LVDBE_CAP_FS_24000 | LVDBE_CAP_FS_32000 | LVDBE_CAP_FS_44100 | LVDBE_CAP_FS_48000 | LVDBE_CAP_FS_96000 | LVDBE_CAP_FS_192000;
+ DBE_Capabilities.SampleRate = LVDBE_CAP_FS_8000 | LVDBE_CAP_FS_11025 |
+ LVDBE_CAP_FS_12000 | LVDBE_CAP_FS_16000 |
+ LVDBE_CAP_FS_22050 | LVDBE_CAP_FS_24000 |
+ LVDBE_CAP_FS_32000 | LVDBE_CAP_FS_44100 |
+ LVDBE_CAP_FS_48000 | LVDBE_CAP_FS_88200 |
+ LVDBE_CAP_FS_96000 | LVDBE_CAP_FS_176400 |
+ LVDBE_CAP_FS_192000;
#else
DBE_Capabilities.SampleRate = LVDBE_CAP_FS_8000 | LVDBE_CAP_FS_11025 | LVDBE_CAP_FS_12000 | LVDBE_CAP_FS_16000 | LVDBE_CAP_FS_22050 | LVDBE_CAP_FS_24000 | LVDBE_CAP_FS_32000 | LVDBE_CAP_FS_44100 | LVDBE_CAP_FS_48000;
#endif
@@ -805,7 +823,13 @@
* Set the initialisation capabilities
*/
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
- EQNB_Capabilities.SampleRate = LVEQNB_CAP_FS_8000 | LVEQNB_CAP_FS_11025 | LVEQNB_CAP_FS_12000 | LVEQNB_CAP_FS_16000 | LVEQNB_CAP_FS_22050 | LVEQNB_CAP_FS_24000 | LVEQNB_CAP_FS_32000 | LVEQNB_CAP_FS_44100 | LVEQNB_CAP_FS_48000 | LVEQNB_CAP_FS_96000 | LVEQNB_CAP_FS_192000;
+ EQNB_Capabilities.SampleRate = LVEQNB_CAP_FS_8000 | LVEQNB_CAP_FS_11025 |
+ LVEQNB_CAP_FS_12000 | LVEQNB_CAP_FS_16000 |
+ LVEQNB_CAP_FS_22050 | LVEQNB_CAP_FS_24000 |
+ LVEQNB_CAP_FS_32000 | LVEQNB_CAP_FS_44100 |
+ LVEQNB_CAP_FS_48000 | LVEQNB_CAP_FS_88200 |
+ LVEQNB_CAP_FS_96000 | LVEQNB_CAP_FS_176400 |
+ LVEQNB_CAP_FS_192000;
#else
EQNB_Capabilities.SampleRate = LVEQNB_CAP_FS_8000 | LVEQNB_CAP_FS_11025 | LVEQNB_CAP_FS_12000 | LVEQNB_CAP_FS_16000 | LVEQNB_CAP_FS_22050 | LVEQNB_CAP_FS_24000 | LVEQNB_CAP_FS_32000 | LVEQNB_CAP_FS_44100 | LVEQNB_CAP_FS_48000;
#endif
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Tables.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Tables.c
index 199ddde..a5356d2 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Tables.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Tables.c
@@ -269,6 +269,53 @@
-HPF_Fs48000_Gain15_B1}
#ifdef HIGHER_FS
,
+ /* 88.2kHz sampling rate */
+ {HPF_Fs88200_Gain1_A1, /* Gain Setting 1 */
+ HPF_Fs88200_Gain1_A0,
+ -HPF_Fs88200_Gain1_B1},
+ {HPF_Fs88200_Gain2_A1, /* Gain Setting 2 */
+ HPF_Fs88200_Gain2_A0,
+ -HPF_Fs88200_Gain2_B1},
+ {HPF_Fs88200_Gain3_A1, /* Gain Setting 3 */
+ HPF_Fs88200_Gain3_A0,
+ -HPF_Fs88200_Gain3_B1},
+ {HPF_Fs88200_Gain4_A1, /* Gain Setting 4 */
+ HPF_Fs88200_Gain4_A0,
+ -HPF_Fs88200_Gain4_B1},
+ {HPF_Fs88200_Gain5_A1, /* Gain Setting 5 */
+ HPF_Fs88200_Gain5_A0,
+ -HPF_Fs88200_Gain5_B1},
+ {HPF_Fs88200_Gain6_A1, /* Gain Setting 6 */
+ HPF_Fs88200_Gain6_A0,
+ -HPF_Fs88200_Gain6_B1},
+ {HPF_Fs88200_Gain7_A1, /* Gain Setting 7 */
+ HPF_Fs88200_Gain7_A0,
+ -HPF_Fs88200_Gain7_B1},
+ {HPF_Fs88200_Gain8_A1, /* Gain Setting 8 */
+ HPF_Fs88200_Gain8_A0,
+ -HPF_Fs88200_Gain8_B1},
+ {HPF_Fs88200_Gain9_A1, /* Gain Setting 9 */
+ HPF_Fs88200_Gain9_A0,
+ -HPF_Fs88200_Gain9_B1},
+ {HPF_Fs88200_Gain10_A1, /* Gain Setting 10 */
+ HPF_Fs88200_Gain10_A0,
+ -HPF_Fs88200_Gain10_B1},
+ {HPF_Fs88200_Gain11_A1, /* Gain Setting 11 */
+ HPF_Fs88200_Gain11_A0,
+ -HPF_Fs88200_Gain11_B1},
+ {HPF_Fs88200_Gain12_A1, /* Gain Setting 12 */
+ HPF_Fs88200_Gain12_A0,
+ -HPF_Fs88200_Gain12_B1},
+ {HPF_Fs88200_Gain13_A1, /* Gain Setting 13 */
+ HPF_Fs88200_Gain13_A0,
+ -HPF_Fs88200_Gain13_B1},
+ {HPF_Fs88200_Gain14_A1, /* Gain Setting 14 */
+ HPF_Fs88200_Gain14_A0,
+ -HPF_Fs88200_Gain14_B1},
+ {HPF_Fs88200_Gain15_A1, /* Gain Setting 15 */
+ HPF_Fs88200_Gain15_A0,
+ -HPF_Fs88200_Gain15_B1},
+
/* 96kHz sampling rate */
{HPF_Fs96000_Gain1_A1, /* Gain setting 1 */
HPF_Fs96000_Gain1_A0,
@@ -316,6 +363,53 @@
HPF_Fs96000_Gain15_A0,
-HPF_Fs96000_Gain15_B1},
+ /* 176.4kHz sampling rate */
+ {HPF_Fs176400_Gain1_A1, /* Gain Setting 1 */
+ HPF_Fs176400_Gain1_A0,
+ -HPF_Fs176400_Gain1_B1},
+ {HPF_Fs176400_Gain2_A1, /* Gain Setting 2 */
+ HPF_Fs176400_Gain2_A0,
+ -HPF_Fs176400_Gain2_B1},
+ {HPF_Fs176400_Gain3_A1, /* Gain Setting 3 */
+ HPF_Fs176400_Gain3_A0,
+ -HPF_Fs176400_Gain3_B1},
+ {HPF_Fs176400_Gain4_A1, /* Gain Setting 4 */
+ HPF_Fs176400_Gain4_A0,
+ -HPF_Fs176400_Gain4_B1},
+ {HPF_Fs176400_Gain5_A1, /* Gain Setting 5 */
+ HPF_Fs176400_Gain5_A0,
+ -HPF_Fs176400_Gain5_B1},
+ {HPF_Fs176400_Gain6_A1, /* Gain Setting 6 */
+ HPF_Fs176400_Gain6_A0,
+ -HPF_Fs176400_Gain6_B1},
+ {HPF_Fs176400_Gain7_A1, /* Gain Setting 7 */
+ HPF_Fs176400_Gain7_A0,
+ -HPF_Fs176400_Gain7_B1},
+ {HPF_Fs176400_Gain8_A1, /* Gain Setting 8 */
+ HPF_Fs176400_Gain8_A0,
+ -HPF_Fs176400_Gain8_B1},
+ {HPF_Fs176400_Gain9_A1, /* Gain Setting 9 */
+ HPF_Fs176400_Gain9_A0,
+ -HPF_Fs176400_Gain9_B1},
+ {HPF_Fs176400_Gain10_A1, /* Gain Setting 10 */
+ HPF_Fs176400_Gain10_A0,
+ -HPF_Fs176400_Gain10_B1},
+ {HPF_Fs176400_Gain11_A1, /* Gain Setting 11 */
+ HPF_Fs176400_Gain11_A0,
+ -HPF_Fs176400_Gain11_B1},
+ {HPF_Fs176400_Gain12_A1, /* Gain Setting 12 */
+ HPF_Fs176400_Gain12_A0,
+ -HPF_Fs176400_Gain12_B1},
+ {HPF_Fs176400_Gain13_A1, /* Gain Setting 13 */
+ HPF_Fs176400_Gain13_A0,
+ -HPF_Fs176400_Gain13_B1},
+ {HPF_Fs176400_Gain14_A1, /* Gain Setting 14 */
+ HPF_Fs176400_Gain14_A0,
+ -HPF_Fs176400_Gain14_B1},
+ {HPF_Fs176400_Gain15_A1, /* Gain Setting 15 */
+ HPF_Fs176400_Gain15_A0,
+ -HPF_Fs176400_Gain15_B1},
+
/* 192kHz sampling rate */
{HPF_Fs192000_Gain1_A1, /* Gain setting 1 */
HPF_Fs192000_Gain1_A0,
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index 303b62d..59586e0 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -169,8 +169,10 @@
LVM_FS_44100 = 7,
LVM_FS_48000 = 8,
#ifdef HIGHER_FS
- LVM_FS_96000 = 9,
- LVM_FS_192000 = 10,
+ LVM_FS_88200 = 9,
+ LVM_FS_96000 = 10,
+ LVM_FS_176400 = 11,
+ LVM_FS_192000 = 12,
#endif
LVM_FS_INVALID = LVM_MAXENUM-1,
LVM_FS_DUMMY = LVM_MAXENUM
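Note that inserting LVM_FS_88200 and LVM_FS_176400 renumbers LVM_FS_96000 and LVM_FS_192000, so every table indexed by LVM_Fs_en has to grow in lockstep (hence LVREV_NUM_FS and LVPSA_NR_SUPPORTED_RATE moving from 11 to 13 and the [11] arrays becoming [13] in the files below). A minimal standalone sketch of a compile-time guard for that invariant follows; the simplified enum and the LVM_NUM_FS helper enumerator are assumptions for illustration only, not part of this change.

/* Hypothetical sketch: keep per-rate tables in sync with the sample-rate enum. */
enum LVM_Fs_en {
    LVM_FS_8000, LVM_FS_11025, LVM_FS_12000, LVM_FS_16000, LVM_FS_22050,
    LVM_FS_24000, LVM_FS_32000, LVM_FS_44100, LVM_FS_48000,
    LVM_FS_88200, LVM_FS_96000, LVM_FS_176400, LVM_FS_192000,
    LVM_NUM_FS   /* assumed helper: always kept last */
};

static const int SampleRateTable[] = {
    8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
    88200, 96000, 176400, 192000,
};

/* Fails to compile if a rate is added to the enum but not to the table. */
static_assert(sizeof(SampleRateTable) / sizeof(SampleRateTable[0]) == LVM_NUM_FS,
              "SampleRateTable needs one entry per LVM_Fs_en value");

int main() { return 0; }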
diff --git a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
index e7fdbf6..385dbcf 100644
--- a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
+++ b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
@@ -201,8 +201,10 @@
#define LVEQNB_CAP_FS_44100 128
#define LVEQNB_CAP_FS_48000 256
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
-#define LVEQNB_CAP_FS_96000 512
-#define LVEQNB_CAP_FS_192000 1024
+#define LVEQNB_CAP_FS_88200 512
+#define LVEQNB_CAP_FS_96000 1024
+#define LVEQNB_CAP_FS_176400 2048
+#define LVEQNB_CAP_FS_192000 4096
#endif
typedef enum
@@ -217,8 +219,10 @@
LVEQNB_FS_44100 = 7,
LVEQNB_FS_48000 = 8,
#ifdef HIGHER_FS
- LVEQNB_FS_96000 = 9,
- LVEQNB_FS_192000 = 10,
+ LVEQNB_FS_88200 = 9,
+ LVEQNB_FS_96000 = 10,
+ LVEQNB_FS_176400 = 11,
+ LVEQNB_FS_192000 = 12,
#endif
LVEQNB_FS_MAX = LVM_MAXINT_32
} LVEQNB_Fs_en;
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h
index 42ea46f..755141e 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h
@@ -109,7 +109,9 @@
#define LVEQNB_2PiOn_48000 0.000131f
#ifdef HIGHER_FS
+#define LVEQNB_2PiOn_88200 0.000071f
#define LVEQNB_2PiOn_96000 0.000065f
+#define LVEQNB_2PiOn_176400 0.000036f
#define LVEQNB_2PiOn_192000 0.000033f
#endif
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Tables.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Tables.c
index 563181c..453c42d 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Tables.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Tables.c
@@ -46,7 +46,9 @@
32000,
44100,
48000,
+ 88200,
96000,
+ 176400,
192000
};
#else
@@ -82,7 +84,9 @@
LVEQNB_2PiOn_44100,
LVEQNB_2PiOn_48000
#ifdef HIGHER_FS
+ ,LVEQNB_2PiOn_88200
,LVEQNB_2PiOn_96000
+ ,LVEQNB_2PiOn_176400
,LVEQNB_2PiOn_192000
#endif
};
@@ -249,30 +253,4 @@
16586, /* a2 */
-44}; /* a3 */
-/************************************************************************************/
-/* */
-/* Bypass mixer time constants (100ms) */
-/* */
-/************************************************************************************/
-#define LVEQNB_MIX_TC_Fs8000 32580 /* Floating point value 0.994262695 */
-#define LVEQNB_MIX_TC_Fs11025 32632 /* Floating point value 0.995849609 */
-#define LVEQNB_MIX_TC_Fs12000 32643 /* Floating point value 0.996185303 */
-#define LVEQNB_MIX_TC_Fs16000 32674 /* Floating point value 0.997131348 */
-#define LVEQNB_MIX_TC_Fs22050 32700 /* Floating point value 0.997924805 */
-#define LVEQNB_MIX_TC_Fs24000 32705 /* Floating point value 0.998077393 */
-#define LVEQNB_MIX_TC_Fs32000 32721 /* Floating point value 0.998565674 */
-#define LVEQNB_MIX_TC_Fs44100 32734 /* Floating point value 0.998962402 */
-#define LVEQNB_MIX_TC_Fs48000 32737 /* Floating point value 0.999053955 */
-
-
-const LVM_INT16 LVEQNB_MixerTCTable[] = {
- LVEQNB_MIX_TC_Fs8000,
- LVEQNB_MIX_TC_Fs11025,
- LVEQNB_MIX_TC_Fs12000,
- LVEQNB_MIX_TC_Fs16000,
- LVEQNB_MIX_TC_Fs22050,
- LVEQNB_MIX_TC_Fs24000,
- LVEQNB_MIX_TC_Fs32000,
- LVEQNB_MIX_TC_Fs44100,
- LVEQNB_MIX_TC_Fs48000};
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_Private.h b/media/libeffects/lvm/lib/Reverb/src/LVREV_Private.h
index ff7475e..c915ac0 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_Private.h
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_Private.h
@@ -123,7 +123,7 @@
#ifndef HIGHER_FS
#define LVREV_NUM_FS 9 /* Number of supported sample rates */
#else
-#define LVREV_NUM_FS 11 /* Number of supported sample rates */
+#define LVREV_NUM_FS 13 /* Number of supported sample rates */
#endif
#define LVREV_MAXBLKSIZE_LIMIT 64 /* Maximum block size low limit */
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
index 8c7807f..dfed28e 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
@@ -68,7 +68,8 @@
(pNewParams->SampleRate != LVM_FS_44100) &&
(pNewParams->SampleRate != LVM_FS_48000)
#ifdef HIGHER_FS
- && (pNewParams->SampleRate != LVM_FS_96000) && (pNewParams->SampleRate != LVM_FS_192000)
+ && (pNewParams->SampleRate != LVM_FS_88200) && (pNewParams->SampleRate != LVM_FS_96000)
+ && (pNewParams->SampleRate != LVM_FS_176400) && (pNewParams->SampleRate != LVM_FS_192000)
#endif
)
#ifdef SUPPORT_MC
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_Tables.c b/media/libeffects/lvm/lib/Reverb/src/LVREV_Tables.c
index b3edc60..1058740 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_Tables.c
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_Tables.c
@@ -52,7 +52,9 @@
32000,
44100,
48000,
+ 88200,
96000,
+ 176400,
192000
};
#endif
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h
index a750bb0..ee07e2e 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h
@@ -46,7 +46,7 @@
#ifndef HIGHER_FS
#define LVPSA_NR_SUPPORTED_RATE 9 /* From 8000Hz to 48000Hz*/
#else
-#define LVPSA_NR_SUPPORTED_RATE 11 /* From 8000Hz to 192000Hz*/
+#define LVPSA_NR_SUPPORTED_RATE 13 /* From 8000Hz to 192000Hz*/
#endif
#define LVPSA_NR_SUPPORTED_SPEED 3 /* LOW, MEDIUM, HIGH */
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Tables.c b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Tables.c
index 1287503..f8af496 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Tables.c
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Tables.c
@@ -54,7 +54,9 @@
32000,
44100,
48000,
+ 88200,
96000,
+ 176400,
192000}; /* 192kS/s */
#endif
@@ -78,7 +80,9 @@
48696,
44739
#ifdef HIGHER_FS
+ ,24348
,22369
+ ,12174
,11185 /* 192kS/s */
#endif
};
@@ -105,7 +109,9 @@
882,
960
#ifdef HIGHER_FS
+ ,1764
,1920
+ ,3528
,3840 /* 192kS/s */
#endif
};
@@ -128,7 +134,9 @@
30, /* 44100 S/s */
32 /* 48000 S/s */
#ifdef HIGHER_FS
+ ,60 /* 88200 S/s */
,64 /* 96000 S/s */
+ ,120 /* 176400 S/s */
,128 /*192000 S/s */
#endif
};
@@ -153,7 +161,9 @@
4781,
4392
#ifdef HIGHER_FS
+ ,2390
,2196
+ ,1195
,1098 /* 192kS/s */
#endif
};
@@ -169,7 +179,9 @@
0.1459089f,
0.1340372f
#ifdef HIGHER_FS
+ ,0.0729476f
,0.0670186f
+ ,0.0364738f
,0.0335093f /* 192kS/s */
#endif
};
@@ -352,7 +364,9 @@
/* 48kS/s */
{-0.9932638457976282f,0.0066249934025109f},
#ifdef HIGHER_FS
+ {-0.9931269618682563f,0.0067592649720609f},
{-0.9932638457976282f,0.0066249934025109f},
+ {-0.9931269618682563f,0.0067592649720609f},
{-0.9932638457976282f,0.0066249934025109f},
#endif
/* 8kS/s */ /* LVPSA_SPEED_MEDIUM */
@@ -368,7 +382,9 @@
/* 48kS/s */
{-0.9540119562298059f,0.0445343819446862f},
#ifdef HIGHER_FS
+ {-0.9531011912040412f,0.0453995238058269f},
{-0.9540119562298059f,0.0445343819446862f},
+ {-0.9531011912040412f,0.0453995238058269f},
{-0.9540119562298059f,0.0445343819446862f},
#endif
/* 8kS/s */ /* LVPSA_SPEED_HIGH */
@@ -383,7 +399,9 @@
/* 48kS/s */
{-0.7274807319045067f,0.2356666540727019f}
#ifdef HIGHER_FS
+ ,{-0.7229706319049001f,0.2388987224549055f}
,{-0.7274807319045067f,0.2356666540727019f}
+ ,{-0.7229706319049001f,0.2388987224549055f}
,{-0.7274807319045067f,0.2356666540727019f}
#endif
};
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
index 0c2fe53..e45d81f 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
@@ -152,6 +152,24 @@
#define CS_SIDE_48000_SCALE 14
#ifdef HIGHER_FS
+/* Stereo Enhancer coefficients for 88200Hz sample rate.
+ * The filter coefficients are obtained by carrying out
+ * state-space analysis using the coefficients available
+ * for 44100Hz.
+ */
+#define CS_MIDDLE_88200_A0 0.233846f
+#define CS_MIDDLE_88200_A1 (-0.232657f)
+#define CS_MIDDLE_88200_A2 0.000000f
+#define CS_MIDDLE_88200_B1 (-0.992747f)
+#define CS_MIDDLE_88200_B2 0.000000f
+#define CS_MIDDLE_88200_SCALE 15
+#define CS_SIDE_88200_A0 0.231541f
+#define CS_SIDE_88200_A1 (-0.289586f)
+#define CS_SIDE_88200_A2 0.058045f
+#define CS_SIDE_88200_B1 (-1.765300f)
+#define CS_SIDE_88200_B2 0.769816f
+#define CS_SIDE_88200_SCALE 14
+
/* Stereo Enhancer coefficients for 96000Hz sample rate, scaled with 0.165*/
/* high pass filter with cutoff frequency 102.18 Hz*/
#define CS_MIDDLE_96000_A0 0.235532
@@ -160,13 +178,33 @@
#define CS_MIDDLE_96000_B1 (-0.993334)
#define CS_MIDDLE_96000_B2 0.000000
#define CS_MIDDLE_96000_SCALE 15
-/* bandpass filter with fc1 270 and fc2 3703, designed using 2nd order butterworth */
-#define CS_SIDE_96000_A0 0.016727
-#define CS_SIDE_96000_A1 0.000000
-#define CS_SIDE_96000_A2 (-0.016727)
-#define CS_SIDE_96000_B1 (-1.793372)
-#define CS_SIDE_96000_B2 0.797236
-#define CS_SIDE_96000_SCALE 14
+/* Coefficients calculated using tf2ss and ss2tf functions based on
+ * coefficients available for 48000Hz sampling frequency
+ */
+#define CS_SIDE_96000_A0 0.224326f
+#define CS_SIDE_96000_A1 (-0.294937f)
+#define CS_SIDE_96000_A2 0.070611f
+#define CS_SIDE_96000_B1 (-1.792166f)
+#define CS_SIDE_96000_B2 0.795830f
+#define CS_SIDE_96000_SCALE 14
+
+/* Stereo Enhancer coefficients for 176400Hz sample rate.
+ * The filter coefficients are obtained by carrying out
+ * state-space analysis using the coefficients available
+ * for 44100Hz.
+ */
+#define CS_MIDDLE_176400_A0 0.233973f
+#define CS_MIDDLE_176400_A1 (-0.233378f)
+#define CS_MIDDLE_176400_A2 0.000000f
+#define CS_MIDDLE_176400_B1 (-0.996367f)
+#define CS_MIDDLE_176400_B2 0.000000f
+#define CS_MIDDLE_176400_SCALE 15
+#define CS_SIDE_176400_A0 0.199836f
+#define CS_SIDE_176400_A1 (-0.307544f)
+#define CS_SIDE_176400_A2 0.107708f
+#define CS_SIDE_176400_B1 (-1.876572f)
+#define CS_SIDE_176400_B2 0.877771f
+#define CS_SIDE_176400_SCALE 14
/* Stereo Enhancer coefficients for 192000Hz sample rate, scaled with 0.1689*/
#define CS_MIDDLE_192000_A0 0.241219
@@ -175,13 +213,15 @@
#define CS_MIDDLE_192000_B1 (-0.996661)
#define CS_MIDDLE_192000_B2 0.000000
#define CS_MIDDLE_192000_SCALE 15
-/* bandpass filter with fc1 270 and fc2 3703, designed using 2nd order butterworth */
-#define CS_SIDE_192000_A0 0.008991
-#define CS_SIDE_192000_A1 (-0.000000)
-#define CS_SIDE_192000_A2 (-0.008991)
-#define CS_SIDE_192000_B1 (-1.892509)
-#define CS_SIDE_192000_B2 0.893524
-#define CS_SIDE_192000_SCALE 14
+/* Coefficients calculated using tf2ss and ss2tf functions based on
+ * coefficients available for 48000Hz sampling frequency
+ */
+#define CS_SIDE_192000_A0 0.196039f
+#define CS_SIDE_192000_A1 (-0.311027f)
+#define CS_SIDE_192000_A2 0.114988f
+#define CS_SIDE_192000_B1 (-1.891380f)
+#define CS_SIDE_192000_B2 0.8923460f
+#define CS_SIDE_192000_SCALE 14
#endif
/************************************************************************************/
@@ -199,7 +239,13 @@
#define LVCS_STEREODELAY_CS_24KHZ 279 /* Sample rate 24kS/s */
#define LVCS_STEREODELAY_CS_32KHZ 372 /* Sample rate 32kS/s */
#define LVCS_STEREODELAY_CS_44KHZ 512 /* Sample rate 44kS/s */
+// TODO: this should linearly scale by frequency but is limited to 512 frames until
+// we ensure enough buffer size has been allocated.
#define LVCS_STEREODELAY_CS_48KHZ 512 /* Sample rate 48kS/s */
+#define LVCS_STEREODELAY_CS_88KHZ 512 /* Sample rate 88.2kS/s */
+#define LVCS_STEREODELAY_CS_96KHZ 512 /* Sample rate 96kS/s */
+#define LVCS_STEREODELAY_CS_176KHZ 512 /* Sample rate 176.4kS/s */
+#define LVCS_STEREODELAY_CS_192KHZ 512 /* Sample rate 192kS/s */
/* Reverb coefficients for 8000 Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_8000_A0 0.667271
@@ -275,6 +321,14 @@
#define CS_REVERB_48000_SCALE 14
#ifdef HIGHER_FS
+/* Reverb coefficients for 88200Hz sample rate, scaled with 0.8 */
+/* Band pass filter with fc1=500 and fc2=8000 */
+#define CS_REVERB_88200_A0 0.171901f
+#define CS_REVERB_88200_A1 0.000000f
+#define CS_REVERB_88200_A2 (-0.171901f)
+#define CS_REVERB_88200_B1 (-1.553948f)
+#define CS_REVERB_88200_B2 (0.570248f)
+#define CS_REVERB_88200_SCALE 14
/* Reverb coefficients for 96000Hz sample rate, scaled with 0.8 */
/* Band pass filter with fc1=500 and fc2=8000*/
#define CS_REVERB_96000_A0 0.1602488
@@ -284,6 +338,14 @@
#define CS_REVERB_96000_B2 0.599377
#define CS_REVERB_96000_SCALE 14
+/* Reverb coefficients for 176400Hz sample rate, scaled with 0.8 */
+/* Band pass filter with fc1=500 and fc2=8000 */
+#define CS_REVERB_176400_A0 0.094763f
+#define CS_REVERB_176400_A1 0.000000f
+#define CS_REVERB_176400_A2 (-0.094763f)
+#define CS_REVERB_176400_B1 (-1.758593f)
+#define CS_REVERB_176400_B2 (0.763091f)
+#define CS_REVERB_176400_SCALE 14
/* Reverb coefficients for 192000Hz sample rate, scaled with 0.8 */
/* Band pass filter with fc1=500 and fc2=8000*/
#define CS_REVERB_192000_A0 0.0878369
@@ -446,6 +508,24 @@
#ifdef HIGHER_FS
+/* Equaliser coefficients for 88200Hz sample rate.
+ * The filter coefficients are obtained by carrying out
+ * state-space analysis using the coefficients available
+ * for 44100Hz.
+ */
+#define CS_EQUALISER_88200_A0 1.771899f
+#define CS_EQUALISER_88200_A1 (-2.930762f)
+#define CS_EQUALISER_88200_A2 1.172175f
+#define CS_EQUALISER_88200_B1 (-1.438349f)
+#define CS_EQUALISER_88200_B2 0.442520f
+#define CS_EQUALISER_88200_SCALE 13
+#define CSEX_EQUALISER_88200_A0 2.675241f
+#define CSEX_EQUALISER_88200_A1 (-4.466154f)
+#define CSEX_EQUALISER_88200_A2 1.810305f
+#define CSEX_EQUALISER_88200_B1 (-0.925350f)
+#define CSEX_EQUALISER_88200_B2 (-0.066616f)
+#define CSEX_EQUALISER_88200_SCALE 13
+
#define CS_EQUALISER_96000_A0 1.784497
#define CS_EQUALISER_96000_A1 (-3.001435)
#define CS_EQUALISER_96000_A2 1.228422
@@ -458,6 +538,23 @@
#define CSEX_EQUALISER_96000_B1 (-0.971718)
#define CSEX_EQUALISER_96000_B2 (-0.021216)
#define CSEX_EQUALISER_96000_SCALE 13
+/* Equaliser coefficients for 176400Hz sample rate.
+ * The filter coefficients are obtained by carrying out
+ * state-space analysis using the coefficients available
+ * for 44100Hz.
+ */
+#define CS_EQUALISER_176400_A0 1.883440f
+#define CS_EQUALISER_176400_A1 (-3.414272f)
+#define CS_EQUALISER_176400_A2 1.534702f
+#define CS_EQUALISER_176400_B1 (-1.674614f)
+#define CS_EQUALISER_176400_B2 0.675827f
+#define CS_EQUALISER_176400_SCALE 13
+#define CSEX_EQUALISER_176400_A0 3.355068f
+#define CSEX_EQUALISER_176400_A1 (-6.112578f)
+#define CSEX_EQUALISER_176400_A2 2.764135f
+#define CSEX_EQUALISER_176400_B1 (-1.268533f)
+#define CSEX_EQUALISER_176400_B2 0.271277f
+#define CSEX_EQUALISER_176400_SCALE 13
#define CS_EQUALISER_192000_A0 1.889582
#define CS_EQUALISER_192000_A1 (-3.456140)
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
index 0765764..a1fb48f 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
@@ -74,10 +74,18 @@
(LVM_UINT16 )CS_MIDDLE_48000_SCALE}
#ifdef HIGHER_FS
,
+ {CS_MIDDLE_88200_A0, /* 88kS/s coefficients */
+ CS_MIDDLE_88200_A1,
+ CS_MIDDLE_88200_B1,
+ (LVM_UINT16)CS_MIDDLE_88200_SCALE},
{CS_MIDDLE_96000_A0, /* 96kS/s coefficients */
CS_MIDDLE_96000_A1,
CS_MIDDLE_96000_B1,
(LVM_UINT16 )CS_MIDDLE_96000_SCALE},
+ {CS_MIDDLE_176400_A0, /* 176kS/s coefficients */
+ CS_MIDDLE_176400_A1,
+ CS_MIDDLE_176400_B1,
+ (LVM_UINT16)CS_MIDDLE_176400_SCALE},
{CS_MIDDLE_192000_A0, /* 192kS/s coefficients */
CS_MIDDLE_192000_A1,
CS_MIDDLE_192000_B1,
@@ -144,12 +152,24 @@
(LVM_UINT16 )CS_SIDE_48000_SCALE}
#ifdef HIGHER_FS
,
+ {CS_SIDE_88200_A0, /* 88kS/s coefficients */
+ CS_SIDE_88200_A1,
+ CS_SIDE_88200_A2,
+ CS_SIDE_88200_B1,
+ CS_SIDE_88200_B2,
+ (LVM_UINT16)CS_SIDE_88200_SCALE},
{CS_SIDE_96000_A0, /* 96kS/s coefficients */
CS_SIDE_96000_A1,
CS_SIDE_96000_A2,
CS_SIDE_96000_B1,
CS_SIDE_96000_B2,
(LVM_UINT16 )CS_SIDE_96000_SCALE},
+ {CS_SIDE_176400_A0, /* 176kS/s coefficients */
+ CS_SIDE_176400_A1,
+ CS_SIDE_176400_A2,
+ CS_SIDE_176400_B1,
+ CS_SIDE_176400_B2,
+ (LVM_UINT16)CS_SIDE_176400_SCALE},
{CS_SIDE_192000_A0, /* 192kS/s coefficients */
CS_SIDE_192000_A1,
CS_SIDE_192000_A2,
@@ -223,12 +243,24 @@
CS_EQUALISER_48000_B2,
(LVM_UINT16 )CS_EQUALISER_48000_SCALE},
#ifdef HIGHER_FS
+ {CS_EQUALISER_88200_A0, /* 88kS/s coefficients */
+ CS_EQUALISER_88200_A1,
+ CS_EQUALISER_88200_A2,
+ CS_EQUALISER_88200_B1,
+ CS_EQUALISER_88200_B2,
+ (LVM_UINT16)CS_EQUALISER_88200_SCALE},
{CS_EQUALISER_96000_A0, /* 96kS/s coefficients */
CS_EQUALISER_96000_A1,
CS_EQUALISER_96000_A2,
CS_EQUALISER_96000_B1,
CS_EQUALISER_96000_B2,
(LVM_UINT16 )CS_EQUALISER_96000_SCALE},
+ {CS_EQUALISER_176400_A0, /* 176kS/s coefficients */
+ CS_EQUALISER_176400_A1,
+ CS_EQUALISER_176400_A2,
+ CS_EQUALISER_176400_B1,
+ CS_EQUALISER_176400_B2,
+ (LVM_UINT16)CS_EQUALISER_176400_SCALE},
{CS_EQUALISER_192000_A0, /* 192kS/s coefficients */
CS_EQUALISER_192000_A1,
CS_EQUALISER_192000_A2,
@@ -294,12 +326,24 @@
(LVM_UINT16 )CSEX_EQUALISER_48000_SCALE}
#ifdef HIGHER_FS
,
+ {CSEX_EQUALISER_88200_A0, /* 88kS/s coefficients */
+ CSEX_EQUALISER_88200_A1,
+ CSEX_EQUALISER_88200_A2,
+ CSEX_EQUALISER_88200_B1,
+ CSEX_EQUALISER_88200_B2,
+ (LVM_UINT16)CSEX_EQUALISER_88200_SCALE},
{CSEX_EQUALISER_96000_A0, /* 96kS/s coefficients */
CSEX_EQUALISER_96000_A1,
CSEX_EQUALISER_96000_A2,
CSEX_EQUALISER_96000_B1,
CSEX_EQUALISER_96000_B2,
(LVM_UINT16 )CSEX_EQUALISER_96000_SCALE},
+ {CSEX_EQUALISER_176400_A0, /* 176kS/s coefficients */
+ CSEX_EQUALISER_176400_A1,
+ CSEX_EQUALISER_176400_A2,
+ CSEX_EQUALISER_176400_B1,
+ CSEX_EQUALISER_176400_B2,
+ (LVM_UINT16)CSEX_EQUALISER_176400_SCALE},
{CSEX_EQUALISER_192000_A0, /* 192kS/s coefficients */
CSEX_EQUALISER_192000_A1,
CSEX_EQUALISER_192000_A2,
@@ -326,7 +370,12 @@
LVCS_STEREODELAY_CS_24KHZ,
LVCS_STEREODELAY_CS_32KHZ,
LVCS_STEREODELAY_CS_44KHZ,
- LVCS_STEREODELAY_CS_48KHZ};
+ LVCS_STEREODELAY_CS_48KHZ,
+ LVCS_STEREODELAY_CS_88KHZ,
+ LVCS_STEREODELAY_CS_96KHZ,
+ LVCS_STEREODELAY_CS_176KHZ,
+ LVCS_STEREODELAY_CS_192KHZ,
+};
/************************************************************************************/
/* */
@@ -392,12 +441,24 @@
(LVM_UINT16 )CS_REVERB_48000_SCALE}
#ifdef HIGHER_FS
,
+ {CS_REVERB_88200_A0, /* 88kS/s coefficients */
+ CS_REVERB_88200_A1,
+ CS_REVERB_88200_A2,
+ CS_REVERB_88200_B1,
+ CS_REVERB_88200_B2,
+ (LVM_UINT16)CS_REVERB_88200_SCALE},
{CS_REVERB_96000_A0, /* 96kS/s coefficients */
CS_REVERB_96000_A1,
CS_REVERB_96000_A2,
CS_REVERB_96000_B1,
CS_REVERB_96000_B2,
(LVM_UINT16 )CS_REVERB_96000_SCALE},
+ {CS_REVERB_176400_A0, /* 176kS/s coefficients */
+ CS_REVERB_176400_A1,
+ CS_REVERB_176400_A2,
+ CS_REVERB_176400_B1,
+ CS_REVERB_176400_B2,
+ (LVM_UINT16)CS_REVERB_176400_SCALE},
{CS_REVERB_192000_A0, /* 192kS/s coefficients */
CS_REVERB_192000_A1,
CS_REVERB_192000_A2,
@@ -509,12 +570,14 @@
#define LVCS_VOL_TC_Fs44100 32734 /* Floating point value 0.998962402 */
#define LVCS_VOL_TC_Fs48000 32737 /* Floating point value 0.999053955 */
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
+#define LVCS_VOL_TC_Fs88200 32751 /* Floating point value 0.999481066 */
#define LVCS_VOL_TC_Fs96000 32751 /* Floating point value 0.999511703 */ /* Todo @ need to re check this value*/
+#define LVCS_VOL_TC_Fs176400 32759 /* Floating point value 0.999740499 */
#define LVCS_VOL_TC_Fs192000 32763 /* Floating point value 0.999877925 */ /* Todo @ need to re check this value*/
#endif
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
-const LVM_INT16 LVCS_VolumeTCTable[11] = {LVCS_VOL_TC_Fs8000,
+const LVM_INT16 LVCS_VolumeTCTable[13] = {LVCS_VOL_TC_Fs8000,
LVCS_VOL_TC_Fs11025,
LVCS_VOL_TC_Fs12000,
LVCS_VOL_TC_Fs16000,
@@ -523,7 +586,9 @@
LVCS_VOL_TC_Fs32000,
LVCS_VOL_TC_Fs44100,
LVCS_VOL_TC_Fs48000,
+ LVCS_VOL_TC_Fs88200,
LVCS_VOL_TC_Fs96000,
+ LVCS_VOL_TC_Fs176400,
LVCS_VOL_TC_Fs192000
};
#else
@@ -545,7 +610,7 @@
/* */
/************************************************************************************/
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
-const LVM_INT32 LVCS_SampleRateTable[11] = {8000,
+const LVM_INT32 LVCS_SampleRateTable[13] = {8000,
11025,
12000,
16000,
@@ -554,7 +619,9 @@
32000,
44100,
48000,
+ 88200,
96000,
+ 176400,
192000
};
#else
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 340469a..861ee64 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -20,30 +20,69 @@
# location of test files
testdir="/data/local/tmp/lvmTest"
-#flags="-bE -tE -eqE -csE"
-flags="-csE -tE -eqE"
-
-
echo "========================================"
echo "testing lvm"
-adb shell mkdir $testdir
+adb shell mkdir -p $testdir
adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw $testdir
adb push $OUT/testcases/lvmtest/arm64/lvmtest $testdir
-# run multichannel effects at different channel counts, saving only the stereo channel pair.
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_1.raw\
- -ch:1 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_2.raw\
- -ch:2 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_4.raw\
- -ch:4 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_6.raw\
- -ch:6 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_8.raw\
- -ch:8 -fs:44100 $flags
+flags_arr=(
+ "-csE"
+ "-eqE"
+ "-tE"
+ "-csE -tE -eqE"
+ "-bE"
+ "-csE -tE"
+ "-csE -eqE" "-tE -eqE"
+ "-csE -tE -bE -eqE"
+)
-# two channel files should be identical to higher channel computation (first 2 channels).
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_2.raw
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_4.raw
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_6.raw
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_8.raw
+fs_arr=(
+ 8000
+ 11025
+ 12000
+ 16000
+ 22050
+ 24000
+ 32000
+ 44100
+ 48000
+ 88200
+ 96000
+ 176400
+ 192000
+)
+
+ch_arr=(
+ 1
+ 2
+ 4
+ 6
+ 8
+)
+
+# run multichannel effects at different configs, saving only the stereo channel
+# pair.
+for flags in "${flags_arr[@]}"
+do
+ for fs in ${fs_arr[*]}
+ do
+ for ch in ${ch_arr[*]}
+ do
+ adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
+ -o:$testdir/sinesweep_$((ch))_$((fs)).raw -ch:$ch -fs:$fs $flags
+
+ # two channel files should be identical to higher channel
+ # computation (first 2 channels).
+ # Do not compare cases where -bE is in flags (due to mono computation)
+ if [[ $flags != *"-bE"* ]] && [ "$ch" -gt 2 ]
+ then
+ adb shell cmp $testdir/sinesweep_2_$((fs)).raw \
+ $testdir/sinesweep_$((ch))_$((fs)).raw
+ fi
+
+ done
+ done
+done
+
+adb shell rm -r $testdir
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index 01c5955..43271d2 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -76,6 +76,7 @@
int samplingFreq = 44100;
int nrChannels = 2;
int fChannels = 2;
+ bool monoMode = false;
int bassEffectLevel = 0;
int eqPresetLevel = 0;
int frameLength = 256;
@@ -98,6 +99,8 @@
printf("\n");
printf("\n -ch:<process_channels> (1 through 8)\n\n");
printf("\n -fch:<file_channels> (1 through 8)\n\n");
+ printf("\n -M");
+ printf("\n Mono mode (force all input audio channels to be identical)");
printf("\n -basslvl:<effect_level>");
printf("\n A value that ranges between 0 - 15 default 0");
printf("\n");
@@ -447,19 +450,69 @@
lvmConfigParams_t *plvmConfigParams,
LVM_ControlParams_t *params) {
LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
- LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS]; /* Equaliser band definitions */
- int eqPresetLevel = plvmConfigParams->eqPresetLevel;
- int nrChannels = plvmConfigParams->nrChannels;
- params->NrChannels = nrChannels;
/* Set the initial process parameters */
/* General parameters */
params->OperatingMode = LVM_MODE_ON;
- params->SampleRate = LVM_FS_44100;
- params->SourceFormat = LVM_STEREO;
params->SpeakerType = LVM_HEADPHONES;
- pContext->pBundledContext->SampleRate = LVM_FS_44100;
+ const int nrChannels = plvmConfigParams->nrChannels;
+ params->NrChannels = nrChannels;
+ if (nrChannels == 1) {
+ params->SourceFormat = LVM_MONO;
+ } else if (nrChannels == 2) {
+ params->SourceFormat = LVM_STEREO;
+ } else if (nrChannels > 2 && nrChannels <= 8) { // FCC_2 FCC_8
+ params->SourceFormat = LVM_MULTICHANNEL;
+ } else {
+ return -EINVAL;
+ }
+
+ LVM_Fs_en sampleRate;
+ switch (plvmConfigParams->samplingFreq) {
+ case 8000:
+ sampleRate = LVM_FS_8000;
+ break;
+ case 11025:
+ sampleRate = LVM_FS_11025;
+ break;
+ case 12000:
+ sampleRate = LVM_FS_12000;
+ break;
+ case 16000:
+ sampleRate = LVM_FS_16000;
+ break;
+ case 22050:
+ sampleRate = LVM_FS_22050;
+ break;
+ case 24000:
+ sampleRate = LVM_FS_24000;
+ break;
+ case 32000:
+ sampleRate = LVM_FS_32000;
+ break;
+ case 44100:
+ sampleRate = LVM_FS_44100;
+ break;
+ case 48000:
+ sampleRate = LVM_FS_48000;
+ break;
+ case 88200:
+ sampleRate = LVM_FS_88200;
+ break;
+ case 96000:
+ sampleRate = LVM_FS_96000;
+ break;
+ case 176400:
+ sampleRate = LVM_FS_176400;
+ break;
+ case 192000:
+ sampleRate = LVM_FS_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ params->SampleRate = sampleRate;
/* Concert Sound parameters */
params->VirtualizerOperatingMode = plvmConfigParams->csEnable;
@@ -468,14 +521,17 @@
params->CS_EffectLevel = LVM_CS_EFFECT_NONE;
/* N-Band Equaliser parameters */
- params->EQNB_OperatingMode = plvmConfigParams->eqEnable;
- params->pEQNB_BandDefinition = &BandDefs[0];
+ const int eqPresetLevel = plvmConfigParams->eqPresetLevel;
+ LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS]; /* Equaliser band definitions */
for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
BandDefs[i].Frequency = EQNB_5BandPresetsFrequencies[i];
BandDefs[i].QFactor = EQNB_5BandPresetsQFactors[i];
BandDefs[i].Gain =
EQNB_5BandSoftPresets[(FIVEBAND_NUMBANDS * eqPresetLevel) + i];
}
+ params->EQNB_OperatingMode = plvmConfigParams->eqEnable;
+ // Caution: raw pointer to stack data, stored in instance by LVM_SetControlParameters.
+ params->pEQNB_BandDefinition = &BandDefs[0];
/* Volume Control parameters */
params->VC_EffectLevel = 0;
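The "Caution" comment above is worth spelling out: BandDefs now lives on the stack of this setup function while LVM_SetControlParameters() keeps the raw pEQNB_BandDefinition pointer inside the instance, so the stored pointer is only valid while the enclosing frame is still alive. A tiny standalone sketch of that hazard (names and values are illustrative, not the LVM API):

#include <cstdio>

struct Params {
    const int *bandDefs;            /* non-owning, like pEQNB_BandDefinition */
};

static Params g_instance;           /* stands in for the stored control params */

void setControlParameters(const Params &p) { g_instance = p; }  /* copies the raw pointer */

void configure() {
    int bandDefs[5] = {60, 230, 910, 3600, 14000};
    setControlParameters(Params{bandDefs});
    /* g_instance.bandDefs is usable here, while bandDefs is still in scope... */
}   /* ...but dangles as soon as configure() returns. */

int main() {
    configure();
    /* Dereferencing g_instance.bandDefs here would be undefined behaviour. */
    std::printf("configured\n");
    return 0;
}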
@@ -490,16 +546,6 @@
/* Bass Enhancement parameters */
params->BE_OperatingMode = plvmConfigParams->bassEnable;
- if (nrChannels == 1) {
- params->SourceFormat = LVM_MONO;
- }
- if (nrChannels == 2) {
- params->SourceFormat = LVM_STEREO;
- }
- if ((nrChannels > 2) && (nrChannels <= 8)) {
- params->SourceFormat = LVM_MULTICHANNEL;
- }
-
/* Activate the initial settings */
LvmStatus =
LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
@@ -569,6 +615,15 @@
}
memcpy_to_float_from_i16(floatIn.data(), in.data(), frameLength * channelCount);
+ // Mono mode will replicate the first channel to all other channels.
+ // This ensures all audio channels are identical. This is useful for testing
+ // Bass Boost, which extracts a mono signal for processing.
+ if (plvmConfigParams->monoMode && channelCount > 1) {
+ for (int i = 0; i < frameLength; ++i) {
+ auto *fp = &floatIn[i * channelCount];
+ std::fill(fp + 1, fp + channelCount, *fp); // replicate ch 0
+ }
+ }
#if 1
errCode = lvmExecute(floatIn.data(), floatOut.data(), &context, plvmConfigParams);
if (errCode) {
@@ -613,7 +668,9 @@
samplingFreq != 12000 && samplingFreq != 16000 &&
samplingFreq != 22050 && samplingFreq != 24000 &&
samplingFreq != 32000 && samplingFreq != 44100 &&
- samplingFreq != 48000 && samplingFreq != 96000) {
+ samplingFreq != 48000 && samplingFreq != 88200 &&
+ samplingFreq != 96000 && samplingFreq != 176400 &&
+ samplingFreq != 192000) {
ALOGE("\nError: Unsupported Sampling Frequency : %d\n", samplingFreq);
return -1;
}
@@ -632,6 +689,8 @@
return -1;
}
lvmConfigParams.fChannels = fChannels;
+ } else if (!strcmp(argv[i],"-M")) {
+ lvmConfigParams.monoMode = true;
} else if (!strncmp(argv[i], "-basslvl:", 9)) {
const int bassEffectLevel = atoi(argv[i] + 9);
if (bassEffectLevel > 15 || bassEffectLevel < 0) {
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 09e9964..b5860de 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -1275,10 +1275,18 @@
pContext->pBundledContext->SamplesPerSecond = 48000 * NrChannels;
break;
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
+ case 88200:
+ SampleRate = LVM_FS_88200;
+ pContext->pBundledContext->SamplesPerSecond = 88200 * NrChannels;
+ break;
case 96000:
SampleRate = LVM_FS_96000;
pContext->pBundledContext->SamplesPerSecond = 96000 * NrChannels;
break;
+ case 176400:
+ SampleRate = LVM_FS_176400;
+ pContext->pBundledContext->SamplesPerSecond = 176400 * NrChannels;
+ break;
case 192000:
SampleRate = LVM_FS_192000;
pContext->pBundledContext->SamplesPerSecond = 192000 * NrChannels;
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index d558169..602f607 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -675,9 +675,15 @@
SampleRate = LVM_FS_48000;
break;
#if defined(BUILD_FLOAT) && defined(HIGHER_FS)
+ case 88200:
+ SampleRate = LVM_FS_88200;
+ break;
case 96000:
SampleRate = LVM_FS_96000;
break;
+ case 176400:
+ SampleRate = LVM_FS_176400;
+ break;
case 192000:
SampleRate = LVM_FS_192000;
break;
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index b2c91c4..56ee18e 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -65,7 +65,7 @@
GET_ROUTED_DEVICE_ID,
ENABLE_AUDIO_DEVICE_CALLBACK,
GET_ACTIVE_MICROPHONES,
-
+ GET_PORT_ID,
};
class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -407,6 +407,23 @@
return status;
}
+ status_t getPortId(audio_port_handle_t *portId)
+ {
+ ALOGV("getPortId");
+ if (portId == nullptr) {
+ return BAD_VALUE;
+ }
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ status_t status = remote()->transact(GET_PORT_ID, data, &reply);
+ if (status != OK
+ || (status = (status_t)reply.readInt32()) != NO_ERROR) {
+ *portId = AUDIO_PORT_HANDLE_NONE;
+ return status;
+ }
+ *portId = (audio_port_handle_t)reply.readInt32();
+ return NO_ERROR;
+ }
};
IMPLEMENT_META_INTERFACE(MediaRecorder, "android.media.IMediaRecorder");
@@ -661,6 +678,17 @@
return NO_ERROR;
}
+ case GET_PORT_ID: {
+ ALOGV("GET_PORT_ID");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ audio_port_handle_t portId;
+ status_t status = getPortId(&portId);
+ reply->writeInt32(status);
+ if (status == NO_ERROR) {
+ reply->writeInt32(portId);
+ }
+ return NO_ERROR;
+ }
default:
return BBinder::onTransact(code, data, reply, flags);
}
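For reference, the GET_PORT_ID round trip above relies on a simple reply layout: a status word first, then the port id only when the status is NO_ERROR, and BpMediaRecorder and the onTransact() stub must agree on that ordering. A standalone model of the contract, using a plain std::vector in place of Parcel and illustrative status values:

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using Parcel = std::vector<int32_t>;
constexpr int32_t NO_ERROR = 0;

void writeReply(Parcel &reply, int32_t status, int32_t portId) {
    reply.push_back(status);
    if (status == NO_ERROR) reply.push_back(portId);   /* payload only on success */
}

std::optional<int32_t> readReply(const Parcel &reply) {
    if (reply.empty() || reply[0] != NO_ERROR) return std::nullopt;
    return reply[1];
}

int main() {
    Parcel ok, fail;
    writeReply(ok, NO_ERROR, /*portId=*/42);
    writeReply(fail, /*status=*/-38 /* e.g. an error code */, 0);
    assert(readReply(ok) == 42);
    assert(!readReply(fail).has_value());
    return 0;
}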
diff --git a/media/libmedia/MidiIoWrapper.cpp b/media/libmedia/MidiIoWrapper.cpp
index 1150d61..d8ef9cf 100644
--- a/media/libmedia/MidiIoWrapper.cpp
+++ b/media/libmedia/MidiIoWrapper.cpp
@@ -50,24 +50,15 @@
mDataSource = nullptr;
}
-MidiIoWrapper::MidiIoWrapper(DataSourceBase *source) {
- ALOGV("MidiIoWrapper(DataSource)");
- mFd = -1;
- mDataSource = source;
- off64_t l;
- if (mDataSource->getSize(&l) == OK) {
- mLength = l;
- } else {
- mLength = 0;
- }
-}
-
class DataSourceUnwrapper : public DataSourceBase {
public:
explicit DataSourceUnwrapper(CDataSource *csource) {
mSource = csource;
}
+
+ virtual ~DataSourceUnwrapper() {}
+
virtual status_t initCheck() const { return OK; }
// Returns the number of bytes read, or -1 on failure. It's not an error if
@@ -98,6 +89,7 @@
MidiIoWrapper::MidiIoWrapper(CDataSource *csource) {
ALOGV("MidiIoWrapper(CDataSource)");
mFd = -1;
+ mBase = 0;
mDataSource = new DataSourceUnwrapper(csource);
off64_t l;
if (mDataSource->getSize(&l) == OK) {
@@ -112,6 +104,7 @@
if (mFd >= 0) {
close(mFd);
}
+ delete mDataSource;
}
int MidiIoWrapper::readAt(void *buffer, int offset, int size) {
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index eed96e7..156991e 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -57,6 +57,10 @@
AMEDIAFORMAT_KEY_COLOR_STANDARD,
AMEDIAFORMAT_KEY_COLOR_TRANSFER,
AMEDIAFORMAT_KEY_COMPLEXITY,
+ AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE,
+ AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK,
+ AMEDIAFORMAT_KEY_CRYPTO_MODE,
+ AMEDIAFORMAT_KEY_CRYPTO_SKIP_BYTE_BLOCK,
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL,
AMEDIAFORMAT_KEY_GRID_COLUMNS,
AMEDIAFORMAT_KEY_GRID_ROWS,
@@ -102,6 +106,8 @@
};
static const char *AMediaFormatKeyGroupBuffer[] = {
+ AMEDIAFORMAT_KEY_CRYPTO_IV,
+ AMEDIAFORMAT_KEY_CRYPTO_KEY,
AMEDIAFORMAT_KEY_HDR_STATIC_INFO,
AMEDIAFORMAT_KEY_SEI,
AMEDIAFORMAT_KEY_MPEG_USER_DATA,
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index fb861d7..b5a7172 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -285,6 +285,7 @@
template <>
const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DEFAULT),
MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
@@ -361,6 +362,22 @@
TERMINATOR
};
+template <>
+const AudioFlagConverter::Table AudioFlagConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_AUDIBILITY_ENFORCED),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_SECURE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_BEACON),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_HW_AV_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_HW_HOTWORD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_BYPASS_MUTE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_LOW_LATENCY),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_DEEP_BUFFER),
+ TERMINATOR
+};
+
template class TypeConverter<OutputDeviceTraits>;
template class TypeConverter<InputDeviceTraits>;
template class TypeConverter<OutputFlagTraits>;
@@ -374,6 +391,7 @@
template class TypeConverter<AudioModeTraits>;
template class TypeConverter<UsageTraits>;
template class TypeConverter<SourceTraits>;
+template class TypeConverter<AudioFlagTraits>;
bool deviceFromString(const std::string& literalDevice, audio_devices_t& device) {
return InputDeviceConverter::fromString(literalDevice, device) ||
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index 379000e..e7c466d 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -73,7 +73,7 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
-
+ virtual status_t getPortId(audio_port_handle_t *portId) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index 5340dde..e1c5d47 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -72,6 +72,7 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
+ virtual status_t getPortId(audio_port_handle_t *portId) const = 0;
diff --git a/media/libmedia/include/media/MidiIoWrapper.h b/media/libmedia/include/media/MidiIoWrapper.h
index 6309dda..b19d49e 100644
--- a/media/libmedia/include/media/MidiIoWrapper.h
+++ b/media/libmedia/include/media/MidiIoWrapper.h
@@ -24,12 +24,12 @@
namespace android {
struct CDataSource;
+class DataSourceUnwrapper;
class MidiIoWrapper {
public:
explicit MidiIoWrapper(const char *path);
explicit MidiIoWrapper(int fd, off64_t offset, int64_t size);
- explicit MidiIoWrapper(DataSourceBase *source);
explicit MidiIoWrapper(CDataSource *csource);
~MidiIoWrapper();
@@ -43,7 +43,7 @@
int mFd;
off64_t mBase;
int64_t mLength;
- DataSourceBase *mDataSource;
+ DataSourceUnwrapper *mDataSource;
EAS_FILE mEasFile;
};
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
index 86f0d4c..418e09c 100644
--- a/media/libmedia/include/media/TypeConverter.h
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -20,6 +20,7 @@
#include <string>
#include <string.h>
+#include <vector>
#include <system/audio.h>
#include <utils/Log.h>
#include <utils/Vector.h>
@@ -30,77 +31,55 @@
namespace android {
-struct SampleRateTraits
-{
- typedef uint32_t Type;
- typedef SortedVector<Type> Collection;
-};
-struct DeviceTraits
-{
- typedef audio_devices_t Type;
- typedef Vector<Type> Collection;
-};
-struct OutputDeviceTraits : public DeviceTraits {};
-struct InputDeviceTraits : public DeviceTraits {};
-struct OutputFlagTraits
-{
- typedef audio_output_flags_t Type;
- typedef Vector<Type> Collection;
-};
-struct InputFlagTraits
-{
- typedef audio_input_flags_t Type;
- typedef Vector<Type> Collection;
-};
-struct FormatTraits
-{
- typedef audio_format_t Type;
- typedef Vector<Type> Collection;
-};
-struct ChannelTraits
-{
- typedef audio_channel_mask_t Type;
- typedef SortedVector<Type> Collection;
-};
-struct OutputChannelTraits : public ChannelTraits {};
-struct InputChannelTraits : public ChannelTraits {};
-struct ChannelIndexTraits : public ChannelTraits {};
-struct GainModeTraits
-{
- typedef audio_gain_mode_t Type;
- typedef Vector<Type> Collection;
-};
-struct StreamTraits
-{
- typedef audio_stream_type_t Type;
- typedef Vector<Type> Collection;
-};
-struct AudioModeTraits
-{
- typedef audio_mode_t Type;
- typedef Vector<Type> Collection;
-};
-struct AudioContentTraits
-{
- typedef audio_content_type_t Type;
- typedef Vector<Type> Collection;
-};
-struct UsageTraits
-{
- typedef audio_usage_t Type;
- typedef Vector<Type> Collection;
-};
-struct SourceTraits
-{
- typedef audio_source_t Type;
- typedef Vector<Type> Collection;
-};
template <typename T>
struct DefaultTraits
{
typedef T Type;
- typedef Vector<Type> Collection;
+ typedef std::vector<Type> Collection;
+ static void add(Collection &collection, Type value)
+ {
+ collection.push_back(value);
+ }
};
+template <typename T>
+struct VectorTraits
+{
+ typedef T Type;
+ typedef Vector<Type> Collection;
+ static void add(Collection &collection, Type value)
+ {
+ collection.add(value);
+ }
+};
+template <typename T>
+struct SortedVectorTraits
+{
+ typedef T Type;
+ typedef SortedVector<Type> Collection;
+ static void add(Collection &collection, Type value)
+ {
+ collection.add(value);
+ }
+};
+
+using SampleRateTraits = SortedVectorTraits<uint32_t>;
+using DeviceTraits = DefaultTraits<audio_devices_t>;
+struct OutputDeviceTraits : public DeviceTraits {};
+struct InputDeviceTraits : public DeviceTraits {};
+using ChannelTraits = SortedVectorTraits<audio_channel_mask_t>;
+struct OutputChannelTraits : public ChannelTraits {};
+struct InputChannelTraits : public ChannelTraits {};
+struct ChannelIndexTraits : public ChannelTraits {};
+using InputFlagTraits = DefaultTraits<audio_input_flags_t>;
+using OutputFlagTraits = DefaultTraits<audio_output_flags_t>;
+using FormatTraits = VectorTraits<audio_format_t>;
+using GainModeTraits = DefaultTraits<audio_gain_mode_t>;
+using StreamTraits = DefaultTraits<audio_stream_type_t>;
+using AudioModeTraits = DefaultTraits<audio_mode_t>;
+using AudioContentTraits = DefaultTraits<audio_content_type_t>;
+using UsageTraits = DefaultTraits<audio_usage_t>;
+using SourceTraits = DefaultTraits<audio_source_t>;
+struct AudioFlagTraits : public DefaultTraits<audio_flags_mask_t> {};
template <class Traits>
static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
@@ -110,7 +89,7 @@
for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
typename Traits::Type value;
if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
- collection.add(value);
+ Traits::add(collection, value);
}
}
free(literal);
@@ -181,7 +160,7 @@
for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
typename Traits::Type value;
if (fromString(cstr, value)) {
- collection.add(value);
+ Traits::add(collection, value);
}
}
free(literal);
@@ -234,6 +213,7 @@
typedef TypeConverter<AudioContentTraits> AudioContentTypeConverter;
typedef TypeConverter<UsageTraits> UsageTypeConverter;
typedef TypeConverter<SourceTraits> SourceTypeConverter;
+typedef TypeConverter<AudioFlagTraits> AudioFlagConverter;
template<> const OutputDeviceConverter::Table OutputDeviceConverter::mTable[];
template<> const InputDeviceConverter::Table InputDeviceConverter::mTable[];
@@ -249,6 +229,7 @@
template<> const AudioContentTypeConverter::Table AudioContentTypeConverter::mTable[];
template<> const UsageTypeConverter::Table UsageTypeConverter::mTable[];
template<> const SourceTypeConverter::Table SourceTypeConverter::mTable[];
+template<> const AudioFlagConverter::Table AudioFlagConverter::mTable[];
bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);
@@ -274,6 +255,69 @@
OutputChannelTraits::Collection outputChannelMasksFromString(
const std::string &outChannels, const char *del = AudioParameter::valueListSeparator);
+static inline std::string toString(audio_usage_t usage)
+{
+ std::string usageLiteral;
+ if (!android::UsageTypeConverter::toString(usage, usageLiteral)) {
+ ALOGV("failed to convert usage: %d", usage);
+ return "AUDIO_USAGE_UNKNOWN";
+ }
+ return usageLiteral;
+}
+
+static inline std::string toString(audio_content_type_t content)
+{
+ std::string contentLiteral;
+ if (!android::AudioContentTypeConverter::toString(content, contentLiteral)) {
+ ALOGV("failed to convert content type: %d", content);
+ return "AUDIO_CONTENT_TYPE_UNKNOWN";
+ }
+ return contentLiteral;
+}
+
+static inline std::string toString(audio_stream_type_t stream)
+{
+ std::string streamLiteral;
+ if (!android::StreamTypeConverter::toString(stream, streamLiteral)) {
+ ALOGV("failed to convert stream: %d", stream);
+ return "AUDIO_STREAM_DEFAULT";
+ }
+ return streamLiteral;
+}
+
+static inline std::string toString(audio_source_t source)
+{
+ std::string sourceLiteral;
+ if (!android::SourceTypeConverter::toString(source, sourceLiteral)) {
+ ALOGV("failed to convert source: %d", source);
+ return "AUDIO_SOURCE_DEFAULT";
+ }
+ return sourceLiteral;
+}
+
+static inline std::string toString(const audio_attributes_t &attributes)
+{
+ std::ostringstream result;
+ result << "{ Content type: " << toString(attributes.content_type)
+ << " Usage: " << toString(attributes.usage)
+ << " Source: " << toString(attributes.source)
+ << " Flags: " << attributes.flags
+ << " Tags: " << attributes.tags
+ << " }";
+
+ return result.str();
+}
+
+static inline std::string toString(audio_mode_t mode)
+{
+ std::string modeLiteral;
+ if (!android::AudioModeConverter::toString(mode, modeLiteral)) {
+ ALOGV("failed to convert mode: %d", mode);
+ return "AUDIO_MODE_INVALID";
+ }
+ return modeLiteral;
+}
+
}; // namespace android
#endif /*ANDROID_TYPE_CONVERTER_H_*/
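The traits rework at the top of this header collapses the hand-written per-type Traits structs into three templates (DefaultTraits, VectorTraits, SortedVectorTraits) whose static add() hides whether the backing collection is std::vector, Vector or SortedVector, so collectionFromString() and TypeConverter no longer care which container they fill. A self-contained sketch of the same pattern using only the standard library (std::set stands in for SortedVector, std::stoi for convertTo<>):

#include <set>
#include <sstream>
#include <string>
#include <vector>

template <typename T>
struct DefaultTraits {
    using Type = T;
    using Collection = std::vector<T>;
    static void add(Collection &c, Type v) { c.push_back(v); }
};

template <typename T>
struct SortedTraits {
    using Type = T;
    using Collection = std::set<T>;                 /* stand-in for SortedVector */
    static void add(Collection &c, Type v) { c.insert(v); }
};

template <class Traits>
void collectionFromString(const std::string &s, typename Traits::Collection &out,
                          char del = '|') {
    std::istringstream in(s);
    for (std::string tok; std::getline(in, tok, del);) {
        Traits::add(out, std::stoi(tok));           /* toy conversion */
    }
}

int main() {
    DefaultTraits<int>::Collection ordered;
    SortedTraits<int>::Collection sorted;
    collectionFromString<DefaultTraits<int>>("3|1|2", ordered);  /* keeps 3,1,2 */
    collectionFromString<SortedTraits<int>>("3|1|2", sorted);    /* yields 1,2,3 */
    return (ordered.size() == 3 && sorted.size() == 3) ? 0 : 1;
}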
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index bdf1aae..33be559 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -115,9 +115,6 @@
* The state machine of the media_recorder.
*/
enum media_recorder_states {
- // Error state.
- MEDIA_RECORDER_ERROR = 0,
-
// Recorder was just created.
MEDIA_RECORDER_IDLE = 1 << 0,
@@ -132,6 +129,9 @@
// Recording is in progress.
MEDIA_RECORDER_RECORDING = 1 << 4,
+
+ // Error state.
+ MEDIA_RECORDER_ERROR = 1 << 5,
};
// The "msg" code passed to the listener in notify.
@@ -264,6 +264,7 @@
status_t getRoutedDeviceId(audio_port_handle_t *deviceId);
status_t enableAudioDeviceCallback(bool enabled);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+ status_t getPortId(audio_port_handle_t *portId) const;
private:
void doCleanUp();
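
Moving MEDIA_RECORDER_ERROR out of the zero slot makes every recorder state a distinct bit, so it can be tested with masks like the other states. A hypothetical check (only IDLE, RECORDING and ERROR appear in this hunk):

// Hypothetical state check; the enum values come from media_recorder_states.
static bool isRecorderUsable(int state) {
    // With MEDIA_RECORDER_ERROR == 1 << 5 it no longer aliases the empty mask,
    // so bitwise tests against it are meaningful.
    return (state & MEDIA_RECORDER_ERROR) == 0 &&
           (state & (MEDIA_RECORDER_IDLE | MEDIA_RECORDER_RECORDING)) != 0;
}
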
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 92cfb1c..d07e703 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -842,4 +842,15 @@
return mMediaRecorder->getActiveMicrophones(activeMicrophones);
}
+status_t MediaRecorder::getPortId(audio_port_handle_t *portId) const
+{
+ ALOGV("getPortId");
+
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ return mMediaRecorder->getPortId(portId);
+}
+
} // namespace android
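
getPortId() surfaces the AudioRecord port handle through the recorder client; a hedged caller-side sketch (the helper and LOG_TAG are illustrative, and the recorder is assumed to be configured and started):

#define LOG_TAG "RecorderPortIdDemo"
#include <media/mediarecorder.h>
#include <utils/Log.h>

using namespace android;

// Hypothetical caller: logs which audio port the active recording uses.
static void logRecordingPort(const sp<MediaRecorder> &recorder) {
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
    if (recorder->getPortId(&portId) == NO_ERROR) {
        ALOGV("recording on audio port %d", portId);
    }
}
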
diff --git a/media/libmediaextractor/Android.bp b/media/libmediaextractor/Android.bp
index 6f2b35f..4758cd6 100644
--- a/media/libmediaextractor/Android.bp
+++ b/media/libmediaextractor/Android.bp
@@ -14,11 +14,19 @@
"-Wall",
],
+ static: {
+ cflags: [
+ "-Wno-multichar",
+ "-Werror",
+ "-Wall",
+ "-DNO_IMEMORY",
+ ],
+ },
+
shared_libs: [
"libbinder",
"libstagefright_foundation",
"libutils",
- "libcutils",
"liblog",
],
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
index 26d0bd4..bab3a03 100644
--- a/media/libmediaextractor/MediaBuffer.cpp
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -51,9 +51,12 @@
mRangeLength(size),
mOwnsData(true),
mMetaData(new MetaDataBase) {
+#ifndef NO_IMEMORY
if (size < kSharedMemThreshold
|| std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
+#endif
mData = malloc(size);
+#ifndef NO_IMEMORY
} else {
ALOGV("creating memoryDealer");
sp<MemoryDealer> memoryDealer =
@@ -71,6 +74,7 @@
ALOGV("Allocated shared mem buffer of size %zu @ %p", size, mData);
}
}
+#endif
}
MediaBuffer::MediaBuffer(const sp<ABuffer> &buffer)
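
The -DNO_IMEMORY static variant compiles the shared-memory branch out entirely; the allocation decision above reduces to roughly the following sketch (threshold value and helper name are illustrative, not the real MediaBuffer internals):

#include <cstdlib>

static const size_t kSharedMemThreshold = 64 * 1024;      // illustrative value
void *allocateViaMemoryDealer(size_t size);               // hypothetical helper

void *allocateBufferData(size_t size) {
#ifndef NO_IMEMORY
    if (size >= kSharedMemThreshold) {
        // Binder-visible path: back the buffer with shared memory.
        return allocateViaMemoryDealer(size);
    }
#endif
    // Static builds define NO_IMEMORY and always take the plain heap path.
    return malloc(size);
}
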
diff --git a/media/libmediaextractor/MediaBufferGroup.cpp b/media/libmediaextractor/MediaBufferGroup.cpp
index 4e6beca..84ff9a6 100644
--- a/media/libmediaextractor/MediaBufferGroup.cpp
+++ b/media/libmediaextractor/MediaBufferGroup.cpp
@@ -62,6 +62,7 @@
mInternal->mGrowthLimit = buffers;
}
+#ifndef NO_IMEMORY
if (buffer_size >= kSharedMemoryThreshold) {
ALOGD("creating MemoryDealer");
// Using a single MemoryDealer is efficient for a group of shared memory objects.
@@ -84,6 +85,9 @@
}
return;
}
+#else
+ (void)kSharedMemoryThreshold;
+#endif
// Non-shared memory allocation.
for (size_t i = 0; i < buffers; ++i) {
@@ -121,6 +125,7 @@
buffer->release();
}
delete mInternal;
+ delete mWrapper;
}
void MediaBufferGroup::add_buffer(MediaBufferBase *buffer) {
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
index 5b362a4..ace63ae 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -46,12 +46,13 @@
explicit MediaBuffer(size_t size);
explicit MediaBuffer(const sp<ABuffer> &buffer);
-
+#ifndef NO_IMEMORY
MediaBuffer(const sp<IMemory> &mem) :
MediaBuffer((uint8_t *)mem->pointer() + sizeof(SharedControl), mem->size()) {
// delegate and override mMemory
mMemory = mem;
}
+#endif
// If MediaBufferGroup is set, decrement the local reference count;
// if the local reference count drops to 0, return the buffer to the
@@ -92,17 +93,26 @@
}
virtual int remoteRefcount() const {
+#ifndef NO_IMEMORY
if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
int32_t remoteRefcount =
reinterpret_cast<SharedControl *>(mMemory->pointer())->getRemoteRefcount();
// Sanity check so that remoteRefCount() is non-negative.
return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
+#else
+ return 0;
+#endif
}
// returns old value
int addRemoteRefcount(int32_t value) {
+#ifndef NO_IMEMORY
if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
return reinterpret_cast<SharedControl *>(mMemory->pointer())->addRemoteRefcount(value);
+#else
+ (void) value;
+ return 0;
+#endif
}
bool isDeadObject() const {
@@ -110,8 +120,13 @@
}
static bool isDeadObject(const sp<IMemory> &memory) {
+#ifndef NO_IMEMORY
if (memory.get() == nullptr || memory->pointer() == nullptr) return false;
return reinterpret_cast<SharedControl *>(memory->pointer())->isDeadObject();
+#else
+ (void) memory;
+ return false;
+#endif
}
// Sticky on enabling of shared memory MediaBuffers. By default we don't use
@@ -204,7 +219,11 @@
};
inline SharedControl *getSharedControl() const {
+#ifndef NO_IMEMORY
return reinterpret_cast<SharedControl *>(mMemory->pointer());
+#else
+ return nullptr;
+#endif
}
};
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
index eb49f4c..e2cbfc8 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
@@ -75,13 +75,16 @@
virtual int localRefcount() const = 0;
virtual int remoteRefcount() const = 0;
- virtual ~MediaBufferBase() {};
+ virtual ~MediaBufferBase() {
+ delete mWrapper;
+ delete mFormat;
+ };
- CMediaBufferV3 *wrap() {
+ CMediaBuffer *wrap() {
if (mWrapper) {
return mWrapper;
}
- mWrapper = new CMediaBufferV3;
+ mWrapper = new CMediaBuffer;
mWrapper->handle = this;
mWrapper->release = [](void *handle) -> void {
@@ -96,6 +99,14 @@
return ((MediaBufferBase*)handle)->size();
};
+ mWrapper->range_offset = [](void *handle) -> size_t {
+ return ((MediaBufferBase*)handle)->range_offset();
+ };
+
+ mWrapper->range_length = [](void *handle) -> size_t {
+ return ((MediaBufferBase*)handle)->range_length();
+ };
+
mWrapper->set_range = [](void *handle, size_t offset, size_t length) -> void {
return ((MediaBufferBase*)handle)->set_range(offset, length);
};
@@ -116,7 +127,7 @@
mFormat = nullptr;
}
private:
- CMediaBufferV3 *mWrapper;
+ CMediaBuffer *mWrapper;
AMediaFormat *mFormat;
};
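
wrap() hands the buffer to extractor plugins as plain C function pointers; a plugin-side sketch using the newly wired range accessors (consumeSample() is hypothetical; the CMediaBuffer fields are the ones populated above):

// Hypothetical plugin-side consumer of a CMediaBuffer obtained from wrap().
static void consumeSample(CMediaBuffer *buf) {
    size_t offset = buf->range_offset(buf->handle);
    size_t length = buf->range_length(buf->handle);
    // ... parse 'length' bytes of payload starting at 'offset' ...
    buf->set_range(buf->handle, 0, 0);     // mark the buffer as fully consumed
    buf->release(buf->handle);             // return it to the framework
}
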
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
index dc04556..a162116 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
@@ -59,12 +59,12 @@
// If buffer is nullptr, have acquire_buffer() check for remote release.
virtual void signalBufferReturned(MediaBufferBase *buffer);
- CMediaBufferGroupV3 *wrap() {
+ CMediaBufferGroup *wrap() {
if (mWrapper) {
return mWrapper;
}
- mWrapper = new CMediaBufferGroupV3;
+ mWrapper = new CMediaBufferGroup;
mWrapper->handle = this;
mWrapper->add_buffer = [](void *handle, size_t size) -> void {
@@ -80,7 +80,7 @@
};
mWrapper->acquire_buffer = [](void *handle,
- CMediaBufferV3 **buf, bool nonBlocking, size_t requestedSize) -> media_status_t {
+ CMediaBuffer **buf, bool nonBlocking, size_t requestedSize) -> media_status_t {
MediaBufferBase *acquiredBuf = nullptr;
status_t err = ((MediaBufferGroup*)handle)->acquire_buffer(
&acquiredBuf, nonBlocking, requestedSize);
@@ -100,7 +100,7 @@
}
private:
- CMediaBufferGroupV3 *mWrapper;
+ CMediaBufferGroup *mWrapper;
struct InternalData;
InternalData *mInternal;
diff --git a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
index 9f2deda..b99c14c 100644
--- a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
@@ -199,6 +199,7 @@
// HDR related
kKeyHdrStaticInfo = 'hdrS', // HDRStaticInfo
+ kKeyHdr10PlusInfo = 'hdrD', // raw data
// color aspects
kKeyColorRange = 'cRng', // int32_t, color range, value defined by ColorAspects.Range
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 8f8c478..e188e54 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -10,11 +10,9 @@
],
shared_libs: [
- "libbase",
"libbinder",
"libcutils",
"liblog",
- "libstagefright_foundation",
"libutils",
],
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 6b43375..54309ee 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -91,18 +91,15 @@
"JMedia2HTTPConnection.cpp",
],
+ header_libs: [
+ "libbinder_headers",
+ "libnativehelper_header_only",
+ ],
+
shared_libs: [
- "android.hidl.token@1.0-utils",
"liblog",
- "libcutils",
"libutils",
- "libbinder",
- "libstagefright_foundation",
- "libmediaextractor",
"libdl",
- "libaudioutils",
- "libaudioclient",
- "libnativehelper",
],
include_dirs: [
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index c7cd7d2..a945ffd 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -86,7 +86,7 @@
MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC);
status_t notifyAt(int64_t mediaTimeUs);
status_t getCurrentPosition(int64_t *msec);
- status_t getDuration(int64_t *msec);
+ status_t getDuration(int64_t srcId, int64_t *msec);
status_t reset();
status_t setAudioStreamType(audio_stream_type_t type);
status_t getAudioStreamType(audio_stream_type_t *type);
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index 2ae5a8c..f432059 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -718,8 +718,15 @@
return ret;
}
-status_t MediaPlayer2::getDuration(int64_t *msec) {
+status_t MediaPlayer2::getDuration(int64_t srcId, int64_t *msec) {
Mutex::Autolock _l(mLock);
+ // TODO: cache duration for currentSrcId and nextSrcId, and return correct
+ // value for nextSrcId.
+ if (srcId != mSrcId) {
+ *msec = -1;
+ return OK;
+ }
+
ALOGV("getDuration_l");
bool isValidState = (mCurrentState & (MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE));
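
Durations are now reported per source id; a mismatched id currently yields -1 rather than an error. A caller-side sketch (player assumed prepared, srcId being the id used when the source was set):

int64_t durationMs = -1;
if (player->getDuration(srcId, &durationMs) == OK && durationMs >= 0) {
    ALOGV("source %lld duration %lld ms", (long long)srcId, (long long)durationMs);
} else {
    // Either the call failed or srcId is not the current source yet (duration -1).
}
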
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
index f01361b..9552580 100644
--- a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
@@ -1286,6 +1286,11 @@
mVideoTimeUs = timeUs;
}
+ sp<AMediaCodecCryptoInfoWrapper> cryptInfo = extractor->getSampleCryptoInfo();
+ if (cryptInfo != NULL) {
+ meta->setObject("cryptInfo", cryptInfo);
+ }
+
queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
if (numBuffers == 0 && actualTimeUs != nullptr) {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 81ffbc7..080d923 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -791,9 +791,13 @@
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
+ int64_t srcId;
+ CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
+
PlayerMessage* reply;
CHECK(msg->findPointer("reply", (void**)&reply));
+ // TODO: use correct source info based on srcId.
size_t inbandTracks = 0;
if (mCurrentSourceInfo.mSource != NULL) {
inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
@@ -824,10 +828,14 @@
case kWhatGetSelectedTrack:
{
+ int64_t srcId;
+ CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
+
int32_t type32;
CHECK(msg->findInt32("type", (int32_t*)&type32));
media_track_type type = (media_track_type)type32;
+ // TODO: use correct source info based on srcId.
size_t inbandTracks = 0;
status_t err = INVALID_OPERATION;
ssize_t selectedTrack = -1;
@@ -863,15 +871,18 @@
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
+ int64_t srcId;
size_t trackIndex;
int32_t select;
int64_t timeUs;
+ CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
CHECK(msg->findSize("trackIndex", &trackIndex));
CHECK(msg->findInt32("select", &select));
CHECK(msg->findInt64("timeUs", &timeUs));
status_t err = INVALID_OPERATION;
+ // TODO: use correct source info based on srcId.
size_t inbandTracks = 0;
if (mCurrentSourceInfo.mSource != NULL) {
inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
@@ -2324,8 +2335,9 @@
return OK;
}
-status_t NuPlayer2::getTrackInfo(PlayerMessage* reply) const {
+status_t NuPlayer2::getTrackInfo(int64_t srcId, PlayerMessage* reply) const {
sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
+ msg->setInt64("srcId", srcId);
msg->setPointer("reply", reply);
sp<AMessage> response;
@@ -2333,9 +2345,10 @@
return err;
}
-status_t NuPlayer2::getSelectedTrack(int32_t type, PlayerMessage* reply) const {
+status_t NuPlayer2::getSelectedTrack(int64_t srcId, int32_t type, PlayerMessage* reply) const {
sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
msg->setPointer("reply", reply);
+ msg->setInt64("srcId", srcId);
msg->setInt32("type", type);
sp<AMessage> response;
@@ -2346,8 +2359,9 @@
return err;
}
-status_t NuPlayer2::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
+status_t NuPlayer2::selectTrack(int64_t srcId, size_t trackIndex, bool select, int64_t timeUs) {
sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
+ msg->setInt64("srcId", srcId);
msg->setSize("trackIndex", trackIndex);
msg->setInt32("select", select);
msg->setInt64("timeUs", timeUs);
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.h b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
index e9b5f11..fdc128f 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
@@ -82,9 +82,9 @@
void rewind();
status_t setVideoScalingMode(int32_t mode);
- status_t getTrackInfo(PlayerMessage* reply) const;
- status_t getSelectedTrack(int32_t type, PlayerMessage* reply) const;
- status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
+ status_t getTrackInfo(int64_t srcId, PlayerMessage* reply) const;
+ status_t getSelectedTrack(int64_t srcId, int32_t type, PlayerMessage* reply) const;
+ status_t selectTrack(int64_t srcId, size_t trackIndex, bool select, int64_t timeUs);
status_t getCurrentPosition(int64_t *mediaUs);
void getStats(Vector<sp<AMessage> > *mTrackStats);
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index 49e3e3b..a5bd62d 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -1108,6 +1108,11 @@
} // buffer->data()
} // needsCopy
+ sp<RefBase> cryptInfoObj;
+ if (buffer->meta()->findObject("cryptInfo", &cryptInfoObj)) {
+ cryptInfo = static_cast<AMediaCodecCryptoInfoWrapper *>(cryptInfoObj.get());
+ }
+
status_t err;
if (cryptInfo != NULL) {
err = mCodec->queueSecureInputBuffer(
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 56d708a..2dab2dd 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -603,28 +603,33 @@
case MEDIA_PLAYER2_INVOKE_ID_GET_TRACK_INFO:
{
- return mPlayer->getTrackInfo(response);
+ int64_t srcId = (it++)->int64_value();
+ return mPlayer->getTrackInfo(srcId, response);
}
case MEDIA_PLAYER2_INVOKE_ID_SELECT_TRACK:
{
+ int64_t srcId = (it++)->int64_value();
int trackIndex = (it++)->int32_value();
int64_t msec = 0;
// getCurrentPosition should always return OK
getCurrentPosition(&msec);
- return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000LL);
+ return mPlayer->selectTrack(srcId, trackIndex, true /* select */, msec * 1000LL);
}
case MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK:
{
+ int64_t srcId = (it++)->int64_value();
int trackIndex = (it++)->int32_value();
- return mPlayer->selectTrack(trackIndex, false /* select */, 0xdeadbeef /* not used */);
+ return mPlayer->selectTrack(
+ srcId, trackIndex, false /* select */, 0xdeadbeef /* not used */);
}
case MEDIA_PLAYER2_INVOKE_ID_GET_SELECTED_TRACK:
{
+ int64_t srcId = (it++)->int64_value();
int32_t type = (it++)->int32_value();
- return mPlayer->getSelectedTrack(type, response);
+ return mPlayer->getSelectedTrack(srcId, type, response);
}
default:
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
index 9d9e179..51e472d 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -28,7 +28,7 @@
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
-#include <media/stagefright/VideoFrameScheduler.h>
+#include <media/stagefright/VideoFrameScheduler2.h>
#include <media/MediaCodecBuffer.h>
#include <inttypes.h>
@@ -1269,10 +1269,10 @@
mAnchorTimeMediaUs = mediaTimeUs;
}
}
- mNextVideoTimeMediaUs = mediaTimeUs + 100000;
+ mNextVideoTimeMediaUs = mediaTimeUs;
if (!mHasAudio) {
// smooth out videos >= 10fps
- mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
}
if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
@@ -1406,9 +1406,15 @@
mHasAudio = false;
if (mNextVideoTimeMediaUs >= 0) {
int64_t mediaUs = 0;
- mMediaClock->getMediaTime(ALooper::GetNowUs(), &mediaUs);
- if (mNextVideoTimeMediaUs > mediaUs) {
- mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ int64_t nowUs = ALooper::GetNowUs();
+ status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
+ if (result == OK) {
+ if (mNextVideoTimeMediaUs > mediaUs) {
+ mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ }
+ } else {
+ mMediaClock->updateAnchor(
+ mNextVideoTimeMediaUs, nowUs, mNextVideoTimeMediaUs + 100000);
}
}
}
@@ -1436,7 +1442,7 @@
if (mHasVideo) {
if (mVideoScheduler == NULL) {
- mVideoScheduler = new VideoFrameScheduler();
+ mVideoScheduler = new VideoFrameScheduler2();
mVideoScheduler->init();
}
}
@@ -1779,7 +1785,7 @@
void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
if (mVideoScheduler == NULL) {
- mVideoScheduler = new VideoFrameScheduler();
+ mVideoScheduler = new VideoFrameScheduler2();
}
mVideoScheduler->init(fps);
}
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
index 305af68..484d9b7 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
@@ -28,7 +28,7 @@
class JWakeLock;
struct MediaClock;
class MediaCodecBuffer;
-struct VideoFrameScheduler;
+struct VideoFrameSchedulerBase;
struct NuPlayer2::Renderer : public AHandler {
enum Flags {
@@ -156,7 +156,7 @@
List<QueueEntry> mAudioQueue;
List<QueueEntry> mVideoQueue;
uint32_t mNumFramesWritten;
- sp<VideoFrameScheduler> mVideoScheduler;
+ sp<VideoFrameSchedulerBase> mVideoScheduler;
bool mDrainAudioQueuePending;
bool mDrainVideoQueuePending;
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 4206647..3fa8e3f 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -537,4 +537,13 @@
}
return NO_INIT;
}
+
+status_t MediaRecorderClient::getPortId(audio_port_handle_t *portId) {
+ ALOGV("getPortId");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder != NULL) {
+ return mRecorder->getPortId(portId);
+ }
+ return NO_INIT;
+}
}; // namespace android
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index d2e681f..303cefc 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -109,6 +109,7 @@
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
+ status_t getPortId(audio_port_handle_t *portId) override;
private:
friend class MediaPlayerService; // for accessing private constructor
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index eae52c2..f2a3038 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -2255,6 +2255,12 @@
return NO_INIT;
}
+status_t StagefrightRecorder::getPortId(audio_port_handle_t *portId) const {
+ if (mAudioSourceNode != 0) {
+ return mAudioSourceNode->getPortId(portId);
+ }
+ return NO_INIT;
+}
status_t StagefrightRecorder::dump(
int fd, const Vector<String16>& args) const {
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 2ada301..a292e58 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -77,7 +77,7 @@
virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
-
+ status_t getPortId(audio_port_handle_t *portId) const override;
private:
mutable Mutex mLock;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index c8f6738..c990b2a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1299,10 +1299,10 @@
mAnchorTimeMediaUs = mediaTimeUs;
}
}
- mNextVideoTimeMediaUs = mediaTimeUs + 100000;
+ mNextVideoTimeMediaUs = mediaTimeUs;
if (!mHasAudio) {
// smooth out videos >= 10fps
- mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
}
if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
@@ -1436,9 +1436,15 @@
mHasAudio = false;
if (mNextVideoTimeMediaUs >= 0) {
int64_t mediaUs = 0;
- mMediaClock->getMediaTime(ALooper::GetNowUs(), &mediaUs);
- if (mNextVideoTimeMediaUs > mediaUs) {
- mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ int64_t nowUs = ALooper::GetNowUs();
+ status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
+ if (result == OK) {
+ if (mNextVideoTimeMediaUs > mediaUs) {
+ mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ }
+ } else {
+ mMediaClock->updateAnchor(
+ mNextVideoTimeMediaUs, nowUs, mNextVideoTimeMediaUs + 100000);
}
}
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index a047975..a521f62 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -28,7 +28,7 @@
class AWakeLock;
struct MediaClock;
class MediaCodecBuffer;
-struct VideoFrameScheduler;
+struct VideoFrameSchedulerBase;
struct NuPlayer::Renderer : public AHandler {
enum Flags {
@@ -156,7 +156,7 @@
List<QueueEntry> mAudioQueue;
List<QueueEntry> mVideoQueue;
uint32_t mNumFramesWritten;
- sp<VideoFrameScheduler> mVideoScheduler;
+ sp<VideoFrameSchedulerBase> mVideoScheduler;
bool mDrainAudioQueuePending;
bool mDrainVideoQueuePending;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 114f492..ceb8a13 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -576,6 +576,7 @@
mTunneled(false),
mDescribeColorAspectsIndex((OMX_INDEXTYPE)0),
mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0),
+ mDescribeHDR10PlusInfoIndex((OMX_INDEXTYPE)0),
mStateGeneration(0),
mVendorExtensionsStatus(kExtensionsUnchecked) {
memset(&mLastHDRStaticInfo, 0, sizeof(mLastHDRStaticInfo));
@@ -2319,12 +2320,16 @@
(void)mInputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
mConverter[kPortIndexInput] = AudioConverter::Create(pcmEncoding, codecPcmEncoding);
if (mConverter[kPortIndexInput] != NULL) {
+ ALOGD("%s: encoder %s input format pcm encoding converter from %d to %d",
+ __func__, mComponentName.c_str(), pcmEncoding, codecPcmEncoding);
mInputFormat->setInt32("pcm-encoding", pcmEncoding);
}
} else {
(void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
if (mConverter[kPortIndexOutput] != NULL) {
+ ALOGD("%s: decoder %s output format pcm encoding converter from %d to %d",
+ __func__, mComponentName.c_str(), codecPcmEncoding, pcmEncoding);
mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
}
}
@@ -3765,8 +3770,17 @@
"OMX.google.android.index.describeHDRStaticInfo", &mDescribeHDRStaticInfoIndex);
if (err != OK) {
mDescribeHDRStaticInfoIndex = (OMX_INDEXTYPE)0;
+ return err;
}
- return err;
+
+ err = mOMXNode->getExtensionIndex(
+ "OMX.google.android.index.describeHDR10PlusInfo", &mDescribeHDR10PlusInfoIndex);
+ if (err != OK) {
+ mDescribeHDR10PlusInfoIndex = (OMX_INDEXTYPE)0;
+ return err;
+ }
+
+ return OK;
}
status_t ACodec::setHDRStaticInfo(const DescribeHDRStaticInfoParams &params) {
@@ -4411,8 +4425,8 @@
h264type.nBFrames = mLatency == 0 ? 1 : std::min(1U, mLatency - 1);
// disable B-frames until MPEG4Writer can guarantee finalizing files with B-frames
- h264type.nRefFrames = 1;
- h264type.nBFrames = 0;
+ // h264type.nRefFrames = 1;
+ // h264type.nBFrames = 0;
h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
h264type.nAllowedPictureTypes =
@@ -5397,6 +5411,70 @@
return getVendorParameters(portIndex, notify);
}
+DescribeHDR10PlusInfoParams* ACodec::getHDR10PlusInfo(size_t paramSizeUsed) {
+ if (mDescribeHDR10PlusInfoIndex == 0) {
+ ALOGE("getHDR10PlusInfo: does not support DescribeHDR10PlusInfoParams");
+ return nullptr;
+ }
+
+ size_t newSize = sizeof(DescribeHDR10PlusInfoParams) - 1 +
+ ((paramSizeUsed > 0) ? paramSizeUsed : 512);
+ if (mHdr10PlusScratchBuffer == nullptr
+ || newSize > mHdr10PlusScratchBuffer->size()) {
+ mHdr10PlusScratchBuffer = new ABuffer(newSize);
+ }
+ DescribeHDR10PlusInfoParams *config =
+ (DescribeHDR10PlusInfoParams *)mHdr10PlusScratchBuffer->data();
+ InitOMXParams(config);
+ config->nSize = mHdr10PlusScratchBuffer->size();
+ config->nPortIndex = 1;
+ size_t paramSize = config->nSize - sizeof(DescribeHDR10PlusInfoParams) + 1;
+ config->nParamSize = paramSize;
+ config->nParamSizeUsed = 0;
+ status_t err = mOMXNode->getConfig(
+ (OMX_INDEXTYPE)mDescribeHDR10PlusInfoIndex,
+ config, config->nSize);
+ if (err != OK) {
+ ALOGE("failed to get DescribeHDR10PlusInfoParams (err %d)", err);
+ return nullptr;
+ }
+ if (config->nParamSize != paramSize) {
+ ALOGE("DescribeHDR10PlusInfoParams alters nParamSize: %u vs %zu",
+ config->nParamSize, paramSize);
+ return nullptr;
+ }
+ if (paramSizeUsed > 0 && config->nParamSizeUsed != paramSizeUsed) {
+ ALOGE("DescribeHDR10PlusInfoParams returns wrong nParamSizeUsed: %u vs %zu",
+ config->nParamSizeUsed, paramSizeUsed);
+ return nullptr;
+ }
+ return config;
+}
+
+void ACodec::onConfigUpdate(OMX_INDEXTYPE configIndex) {
+ if (mDescribeHDR10PlusInfoIndex == 0
+ || configIndex != mDescribeHDR10PlusInfoIndex) {
+ // mDescribeHDR10PlusInfoIndex is the only update we recognize now
+ return;
+ }
+
+ DescribeHDR10PlusInfoParams *config = getHDR10PlusInfo();
+ if (config == nullptr) {
+ return;
+ }
+ if (config->nParamSizeUsed > config->nParamSize) {
+ // try again with the size specified
+ config = getHDR10PlusInfo(config->nParamSizeUsed);
+ if (config == nullptr) {
+ return;
+ }
+ }
+
+ mOutputFormat = mOutputFormat->dup(); // trigger an output format changed event
+ mOutputFormat->setBuffer("hdr10-plus-info",
+ ABuffer::CreateAsCopy(config->nValue, config->nParamSizeUsed));
+}
+
void ACodec::onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects) {
// aspects are normally communicated in ColorAspects
int32_t range, standard, transfer;
@@ -6337,6 +6415,15 @@
}
}
+ sp<ABuffer> hdr10PlusInfo;
+ if (buffer->format()->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
+ && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0
+ && hdr10PlusInfo != mCodec->mLastHdr10PlusBuffer) {
+ native_window_set_buffers_hdr10_plus_metadata(mCodec->mNativeWindow.get(),
+ hdr10PlusInfo->size(), hdr10PlusInfo->data());
+ mCodec->mLastHdr10PlusBuffer = hdr10PlusInfo;
+ }
+
// save buffers sent to the surface so we can get render time when they return
int64_t mediaTimeUs = -1;
buffer->meta()->findInt64("timeUs", &mediaTimeUs);
@@ -7475,12 +7562,45 @@
}
}
+ sp<ABuffer> hdr10PlusInfo;
+ if (params->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
+ && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
+ (void)setHdr10PlusInfo(hdr10PlusInfo);
+ }
+
// Ignore errors as failure is expected for codecs that aren't video encoders.
(void)configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
return setVendorParameters(params);
}
+status_t ACodec::setHdr10PlusInfo(const sp<ABuffer> &hdr10PlusInfo) {
+ if (mDescribeHDR10PlusInfoIndex == 0) {
+ ALOGE("setHdr10PlusInfo: does not support DescribeHDR10PlusInfoParams");
+ return ERROR_UNSUPPORTED;
+ }
+ size_t newSize = sizeof(DescribeHDR10PlusInfoParams) + hdr10PlusInfo->size() - 1;
+ if (mHdr10PlusScratchBuffer == nullptr ||
+ newSize > mHdr10PlusScratchBuffer->size()) {
+ mHdr10PlusScratchBuffer = new ABuffer(newSize);
+ }
+ DescribeHDR10PlusInfoParams *config =
+ (DescribeHDR10PlusInfoParams *)mHdr10PlusScratchBuffer->data();
+ InitOMXParams(config);
+ config->nPortIndex = 0;
+ config->nSize = newSize;
+ config->nParamSize = hdr10PlusInfo->size();
+ config->nParamSizeUsed = hdr10PlusInfo->size();
+ memcpy(config->nValue, hdr10PlusInfo->data(), hdr10PlusInfo->size());
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)mDescribeHDR10PlusInfoIndex,
+ config, config->nSize);
+ if (err != OK) {
+ ALOGE("failed to set DescribeHDR10PlusInfoParams (err %d)", err);
+ }
+ return OK;
+}
+
// Removes trailing tags matching |tag| from |key| (e.g. a settings name). |minLength| specifies
// the minimum number of characters to keep in |key| (even if it has trailing tags).
// (Used to remove trailing 'value' tags in settings names, e.g. to normalize
@@ -7902,6 +8022,15 @@
return true;
}
+ case OMX_EventConfigUpdate:
+ {
+ CHECK_EQ(data1, (OMX_U32)kPortIndexOutput);
+
+ mCodec->onConfigUpdate((OMX_INDEXTYPE)data2);
+
+ return true;
+ }
+
case OMX_EventBufferFlag:
{
return true;
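
From the framework side the dynamic metadata travels in a format buffer keyed "hdr10-plus-info"; a hedged sketch of feeding a per-frame HDR10+ blob into a running codec (pushHdr10PlusInfo() is hypothetical, while the key and the setParameters() path are the ones handled above):

// Hypothetical caller; 'codec' is a started video MediaCodec and seiData/seiSize
// hold the raw HDR10+ metadata for the upcoming frame.
status_t pushHdr10PlusInfo(const sp<MediaCodec> &codec,
                           const uint8_t *seiData, size_t seiSize) {
    sp<AMessage> params = new AMessage;
    params->setBuffer("hdr10-plus-info", ABuffer::CreateAsCopy(seiData, seiSize));
    return codec->setParameters(params);   // lands in ACodec::setParameters() above
}
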
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 9aea88a..249f2a4 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -77,7 +77,7 @@
},
}
-cc_library_shared {
+cc_library {
name: "libstagefright",
srcs: [
@@ -133,6 +133,7 @@
"SurfaceUtils.cpp",
"Utils.cpp",
"ThrottledSource.cpp",
+ "VideoFrameSchedulerBase.cpp",
"VideoFrameScheduler.cpp",
],
@@ -145,7 +146,6 @@
"libdl",
"libdrmframework",
"libgui",
- "libion",
"liblog",
"libmedia",
"libmedia_omx",
@@ -153,7 +153,6 @@
"libmediaextractor",
"libmediametrics",
"libmediautils",
- "libnetd_client",
"libui",
"libutils",
"libmedia_helper",
@@ -161,7 +160,6 @@
"libstagefright_foundation",
"libstagefright_omx_utils",
"libstagefright_opus_common",
- "libstagefright_xmlparser",
"libRScpp",
"libhidlallocatorutils",
"libhidlbase",
@@ -170,8 +168,6 @@
"android.hidl.allocator@1.0",
"android.hardware.cas.native@1.0",
"android.hardware.media.omx@1.0",
- "android.hardware.graphics.allocator@2.0",
- "android.hardware.graphics.mapper@2.0",
],
static_libs: [
@@ -189,6 +185,7 @@
],
header_libs:[
+ "libstagefright_xmlparser_headers",
"media_ndk_headers",
],
@@ -237,7 +234,8 @@
"MediaClock.cpp",
"NdkUtils.cpp",
"Utils.cpp",
- "VideoFrameScheduler.cpp",
+ "VideoFrameSchedulerBase.cpp",
+ "VideoFrameScheduler2.cpp",
"http/ClearMediaHTTP.cpp",
],
@@ -247,10 +245,12 @@
"libnetd_client",
"libutils",
"libstagefright_foundation",
+ "libandroid",
],
static_libs: [
"libmedia_player2_util",
+ "libmedia2_jni_core",
],
export_include_dirs: [
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index a6f0a0b..199b57b 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -118,6 +118,13 @@
}
sp<MetaData> format = mSource->getFormat();
+
+ if (format == NULL) {
+ ALOGE("No metadata b/118620871");
+ android_errorWriteLog(0x534e4554, "118620871");
+ return BAD_VALUE;
+ }
+
const char *mime;
bool success = format->findCString(kKeyMIMEType, &mime);
CHECK(success);
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 2ae3218..9de1e22 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -506,4 +506,11 @@
return NO_INIT;
}
+status_t AudioSource::getPortId(audio_port_handle_t *portId) const {
+ if (mRecord != 0) {
+ *portId = mRecord->getPortId();
+ return NO_ERROR;
+ }
+ return NO_INIT;
+}
} // namespace android
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 3ad82d9..2a819ad 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -19,6 +19,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "CameraSourceTimeLapse"
+#include <media/hardware/HardwareAPI.h>
#include <binder/IPCThreadState.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
@@ -172,8 +173,16 @@
ALOGV("signalBufferReturned");
Mutex::Autolock autoLock(mQuickStopLock);
if (mQuickStop && (buffer == mLastReadBufferCopy)) {
+ if (metaDataStoredInVideoBuffers() == kMetadataBufferTypeNativeHandleSource) {
+ native_handle_t* handle = (
+ (VideoNativeHandleMetadata*)(mLastReadBufferCopy->data()))->pHandle;
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
buffer->setObserver(NULL);
buffer->release();
+ mLastReadBufferCopy = NULL;
+ mForceRead = true;
} else {
return CameraSource::signalBufferReturned(buffer);
}
@@ -182,7 +191,8 @@
void createMediaBufferCopy(
const MediaBufferBase& sourceBuffer,
int64_t frameTime,
- MediaBufferBase **newBuffer) {
+ MediaBufferBase **newBuffer,
+ int32_t videoBufferMode) {
ALOGV("createMediaBufferCopy");
size_t sourceSize = sourceBuffer.size();
@@ -192,13 +202,20 @@
memcpy((*newBuffer)->data(), sourcePointer, sourceSize);
(*newBuffer)->meta_data().setInt64(kKeyTime, frameTime);
+
+ if (videoBufferMode == kMetadataBufferTypeNativeHandleSource) {
+ ((VideoNativeHandleMetadata*)((*newBuffer)->data()))->pHandle =
+ native_handle_clone(
+ ((VideoNativeHandleMetadata*)(sourceBuffer.data()))->pHandle);
+ }
}
void CameraSourceTimeLapse::fillLastReadBufferCopy(MediaBufferBase& sourceBuffer) {
ALOGV("fillLastReadBufferCopy");
int64_t frameTime;
CHECK(sourceBuffer.meta_data().findInt64(kKeyTime, &frameTime));
- createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy);
+ createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy,
+ metaDataStoredInVideoBuffers());
mLastReadBufferCopy->add_ref();
mLastReadBufferCopy->setObserver(this);
}
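
In metadata mode each video buffer carries a native_handle_t instead of pixels, so the copied last-read buffer must own its own clone and tear it down explicitly. The underlying cutils calls, shown in isolation:

#include <cutils/native_handle.h>

// Duplicate the handle (and its fds) when the metadata buffer is copied ...
native_handle_t *copyHandle(const native_handle_t *original) {
    return native_handle_clone(original);
}

// ... and on release, close the duplicated fds, then free the handle struct.
void releaseHandle(native_handle_t *handle) {
    native_handle_close(handle);
    native_handle_delete(handle);
}
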
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index 03e0d12..d118e8c 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -26,8 +26,6 @@
#include <cutils/properties.h>
#include <cutils/qtaguid.h>
-#include <NetdClient.h>
-
namespace android {
HTTPBase::HTTPBase()
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 6ff3d78..b45eb03 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -137,6 +137,8 @@
private:
enum {
+ // TODO: consider increasing this to cover the case where the camera app
+ // does not send video frames continuously.
kMaxCttsOffsetTimeUs = 1000000LL, // 1 second
kSampleArraySize = 1000,
};
@@ -317,6 +319,7 @@
ListTableEntries<uint32_t, 1> *mStssTableEntries;
ListTableEntries<uint32_t, 2> *mSttsTableEntries;
ListTableEntries<uint32_t, 2> *mCttsTableEntries;
+ ListTableEntries<uint32_t, 3> *mElstTableEntries; // 3 columns: segDuration, mediaTime, mediaRate
int64_t mMinCttsOffsetTimeUs;
int64_t mMinCttsOffsetTicks;
@@ -416,6 +419,8 @@
// Duration is time scale based
void addOneSttsTableEntry(size_t sampleCount, int32_t timescaledDur);
void addOneCttsTableEntry(size_t sampleCount, int32_t timescaledDur);
+ void addOneElstTableEntry(uint32_t segmentDuration, int32_t mediaTime,
+ int16_t mediaRate, int16_t mediaRateFraction);
bool isTrackMalFormed() const;
void sendTrackSummary(bool hasMultipleTracks);
@@ -448,6 +453,7 @@
void writeVideoFourCCBox();
void writeMetadataFourCCBox();
void writeStblBox(bool use32BitOffset);
+ void writeEdtsBox();
Track(const Track &);
Track &operator=(const Track &);
@@ -483,6 +489,7 @@
mStartTimestampUs = -1ll;
mStartTimeOffsetMs = -1;
+ mStartTimeOffsetBFramesUs = 0;
mPaused = false;
mStarted = false;
mWriterThreadStarted = false;
@@ -1272,6 +1279,10 @@
// Adjust the global start time.
mStartTimestampUs += minCttsOffsetTimeUs - kMaxCttsOffsetTimeUs;
+ // Add mStartTimeOffsetBFramesUs (negative or zero) to the duration of the first entry in STTS.
+ mStartTimeOffsetBFramesUs = minCttsOffsetTimeUs - kMaxCttsOffsetTimeUs;
+ ALOGV("mStartTimeOffsetBFramesUs :%" PRId32, mStartTimeOffsetBFramesUs);
+
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
if (!(*it)->isHeic()) {
@@ -1747,6 +1758,11 @@
return mStartTimestampUs;
}
+int32_t MPEG4Writer::getStartTimeOffsetBFramesUs() {
+ Mutex::Autolock autoLock(mLock);
+ return mStartTimeOffsetBFramesUs;
+}
+
size_t MPEG4Writer::numTracks() {
Mutex::Autolock autolock(mLock);
return mTracks.size();
@@ -1776,6 +1792,7 @@
mStssTableEntries(new ListTableEntries<uint32_t, 1>(1000)),
mSttsTableEntries(new ListTableEntries<uint32_t, 2>(1000)),
mCttsTableEntries(new ListTableEntries<uint32_t, 2>(1000)),
+ mElstTableEntries(new ListTableEntries<uint32_t, 3>(3)), // Reserve 3 rows, a row has 3 items
mMinCttsOffsetTimeUs(0),
mMinCttsOffsetTicks(0),
mMaxCttsOffsetTicks(0),
@@ -1842,46 +1859,48 @@
// Clear all the internal states except the CSD data.
void MPEG4Writer::Track::resetInternal() {
- mDone = false;
- mPaused = false;
- mResumed = false;
- mStarted = false;
- mGotStartKeyFrame = false;
- mIsMalformed = false;
- mTrackDurationUs = 0;
- mEstimatedTrackSizeBytes = 0;
- mSamplesHaveSameSize = 0;
- if (mStszTableEntries != NULL) {
- delete mStszTableEntries;
- mStszTableEntries = new ListTableEntries<uint32_t, 1>(1000);
- }
-
- if (mStcoTableEntries != NULL) {
- delete mStcoTableEntries;
- mStcoTableEntries = new ListTableEntries<uint32_t, 1>(1000);
- }
- if (mCo64TableEntries != NULL) {
- delete mCo64TableEntries;
- mCo64TableEntries = new ListTableEntries<off64_t, 1>(1000);
- }
-
- if (mStscTableEntries != NULL) {
- delete mStscTableEntries;
- mStscTableEntries = new ListTableEntries<uint32_t, 3>(1000);
- }
- if (mStssTableEntries != NULL) {
- delete mStssTableEntries;
- mStssTableEntries = new ListTableEntries<uint32_t, 1>(1000);
- }
- if (mSttsTableEntries != NULL) {
- delete mSttsTableEntries;
- mSttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
- }
- if (mCttsTableEntries != NULL) {
- delete mCttsTableEntries;
- mCttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
- }
- mReachedEOS = false;
+ mDone = false;
+ mPaused = false;
+ mResumed = false;
+ mStarted = false;
+ mGotStartKeyFrame = false;
+ mIsMalformed = false;
+ mTrackDurationUs = 0;
+ mEstimatedTrackSizeBytes = 0;
+ mSamplesHaveSameSize = 0;
+ if (mStszTableEntries != NULL) {
+ delete mStszTableEntries;
+ mStszTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+ }
+ if (mStcoTableEntries != NULL) {
+ delete mStcoTableEntries;
+ mStcoTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+ }
+ if (mCo64TableEntries != NULL) {
+ delete mCo64TableEntries;
+ mCo64TableEntries = new ListTableEntries<off64_t, 1>(1000);
+ }
+ if (mStscTableEntries != NULL) {
+ delete mStscTableEntries;
+ mStscTableEntries = new ListTableEntries<uint32_t, 3>(1000);
+ }
+ if (mStssTableEntries != NULL) {
+ delete mStssTableEntries;
+ mStssTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+ }
+ if (mSttsTableEntries != NULL) {
+ delete mSttsTableEntries;
+ mSttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
+ }
+ if (mCttsTableEntries != NULL) {
+ delete mCttsTableEntries;
+ mCttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
+ }
+ if (mElstTableEntries != NULL) {
+ delete mElstTableEntries;
+ mElstTableEntries = new ListTableEntries<uint32_t, 3>(3);
+ }
+ mReachedEOS = false;
}
void MPEG4Writer::Track::updateTrackSizeEstimate() {
@@ -1900,6 +1919,7 @@
mStssTableEntries->count() * 4 + // stss box size
mSttsTableEntries->count() * 8 + // stts box size
mCttsTableEntries->count() * 8 + // ctts box size
+ mElstTableEntries->count() * 12 + // elst box size
stcoBoxSizeBytes + // stco box size
stszBoxSizeBytes; // stsz box size
}
@@ -1936,6 +1956,16 @@
mCttsTableEntries->add(htonl(duration));
}
+void MPEG4Writer::Track::addOneElstTableEntry(
+ uint32_t segmentDuration, int32_t mediaTime, int16_t mediaRate, int16_t mediaRateFraction) {
+ ALOGV("segmentDuration:%u, mediaTime:%d", segmentDuration, mediaTime);
+ ALOGV("mediaRate :%" PRId16 ", mediaRateFraction :%" PRId16 ", Ored %u", mediaRate,
+ mediaRateFraction, ((((uint32_t)mediaRate) << 16) | ((uint32_t)mediaRateFraction)));
+ mElstTableEntries->add(htonl(segmentDuration));
+ mElstTableEntries->add(htonl(mediaTime));
+ mElstTableEntries->add(htonl((((uint32_t)mediaRate) << 16) | (uint32_t)mediaRateFraction));
+}
+
status_t MPEG4Writer::setNextFd(int fd) {
ALOGV("addNextFd");
Mutex::Autolock l(mLock);
@@ -2173,6 +2203,7 @@
delete mSttsTableEntries;
delete mStssTableEntries;
delete mCttsTableEntries;
+ delete mElstTableEntries;
mStszTableEntries = NULL;
mStcoTableEntries = NULL;
@@ -2181,6 +2212,7 @@
mSttsTableEntries = NULL;
mStssTableEntries = NULL;
mCttsTableEntries = NULL;
+ mElstTableEntries = NULL;
if (mCodecSpecificData != NULL) {
free(mCodecSpecificData);
@@ -3612,6 +3644,7 @@
uint32_t now = getMpeg4Time();
mOwner->beginBox("trak");
writeTkhdBox(now);
+ writeEdtsBox();
mOwner->beginBox("mdia");
writeMdhdBox(now);
writeHdlrBox();
@@ -3674,6 +3707,29 @@
TRESPASS();
}
mOwner->beginBox(fourcc); // TextMetaDataSampleEntry
+
+ // HACK to make the metadata track compliant with the ISO standard.
+ //
+ // Metadata track is added from API 26 and the original implementation does not
+ // fully followed the TextMetaDataSampleEntry specified in ISO/IEC 14496-12-2015
+ // in that only the mime_format is written out. content_encoding and
+ // data_reference_index have not been written out. This leads to the failure
+ // when some MP4 parser tries to parse the metadata track according to the
+ // standard. The hack here will make the metadata track compliant with the
+ // standard while still maintaining backwards compatibility. This would enable
+ // Android versions before API 29 to be able to read out the standard compliant
+ // Metadata track generated with Android API 29 and upward. The trick is based
+ // on the fact that the Metadata track must start with prefix “application/” and
+ // those missing fields are not used in Android's Metadata track. By writting
+ // out the mime_format twice, the first mime_format will be used to fill out the
+ // missing reserved, data_reference_index and content encoding fields. On the
+ // parser side, the extracter before API 29 will read out the first mime_format
+ // correctly and drop the second mime_format. The extractor from API 29 will
+ // check if the reserved, data_reference_index and content encoding are filled
+ // with “application” to detect if this is a standard compliant metadata track
+ // and read out the data accordingly.
+ mOwner->writeCString(mime);
+
mOwner->writeCString(mime); // metadata mime_format
mOwner->endBox(); // mett
}
@@ -3959,6 +4015,33 @@
mOwner->endBox();
}
+void MPEG4Writer::Track::writeEdtsBox(){
+ ALOGV("%s : getStartTimeOffsetTimeUs of track:%" PRId64 " us", getTrackType(),
+ getStartTimeOffsetTimeUs());
+
+ // Prepone video playback.
+ if (mMinCttsOffsetTicks != mMaxCttsOffsetTicks) {
+ int32_t mvhdTimeScale = mOwner->getTimeScale();
+ uint32_t tkhdDuration = (mTrackDurationUs * mvhdTimeScale + 5E5) / 1E6;
+ int64_t mediaTime = ((kMaxCttsOffsetTimeUs - getMinCttsOffsetTimeUs())
+ * mTimeScale + 5E5) / 1E6;
+ if (tkhdDuration > 0 && mediaTime > 0) {
+ addOneElstTableEntry(tkhdDuration, mediaTime, 1, 0);
+ }
+ }
+
+ if (mElstTableEntries->count() == 0) {
+ return;
+ }
+
+ mOwner->beginBox("edts");
+ mOwner->beginBox("elst");
+ mOwner->writeInt32(0); // version=0, flags=0
+ mElstTableEntries->write(mOwner);
+ mOwner->endBox(); // elst;
+ mOwner->endBox(); // edts
+}
+
void MPEG4Writer::Track::writeMdhdBox(uint32_t now) {
int64_t trakDurationUs = getDurationUs();
int64_t mdhdDuration = (trakDurationUs * mTimeScale + 5E5) / 1E6;
@@ -4095,7 +4178,9 @@
uint32_t duration;
CHECK(mSttsTableEntries->get(duration, 1));
duration = htonl(duration); // Back to host byte order
- mSttsTableEntries->set(htonl(duration + getStartTimeOffsetScaledTime()), 1);
+ int32_t startTimeOffsetScaled = (((getStartTimeOffsetTimeUs() +
+ mOwner->getStartTimeOffsetBFramesUs()) * mTimeScale) + 500000LL) / 1000000LL;
+ mSttsTableEntries->set(htonl((int32_t)duration + startTimeOffsetScaled), 1);
}
mSttsTableEntries->write(mOwner);
mOwner->endBox(); // stts
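
The elst entry converts the unused part of the CTTS headroom into a media-time offset in track-timescale ticks. A worked example with illustrative numbers (same arithmetic as writeEdtsBox() above):

// Illustrative values: 1s CTTS headroom, 66ms minimum composition offset,
// 30s track, track timescale 90000, movie (mvhd) timescale 1000.
int64_t kMaxCttsOffsetTimeUs = 1000000;
int64_t minCttsOffsetTimeUs  = 66000;
int64_t trackDurationUs      = 30000000;
int32_t mvhdTimeScale        = 1000;
int32_t trackTimeScale       = 90000;

uint32_t segmentDuration = (trackDurationUs * mvhdTimeScale + 5E5) / 1E6;   // 30000
int64_t mediaTime = ((kMaxCttsOffsetTimeUs - minCttsOffsetTimeUs)
        * trackTimeScale + 5E5) / 1E6;                                      // 84060
// elst entry {segmentDuration, mediaTime, rate 1.0}: playback starts 84060 ticks
// (~934ms) into the media, pulling video presentation earlier by that amount.
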
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 70064ea..bd9e2bb 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -2197,6 +2197,13 @@
}
}
+ sp<ABuffer> hdr10PlusInfo;
+ if (mOutputFormat->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
+ && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
+ native_window_set_buffers_hdr10_plus_metadata(mSurface.get(),
+ hdr10PlusInfo->size(), hdr10PlusInfo->data());
+ }
+
if (mime.startsWithIgnoreCase("video/")) {
mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
}
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index ea818ff..9511931 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -43,63 +43,24 @@
}
// --------------------------------------------------------------------------------
-MediaExtractorCUnwrapperV1::MediaExtractorCUnwrapperV1(CMediaExtractor *plugin) {
+MediaExtractorCUnwrapper::MediaExtractorCUnwrapper(CMediaExtractor *plugin) {
this->plugin = plugin;
}
-MediaExtractorCUnwrapperV1::~MediaExtractorCUnwrapperV1() {
+MediaExtractorCUnwrapper::~MediaExtractorCUnwrapper() {
plugin->free(plugin->data);
free(plugin);
}
-size_t MediaExtractorCUnwrapperV1::countTracks() {
+size_t MediaExtractorCUnwrapper::countTracks() {
return plugin->countTracks(plugin->data);
}
-MediaTrack *MediaExtractorCUnwrapperV1::getTrack(size_t index) {
+MediaTrack *MediaExtractorCUnwrapper::getTrack(size_t index) {
return new MediaTrackCUnwrapper(plugin->getTrack(plugin->data, index));
}
-status_t MediaExtractorCUnwrapperV1::getTrackMetaData(
- MetaDataBase& meta, size_t index, uint32_t flags) {
- return plugin->getTrackMetaData(plugin->data, meta, index, flags);
-}
-
-status_t MediaExtractorCUnwrapperV1::getMetaData(MetaDataBase& meta) {
- return plugin->getMetaData(plugin->data, meta);
-}
-
-const char * MediaExtractorCUnwrapperV1::name() {
- return plugin->name(plugin->data);
-}
-
-uint32_t MediaExtractorCUnwrapperV1::flags() const {
- return plugin->flags(plugin->data);
-}
-
-status_t MediaExtractorCUnwrapperV1::setMediaCas(const uint8_t* casToken, size_t size) {
- return plugin->setMediaCas(plugin->data, casToken, size);
-}
-
-// --------------------------------------------------------------------------------
-MediaExtractorCUnwrapperV2::MediaExtractorCUnwrapperV2(CMediaExtractorV2 *plugin) {
- this->plugin = plugin;
-}
-
-MediaExtractorCUnwrapperV2::~MediaExtractorCUnwrapperV2() {
- plugin->free(plugin->data);
- free(plugin);
-}
-
-size_t MediaExtractorCUnwrapperV2::countTracks() {
- return plugin->countTracks(plugin->data);
-}
-
-MediaTrack *MediaExtractorCUnwrapperV2::getTrack(size_t index) {
- return new MediaTrackCUnwrapperV2(plugin->getTrack(plugin->data, index));
-}
-
-status_t MediaExtractorCUnwrapperV2::getTrackMetaData(
+status_t MediaExtractorCUnwrapper::getTrackMetaData(
MetaDataBase& meta, size_t index, uint32_t flags) {
sp<AMessage> msg = new AMessage();
AMediaFormat *format = AMediaFormat_fromMsg(&msg);
@@ -111,7 +72,7 @@
return reverse_translate_error(ret);
}
-status_t MediaExtractorCUnwrapperV2::getMetaData(MetaDataBase& meta) {
+status_t MediaExtractorCUnwrapper::getMetaData(MetaDataBase& meta) {
sp<AMessage> msg = new AMessage();
AMediaFormat *format = AMediaFormat_fromMsg(&msg);
media_status_t ret = plugin->getMetaData(plugin->data, format);
@@ -122,68 +83,15 @@
return reverse_translate_error(ret);
}
-const char * MediaExtractorCUnwrapperV2::name() {
+const char * MediaExtractorCUnwrapper::name() {
return plugin->name(plugin->data);
}
-uint32_t MediaExtractorCUnwrapperV2::flags() const {
+uint32_t MediaExtractorCUnwrapper::flags() const {
return plugin->flags(plugin->data);
}
-status_t MediaExtractorCUnwrapperV2::setMediaCas(const uint8_t* casToken, size_t size) {
- return plugin->setMediaCas(plugin->data, casToken, size);
-}
-
-// --------------------------------------------------------------------------------
-MediaExtractorCUnwrapperV3::MediaExtractorCUnwrapperV3(CMediaExtractorV3 *plugin) {
- this->plugin = plugin;
-}
-
-MediaExtractorCUnwrapperV3::~MediaExtractorCUnwrapperV3() {
- plugin->free(plugin->data);
- free(plugin);
-}
-
-size_t MediaExtractorCUnwrapperV3::countTracks() {
- return plugin->countTracks(plugin->data);
-}
-
-MediaTrack *MediaExtractorCUnwrapperV3::getTrack(size_t index) {
- return new MediaTrackCUnwrapperV3(plugin->getTrack(plugin->data, index));
-}
-
-status_t MediaExtractorCUnwrapperV3::getTrackMetaData(
- MetaDataBase& meta, size_t index, uint32_t flags) {
- sp<AMessage> msg = new AMessage();
- AMediaFormat *format = AMediaFormat_fromMsg(&msg);
- media_status_t ret = plugin->getTrackMetaData(plugin->data, format, index, flags);
- sp<MetaData> newMeta = new MetaData();
- convertMessageToMetaData(msg, newMeta);
- delete format;
- meta = *newMeta;
- return reverse_translate_error(ret);
-}
-
-status_t MediaExtractorCUnwrapperV3::getMetaData(MetaDataBase& meta) {
- sp<AMessage> msg = new AMessage();
- AMediaFormat *format = AMediaFormat_fromMsg(&msg);
- media_status_t ret = plugin->getMetaData(plugin->data, format);
- sp<MetaData> newMeta = new MetaData();
- convertMessageToMetaData(msg, newMeta);
- delete format;
- meta = *newMeta;
- return reverse_translate_error(ret);
-}
-
-const char * MediaExtractorCUnwrapperV3::name() {
- return plugin->name(plugin->data);
-}
-
-uint32_t MediaExtractorCUnwrapperV3::flags() const {
- return plugin->flags(plugin->data);
-}
-
-status_t MediaExtractorCUnwrapperV3::setMediaCas(const uint8_t* casToken, size_t size) {
+status_t MediaExtractorCUnwrapper::setMediaCas(const uint8_t* casToken, size_t size) {
return plugin->setMediaCas(plugin->data, casToken, size);
}
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index 81fc4ae..2c7a4e5 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -106,24 +106,12 @@
}
MediaExtractor *ex = nullptr;
- if (creatorVersion == 1) {
- CMediaExtractor *ret = ((CreatorFuncV1)creator)(source->wrap(), meta);
+ if (creatorVersion == EXTRACTORDEF_VERSION_NDK_V1) {
+ CMediaExtractor *ret = ((CreatorFunc)creator)(source->wrap(), meta);
if (meta != nullptr && freeMeta != nullptr) {
freeMeta(meta);
}
- ex = ret != nullptr ? new MediaExtractorCUnwrapperV1(ret) : nullptr;
- } else if (creatorVersion == 2) {
- CMediaExtractorV2 *ret = ((CreatorFuncV2)creator)(source->wrap(), meta);
- if (meta != nullptr && freeMeta != nullptr) {
- freeMeta(meta);
- }
- ex = ret != nullptr ? new MediaExtractorCUnwrapperV2(ret) : nullptr;
- } else if (creatorVersion == 3) {
- CMediaExtractorV3 *ret = ((CreatorFuncV3)creator)(source->wrap(), meta);
- if (meta != nullptr && freeMeta != nullptr) {
- freeMeta(meta);
- }
- ex = ret != nullptr ? new MediaExtractorCUnwrapperV3(ret) : nullptr;
+ ex = ret != nullptr ? new MediaExtractorCUnwrapper(ret) : nullptr;
}
ALOGV("Created an extractor '%s' with confidence %.2f",
@@ -195,15 +183,9 @@
FreeMetaFunc newFreeMeta = nullptr;
void *curCreator = NULL;
- if ((*it)->def.def_version == 1) {
- curCreator = (void*) (*it)->def.sniff.v1(
- source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
- } else if ((*it)->def.def_version == 2) {
+ if ((*it)->def.def_version == EXTRACTORDEF_VERSION_NDK_V1) {
curCreator = (void*) (*it)->def.sniff.v2(
source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
- } else if ((*it)->def.def_version == 3) {
- curCreator = (void*) (*it)->def.sniff.v3(
- source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
}
if (curCreator) {
@@ -232,8 +214,7 @@
void MediaExtractorFactory::RegisterExtractor(const sp<ExtractorPlugin> &plugin,
std::list<sp<ExtractorPlugin>> &pluginList) {
// sanity check struct version, uuid, name
- if (plugin->def.def_version == 0
- || plugin->def.def_version > EXTRACTORDEF_VERSION_CURRENT + 1) {
+ if (plugin->def.def_version != EXTRACTORDEF_VERSION_NDK_V1) {
ALOGE("don't understand extractor format %u, ignoring.", plugin->def.def_version);
return;
}
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index 6c0f989..f158491 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -16,6 +16,7 @@
#include <mutex>
+#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/Utils.h>
@@ -57,27 +58,39 @@
return (mOptions & kSeekTo_Option) != 0;
}
-/* -------------- unwrapper v1 --------------- */
+/* -------------- unwrapper --------------- */
MediaTrackCUnwrapper::MediaTrackCUnwrapper(CMediaTrack *cmediatrack) {
wrapper = cmediatrack;
+ bufferGroup = nullptr;
}
MediaTrackCUnwrapper::~MediaTrackCUnwrapper() {
wrapper->free(wrapper->data);
free(wrapper);
+ delete bufferGroup;
}
status_t MediaTrackCUnwrapper::start() {
- return wrapper->start(wrapper->data);
+ if (bufferGroup == nullptr) {
+ bufferGroup = new MediaBufferGroup();
+ }
+ return reverse_translate_error(wrapper->start(wrapper->data, bufferGroup->wrap()));
}
status_t MediaTrackCUnwrapper::stop() {
- return wrapper->stop(wrapper->data);
+ return reverse_translate_error(wrapper->stop(wrapper->data));
}
status_t MediaTrackCUnwrapper::getFormat(MetaDataBase& format) {
- return wrapper->getFormat(wrapper->data, format);
+ sp<AMessage> msg = new AMessage();
+ AMediaFormat *tmpFormat = AMediaFormat_fromMsg(&msg);
+ media_status_t ret = wrapper->getFormat(wrapper->data, tmpFormat);
+ sp<MetaData> newMeta = new MetaData();
+ convertMessageToMetaData(msg, newMeta);
+ delete tmpFormat;
+ format = *newMeta;
+ return reverse_translate_error(ret);
}
status_t MediaTrackCUnwrapper::read(MediaBufferBase **buffer, const ReadOptions *options) {
@@ -94,116 +107,7 @@
opts |= SEEK;
opts |= (uint32_t) seekMode;
}
-
-
- return wrapper->read(wrapper->data, buffer, opts, seekPosition);
-}
-
-bool MediaTrackCUnwrapper::supportNonblockingRead() {
- return wrapper->supportsNonBlockingRead(wrapper->data);
-}
-
-/* -------------- unwrapper v2 --------------- */
-
-MediaTrackCUnwrapperV2::MediaTrackCUnwrapperV2(CMediaTrackV2 *cmediatrack2) {
- wrapper = cmediatrack2;
-}
-
-MediaTrackCUnwrapperV2::~MediaTrackCUnwrapperV2() {
- wrapper->free(wrapper->data);
- free(wrapper);
-}
-
-status_t MediaTrackCUnwrapperV2::start() {
- return reverse_translate_error(wrapper->start(wrapper->data));
-}
-
-status_t MediaTrackCUnwrapperV2::stop() {
- return reverse_translate_error(wrapper->stop(wrapper->data));
-}
-
-status_t MediaTrackCUnwrapperV2::getFormat(MetaDataBase& format) {
- sp<AMessage> msg = new AMessage();
- AMediaFormat *tmpFormat = AMediaFormat_fromMsg(&msg);
- media_status_t ret = wrapper->getFormat(wrapper->data, tmpFormat);
- sp<MetaData> newMeta = new MetaData();
- convertMessageToMetaData(msg, newMeta);
- delete tmpFormat;
- format = *newMeta;
- return reverse_translate_error(ret);
-}
-
-status_t MediaTrackCUnwrapperV2::read(MediaBufferBase **buffer, const ReadOptions *options) {
-
- uint32_t opts = 0;
-
- if (options && options->getNonBlocking()) {
- opts |= CMediaTrackReadOptions::NONBLOCKING;
- }
-
- int64_t seekPosition = 0;
- MediaTrack::ReadOptions::SeekMode seekMode;
- if (options && options->getSeekTo(&seekPosition, &seekMode)) {
- opts |= SEEK;
- opts |= (uint32_t) seekMode;
- }
-
- return reverse_translate_error(wrapper->read(wrapper->data, buffer, opts, seekPosition));
-}
-
-bool MediaTrackCUnwrapperV2::supportNonblockingRead() {
- return wrapper->supportsNonBlockingRead(wrapper->data);
-}
-
-/* -------------- unwrapper v3 --------------- */
-
-MediaTrackCUnwrapperV3::MediaTrackCUnwrapperV3(CMediaTrackV3 *cmediatrack3) {
- wrapper = cmediatrack3;
- bufferGroup = nullptr;
-}
-
-MediaTrackCUnwrapperV3::~MediaTrackCUnwrapperV3() {
- wrapper->free(wrapper->data);
- free(wrapper);
-}
-
-status_t MediaTrackCUnwrapperV3::start() {
- if (bufferGroup == nullptr) {
- bufferGroup = new MediaBufferGroup();
- }
- return reverse_translate_error(wrapper->start(wrapper->data, bufferGroup->wrap()));
-}
-
-status_t MediaTrackCUnwrapperV3::stop() {
- return reverse_translate_error(wrapper->stop(wrapper->data));
-}
-
-status_t MediaTrackCUnwrapperV3::getFormat(MetaDataBase& format) {
- sp<AMessage> msg = new AMessage();
- AMediaFormat *tmpFormat = AMediaFormat_fromMsg(&msg);
- media_status_t ret = wrapper->getFormat(wrapper->data, tmpFormat);
- sp<MetaData> newMeta = new MetaData();
- convertMessageToMetaData(msg, newMeta);
- delete tmpFormat;
- format = *newMeta;
- return reverse_translate_error(ret);
-}
-
-status_t MediaTrackCUnwrapperV3::read(MediaBufferBase **buffer, const ReadOptions *options) {
-
- uint32_t opts = 0;
-
- if (options && options->getNonBlocking()) {
- opts |= CMediaTrackReadOptions::NONBLOCKING;
- }
-
- int64_t seekPosition = 0;
- MediaTrack::ReadOptions::SeekMode seekMode;
- if (options && options->getSeekTo(&seekPosition, &seekMode)) {
- opts |= SEEK;
- opts |= (uint32_t) seekMode;
- }
- CMediaBufferV3 *buf = nullptr;
+ CMediaBuffer *buf = nullptr;
media_status_t ret = wrapper->read(wrapper->data, &buf, opts, seekPosition);
if (ret == AMEDIA_OK && buf != nullptr) {
*buffer = (MediaBufferBase*)buf->handle;
@@ -216,10 +120,58 @@
if (format->mFormat->findInt64("timeUs", &val64)) {
meta.setInt64(kKeyTime, val64);
}
+ if (format->mFormat->findInt64("durationUs", &val64)) {
+ meta.setInt64(kKeyDuration, val64);
+ }
+ if (format->mFormat->findInt64("target-time", &val64)) {
+ meta.setInt64(kKeyTargetTime, val64);
+ }
int32_t val32;
if (format->mFormat->findInt32("is-sync-frame", &val32)) {
meta.setInt32(kKeyIsSyncFrame, val32);
}
+ if (format->mFormat->findInt32("temporal-layer-id", &val32)) {
+ meta.setInt32(kKeyTemporalLayerId, val32);
+ }
+ if (format->mFormat->findInt32("temporal-layer-count", &val32)) {
+ meta.setInt32(kKeyTemporalLayerCount, val32);
+ }
+ if (format->mFormat->findInt32("crypto-default-iv-size", &val32)) {
+ meta.setInt32(kKeyCryptoDefaultIVSize, val32);
+ }
+ if (format->mFormat->findInt32("crypto-mode", &val32)) {
+ meta.setInt32(kKeyCryptoMode, val32);
+ }
+ if (format->mFormat->findInt32("crypto-encrypted-byte-block", &val32)) {
+ meta.setInt32(kKeyEncryptedByteBlock, val32);
+ }
+ if (format->mFormat->findInt32("crypto-skip-byte-block", &val32)) {
+ meta.setInt32(kKeySkipByteBlock, val32);
+ }
+ if (format->mFormat->findInt32("valid-samples", &val32)) {
+ meta.setInt32(kKeyValidSamples, val32);
+ }
+ sp<ABuffer> valbuf;
+ if (format->mFormat->findBuffer("crypto-plain-sizes", &valbuf)) {
+ meta.setData(kKeyPlainSizes,
+ MetaDataBase::Type::TYPE_NONE, valbuf->data(), valbuf->size());
+ }
+ if (format->mFormat->findBuffer("crypto-encrypted-sizes", &valbuf)) {
+ meta.setData(kKeyEncryptedSizes,
+ MetaDataBase::Type::TYPE_NONE, valbuf->data(), valbuf->size());
+ }
+ if (format->mFormat->findBuffer("crypto-key", &valbuf)) {
+ meta.setData(kKeyCryptoKey,
+ MetaDataBase::Type::TYPE_NONE, valbuf->data(), valbuf->size());
+ }
+ if (format->mFormat->findBuffer("crypto-iv", &valbuf)) {
+ meta.setData(kKeyCryptoIV,
+ MetaDataBase::Type::TYPE_NONE, valbuf->data(), valbuf->size());
+ }
+ if (format->mFormat->findBuffer("sei", &valbuf)) {
+ meta.setData(kKeySEI,
+ MetaDataBase::Type::TYPE_NONE, valbuf->data(), valbuf->size());
+ }
} else {
*buffer = nullptr;
}
@@ -227,7 +179,7 @@
return reverse_translate_error(ret);
}
-bool MediaTrackCUnwrapperV3::supportNonblockingRead() {
+bool MediaTrackCUnwrapper::supportNonblockingRead() {
return wrapper->supportsNonBlockingRead(wrapper->data);
}
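The consolidated MediaTrackCUnwrapper::read() above copies per-buffer keys (timeUs, crypto sizes, SEI, and so on) from the plugin's AMessage-backed format into the output buffer's MetaDataBase one key at a time. A minimal standalone sketch of that copy pattern, using only stock stagefright foundation classes; the helper name is illustrative and not part of this change:

#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MetaDataBase.h>

using namespace android;

// Copy one opaque buffer entry from an AMessage into a MetaDataBase, the way
// read() does for pairs such as "crypto-iv" / kKeyCryptoIV above.
static void copyBufferEntry(const sp<AMessage> &msg, const char *msgKey,
                            MetaDataBase &meta, uint32_t metaKey) {
    sp<ABuffer> buf;
    if (msg->findBuffer(msgKey, &buf)) {
        meta.setData(metaKey, MetaDataBase::Type::TYPE_NONE, buf->data(), buf->size());
    }
}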
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 670b607..163cd05 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -119,7 +119,8 @@
}
// if user/container supplied HDR static info without transfer set, assume true
- if (format->contains("hdr-static-info") && !format->contains("color-transfer")) {
+ if ((format->contains("hdr-static-info") || format->contains("hdr10-plus-info"))
+ && !format->contains("color-transfer")) {
return true;
}
// otherwise, verify that an HDR transfer function is set
@@ -598,7 +599,10 @@
{
{ "exif-offset", kKeyExifOffset },
{ "exif-size", kKeyExifSize },
+ { "target-time", kKeyTargetTime },
{ "thumbnail-time", kKeyThumbnailTime },
+ { "timeUs", kKeyTime },
+ { "durationUs", kKeyDuration },
}
};
@@ -610,12 +614,14 @@
{ "crypto-default-iv-size", kKeyCryptoDefaultIVSize },
{ "crypto-encrypted-byte-block", kKeyEncryptedByteBlock },
{ "crypto-skip-byte-block", kKeySkipByteBlock },
+ { "frame-count", kKeyFrameCount },
{ "max-bitrate", kKeyMaxBitRate },
{ "pcm-big-endian", kKeyPcmBigEndian },
{ "temporal-layer-count", kKeyTemporalLayerCount },
+ { "temporal-layer-id", kKeyTemporalLayerId },
{ "thumbnail-width", kKeyThumbnailWidth },
{ "thumbnail-height", kKeyThumbnailHeight },
- { "frame-count", kKeyFrameCount },
+ { "valid-samples", kKeyValidSamples },
}
};
@@ -626,7 +632,10 @@
{ "pssh", kKeyPssh },
{ "crypto-iv", kKeyCryptoIV },
{ "crypto-key", kKeyCryptoKey },
+ { "crypto-encrypted-sizes", kKeyEncryptedSizes },
+ { "crypto-plain-sizes", kKeyPlainSizes },
{ "icc-profile", kKeyIccProfile },
+ { "sei", kKeySEI },
{ "text-format-data", kKeyTextFormatData },
}
};
@@ -870,6 +879,16 @@
ColorUtils::setHDRStaticInfoIntoFormat(*(HDRStaticInfo*)data, msg);
}
+ if (meta->findData(kKeyHdr10PlusInfo, &type, &data, &size)
+ && size > 0) {
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == NULL || buffer->base() == NULL) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer->data(), data, size);
+ msg->setBuffer("hdr10-plus-info", buffer);
+ }
+
convertMetaDataToMessageColorAspects(meta, msg);
} else if (!strncasecmp("audio/", mime, 6)) {
int32_t numChannels, sampleRate;
@@ -1618,6 +1637,12 @@
}
}
+ sp<ABuffer> hdr10PlusInfo;
+ if (msg->findBuffer("hdr10-plus-info", &hdr10PlusInfo)) {
+ meta->setData(kKeyHdr10PlusInfo, 0,
+ hdr10PlusInfo->data(), hdr10PlusInfo->size());
+ }
+
convertMessageToMetaDataColorAspects(msg, meta);
AString tsSchema;
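With the hunks above, an HDR10+ blob stored under kKeyHdr10PlusInfo is carried across convertMetaDataToMessage() as an "hdr10-plus-info" buffer, and convertMessageToMetaData() copies it back. A hedged standalone check of the forward direction (the mime and dimensions are arbitrary but required by the converter, and the blob must be non-empty for the new branch to fire):

#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>

using namespace android;

static bool hdr10PlusSurvivesConversion(const void *blob, size_t size) {
    sp<MetaData> meta = new MetaData;
    meta->setCString(kKeyMIMEType, "video/x-vnd.on2.vp9");
    meta->setInt32(kKeyWidth, 1920);
    meta->setInt32(kKeyHeight, 1080);
    meta->setData(kKeyHdr10PlusInfo, 0, blob, size);

    sp<AMessage> msg;
    if (convertMetaDataToMessage(meta, &msg) != OK) {
        return false;
    }
    sp<ABuffer> buf;
    return msg->findBuffer("hdr10-plus-info", &buf) && buf->size() == size;
}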
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 9020fc1..4e5b5e2 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,8 +19,7 @@
#include <utils/Log.h>
#define ATRACE_TAG ATRACE_TAG_VIDEO
#include <utils/Trace.h>
-
-#include <sys/time.h>
+#include <utils/String16.h>
#include <binder/IServiceManager.h>
#include <gui/ISurfaceComposer.h>
@@ -32,321 +31,14 @@
namespace android {
-static const nsecs_t kNanosIn1s = 1000000000;
-
-template<class T>
-static int compare(const T *lhs, const T *rhs) {
- if (*lhs < *rhs) {
- return -1;
- } else if (*lhs > *rhs) {
- return 1;
- } else {
- return 0;
- }
-}
-
-/* ======================================================================= */
-/* PLL */
-/* ======================================================================= */
-
-static const size_t kMinSamplesToStartPrime = 3;
-static const size_t kMinSamplesToStopPrime = VideoFrameScheduler::kHistorySize;
-static const size_t kMinSamplesToEstimatePeriod = 3;
-static const size_t kMaxSamplesToEstimatePeriod = VideoFrameScheduler::kHistorySize;
-
-static const size_t kPrecision = 12;
-static const int64_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
-static const int64_t kMultiplesThresholdDiv = 4; // 25%
-static const int64_t kReFitThresholdDiv = 100; // 1%
-static const nsecs_t kMaxAllowedFrameSkip = kNanosIn1s; // 1 sec
-static const nsecs_t kMinPeriod = kNanosIn1s / 120; // 120Hz
-static const nsecs_t kRefitRefreshPeriod = 10 * kNanosIn1s; // 10 sec
-
-VideoFrameScheduler::PLL::PLL()
- : mPeriod(-1),
- mPhase(0),
- mPrimed(false),
- mSamplesUsedForPriming(0),
- mLastTime(-1),
- mNumSamples(0) {
-}
-
-void VideoFrameScheduler::PLL::reset(float fps) {
- //test();
-
- mSamplesUsedForPriming = 0;
- mLastTime = -1;
-
- // set up or reset video PLL
- if (fps <= 0.f) {
- mPeriod = -1;
- mPrimed = false;
- } else {
- ALOGV("reset at %.1f fps", fps);
- mPeriod = (nsecs_t)(1e9 / fps + 0.5);
- mPrimed = true;
- }
-
- restart();
-}
-
-// reset PLL but keep previous period estimate
-void VideoFrameScheduler::PLL::restart() {
- mNumSamples = 0;
- mPhase = -1;
-}
-
-#if 0
-
-void VideoFrameScheduler::PLL::test() {
- nsecs_t period = kNanosIn1s / 60;
- mTimes[0] = 0;
- mTimes[1] = period;
- mTimes[2] = period * 3;
- mTimes[3] = period * 4;
- mTimes[4] = period * 7;
- mTimes[5] = period * 8;
- mTimes[6] = period * 10;
- mTimes[7] = period * 12;
- mNumSamples = 8;
- int64_t a, b, err;
- fit(0, period * 12 / 7, 8, &a, &b, &err);
- // a = 0.8(5)+
- // b = -0.14097(2)+
- // err = 0.2750578(703)+
- ALOGD("a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
- (long long)a, (a / (float)(1 << kPrecision)),
- (long long)b, (b / (float)(1 << kPrecision)),
- (long long)err, (err / (float)(1 << (kPrecision * 2))));
-}
-
-#endif
-
-bool VideoFrameScheduler::PLL::fit(
- nsecs_t phase, nsecs_t period, size_t numSamplesToUse,
- int64_t *a, int64_t *b, int64_t *err) {
- if (numSamplesToUse > mNumSamples) {
- numSamplesToUse = mNumSamples;
- }
-
- if ((period >> kPrecision) == 0 ) {
- ALOGW("Period is 0, or after including precision is 0 - would cause div0, returning");
- return false;
- }
-
- int64_t sumX = 0;
- int64_t sumXX = 0;
- int64_t sumXY = 0;
- int64_t sumYY = 0;
- int64_t sumY = 0;
-
- int64_t x = 0; // x usually is in [0..numSamplesToUse)
- nsecs_t lastTime;
- for (size_t i = 0; i < numSamplesToUse; i++) {
- size_t ix = (mNumSamples - numSamplesToUse + i) % kHistorySize;
- nsecs_t time = mTimes[ix];
- if (i > 0) {
- x += divRound(time - lastTime, period);
- }
- // y is usually in [-numSamplesToUse..numSamplesToUse+kRefitRefreshPeriod/kMinPeriod) << kPrecision
- // ideally in [0..numSamplesToUse), but shifted by -numSamplesToUse during
- // priming, and possibly shifted by up to kRefitRefreshPeriod/kMinPeriod
- // while we are not refitting.
- int64_t y = divRound(time - phase, period >> kPrecision);
- sumX += x;
- sumY += y;
- sumXX += x * x;
- sumXY += x * y;
- sumYY += y * y;
- lastTime = time;
- }
-
- int64_t div = (int64_t)numSamplesToUse * sumXX - sumX * sumX;
- if (div == 0) {
- return false;
- }
-
- int64_t a_nom = (int64_t)numSamplesToUse * sumXY - sumX * sumY;
- int64_t b_nom = sumXX * sumY - sumX * sumXY;
- *a = divRound(a_nom, div);
- *b = divRound(b_nom, div);
- // don't use a and b directly as the rounding error is significant
- *err = sumYY - divRound(a_nom * sumXY + b_nom * sumY, div);
- ALOGV("fitting[%zu] a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
- numSamplesToUse,
- (long long)*a, (*a / (float)(1 << kPrecision)),
- (long long)*b, (*b / (float)(1 << kPrecision)),
- (long long)*err, (*err / (float)(1 << (kPrecision * 2))));
- return true;
-}
-
-void VideoFrameScheduler::PLL::prime(size_t numSamplesToUse) {
- if (numSamplesToUse > mNumSamples) {
- numSamplesToUse = mNumSamples;
- }
- CHECK(numSamplesToUse >= 3); // must have at least 3 samples
-
- // estimate video framerate from deltas between timestamps, and
- // 2nd order deltas
- Vector<nsecs_t> deltas;
- nsecs_t lastTime, firstTime;
- for (size_t i = 0; i < numSamplesToUse; ++i) {
- size_t index = (mNumSamples - numSamplesToUse + i) % kHistorySize;
- nsecs_t time = mTimes[index];
- if (i > 0) {
- if (time - lastTime > kMinPeriod) {
- //ALOGV("delta: %lld", (long long)(time - lastTime));
- deltas.push(time - lastTime);
- }
- } else {
- firstTime = time;
- }
- lastTime = time;
- }
- deltas.sort(compare<nsecs_t>);
- size_t numDeltas = deltas.size();
- if (numDeltas > 1) {
- nsecs_t deltaMinLimit = max(deltas[0] / kMultiplesThresholdDiv, kMinPeriod);
- nsecs_t deltaMaxLimit = deltas[numDeltas / 2] * kMultiplesThresholdDiv;
- for (size_t i = numDeltas / 2 + 1; i < numDeltas; ++i) {
- if (deltas[i] > deltaMaxLimit) {
- deltas.resize(i);
- numDeltas = i;
- break;
- }
- }
- for (size_t i = 1; i < numDeltas; ++i) {
- nsecs_t delta2nd = deltas[i] - deltas[i - 1];
- if (delta2nd >= deltaMinLimit) {
- //ALOGV("delta2: %lld", (long long)(delta2nd));
- deltas.push(delta2nd);
- }
- }
- }
-
- // use the one that yields the best match
- int64_t bestScore;
- for (size_t i = 0; i < deltas.size(); ++i) {
- nsecs_t delta = deltas[i];
- int64_t score = 0;
-#if 1
- // simplest score: number of deltas that are near multiples
- size_t matches = 0;
- for (size_t j = 0; j < deltas.size(); ++j) {
- nsecs_t err = periodicError(deltas[j], delta);
- if (err < delta / kMultiplesThresholdDiv) {
- ++matches;
- }
- }
- score = matches;
-#if 0
- // could be weighed by the (1 - normalized error)
- if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
- int64_t a, b, err;
- fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
- err = (1 << (2 * kPrecision)) - err;
- score *= max(0, err);
- }
-#endif
-#else
- // or use the error as a negative score
- if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
- int64_t a, b, err;
- fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
- score = -delta * err;
- }
-#endif
- if (i == 0 || score > bestScore) {
- bestScore = score;
- mPeriod = delta;
- mPhase = firstTime;
- }
- }
- ALOGV("priming[%zu] phase:%lld period:%lld",
- numSamplesToUse, (long long)mPhase, (long long)mPeriod);
-}
-
-nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) {
- if (mLastTime >= 0
- // if time goes backward, or we skipped rendering
- && (time > mLastTime + kMaxAllowedFrameSkip || time < mLastTime)) {
- restart();
- }
-
- mLastTime = time;
- mTimes[mNumSamples % kHistorySize] = time;
- ++mNumSamples;
-
- bool doFit = time > mRefitAt;
- if ((mPeriod <= 0 || !mPrimed) && mNumSamples >= kMinSamplesToStartPrime) {
- prime(kMinSamplesToStopPrime);
- ++mSamplesUsedForPriming;
- doFit = true;
- }
- if (mPeriod > 0 && mNumSamples >= kMinSamplesToEstimatePeriod) {
- if (mPhase < 0) {
- // initialize phase to the current render time
- mPhase = time;
- doFit = true;
- } else if (!doFit) {
- int64_t err = periodicError(time - mPhase, mPeriod);
- doFit = err > mPeriod / kReFitThresholdDiv;
- }
-
- if (doFit) {
- int64_t a, b, err;
- if (!fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err)) {
- // samples are not suitable for fitting. this means they are
- // also not suitable for priming.
- ALOGV("could not fit - keeping old period:%lld", (long long)mPeriod);
- return mPeriod;
- }
-
- mRefitAt = time + kRefitRefreshPeriod;
-
- mPhase += (mPeriod * b) >> kPrecision;
- mPeriod = (mPeriod * a) >> kPrecision;
- ALOGV("new phase:%lld period:%lld", (long long)mPhase, (long long)mPeriod);
-
- if (err < kErrorThreshold) {
- if (!mPrimed && mSamplesUsedForPriming >= kMinSamplesToStopPrime) {
- mPrimed = true;
- }
- } else {
- mPrimed = false;
- mSamplesUsedForPriming = 0;
- }
- }
- }
- return mPeriod;
-}
-
-nsecs_t VideoFrameScheduler::PLL::getPeriod() const {
- return mPrimed ? mPeriod : 0;
-}
-
-/* ======================================================================= */
-/* Frame Scheduler */
-/* ======================================================================= */
-
-static const nsecs_t kDefaultVsyncPeriod = kNanosIn1s / 60; // 60Hz
-static const nsecs_t kVsyncRefreshPeriod = kNanosIn1s; // 1 sec
-
-VideoFrameScheduler::VideoFrameScheduler()
- : mVsyncTime(0),
- mVsyncPeriod(0),
- mVsyncRefreshAt(0),
- mLastVsyncTime(-1),
- mTimeCorrection(0) {
+VideoFrameScheduler::VideoFrameScheduler() : VideoFrameSchedulerBase() {
}
void VideoFrameScheduler::updateVsync() {
mVsyncRefreshAt = systemTime(SYSTEM_TIME_MONOTONIC) + kVsyncRefreshPeriod;
- mVsyncPeriod = 0;
mVsyncTime = 0;
+ mVsyncPeriod = 0;
- // TODO: schedule frames for the destination surface
- // For now, surface flinger only schedules frames on the primary display
if (mComposer == NULL) {
String16 name("SurfaceFlinger");
sp<IServiceManager> sm = defaultServiceManager();
@@ -368,136 +60,6 @@
}
}
-void VideoFrameScheduler::init(float videoFps) {
- updateVsync();
-
- mLastVsyncTime = -1;
- mTimeCorrection = 0;
-
- mPll.reset(videoFps);
-}
-
-void VideoFrameScheduler::restart() {
- mLastVsyncTime = -1;
- mTimeCorrection = 0;
-
- mPll.restart();
-}
-
-nsecs_t VideoFrameScheduler::getVsyncPeriod() {
- if (mVsyncPeriod > 0) {
- return mVsyncPeriod;
- }
- return kDefaultVsyncPeriod;
-}
-
-float VideoFrameScheduler::getFrameRate() {
- nsecs_t videoPeriod = mPll.getPeriod();
- if (videoPeriod > 0) {
- return 1e9 / videoPeriod;
- }
- return 0.f;
-}
-
-nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
- nsecs_t origRenderTime = renderTime;
-
- nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
- if (now >= mVsyncRefreshAt) {
- updateVsync();
- }
-
- // without VSYNC info, there is nothing to do
- if (mVsyncPeriod == 0) {
- ALOGV("no vsync: render=%lld", (long long)renderTime);
- return renderTime;
- }
-
- // ensure vsync time is well before (corrected) render time
- if (mVsyncTime > renderTime - 4 * mVsyncPeriod) {
- mVsyncTime -=
- ((mVsyncTime - renderTime) / mVsyncPeriod + 5) * mVsyncPeriod;
- }
-
- // Video presentation takes place at the VSYNC _after_ renderTime. Adjust renderTime
- // so this effectively becomes a rounding operation (to the _closest_ VSYNC.)
- renderTime -= mVsyncPeriod / 2;
-
- const nsecs_t videoPeriod = mPll.addSample(origRenderTime);
- if (videoPeriod > 0) {
- // Smooth out rendering
- size_t N = 12;
- nsecs_t fiveSixthDev =
- abs(((videoPeriod * 5 + mVsyncPeriod) % (mVsyncPeriod * 6)) - mVsyncPeriod)
- / (mVsyncPeriod / 100);
- // use 20 samples if we are doing 5:6 ratio +- 1% (e.g. playing 50Hz on 60Hz)
- if (fiveSixthDev < 12) { /* 12% / 6 = 2% */
- N = 20;
- }
-
- nsecs_t offset = 0;
- nsecs_t edgeRemainder = 0;
- for (size_t i = 1; i <= N; i++) {
- offset +=
- (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
- edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
- }
- mTimeCorrection += mVsyncPeriod / 2 - offset / (nsecs_t)N;
- renderTime += mTimeCorrection;
- nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
- edgeRemainder = abs(edgeRemainder / (nsecs_t)N - mVsyncPeriod / 2);
- if (edgeRemainder <= mVsyncPeriod / 3) {
- correctionLimit /= 2;
- }
-
- // estimate how many VSYNCs a frame will spend on the display
- nsecs_t nextVsyncTime =
- renderTime + mVsyncPeriod - ((renderTime - mVsyncTime) % mVsyncPeriod);
- if (mLastVsyncTime >= 0) {
- size_t minVsyncsPerFrame = videoPeriod / mVsyncPeriod;
- size_t vsyncsForLastFrame = divRound(nextVsyncTime - mLastVsyncTime, mVsyncPeriod);
- bool vsyncsPerFrameAreNearlyConstant =
- periodicError(videoPeriod, mVsyncPeriod) / (mVsyncPeriod / 20) == 0;
-
- if (mTimeCorrection > correctionLimit &&
- (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame > minVsyncsPerFrame)) {
- // remove a VSYNC
- mTimeCorrection -= mVsyncPeriod / 2;
- renderTime -= mVsyncPeriod / 2;
- nextVsyncTime -= mVsyncPeriod;
- if (vsyncsForLastFrame > 0)
- --vsyncsForLastFrame;
- } else if (mTimeCorrection < -correctionLimit &&
- (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
- // add a VSYNC
- mTimeCorrection += mVsyncPeriod / 2;
- renderTime += mVsyncPeriod / 2;
- nextVsyncTime += mVsyncPeriod;
- if (vsyncsForLastFrame < ULONG_MAX)
- ++vsyncsForLastFrame;
- } else if (mTimeCorrection < -correctionLimit * 2
- || mTimeCorrection > correctionLimit * 2) {
- ALOGW("correction beyond limit: %lld vs %lld (vsyncs for last frame: %zu, min: %zu)"
- " restarting. render=%lld",
- (long long)mTimeCorrection, (long long)correctionLimit,
- vsyncsForLastFrame, minVsyncsPerFrame, (long long)origRenderTime);
- restart();
- return origRenderTime;
- }
-
- ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
- }
- mLastVsyncTime = nextVsyncTime;
- }
-
- // align rendertime to the center between VSYNC edges
- renderTime -= (renderTime - mVsyncTime) % mVsyncPeriod;
- renderTime += mVsyncPeriod / 2;
- ALOGV("adjusting render: %lld => %lld", (long long)origRenderTime, (long long)renderTime);
- ATRACE_INT("FRAME_FLIP_IN(ms)", (renderTime - now) / 1000000);
- return renderTime;
-}
-
void VideoFrameScheduler::release() {
mComposer.clear();
}
@@ -507,4 +69,3 @@
}
} // namespace android
-
diff --git a/media/libstagefright/VideoFrameScheduler2.cpp b/media/libstagefright/VideoFrameScheduler2.cpp
new file mode 100644
index 0000000..e02ae7d
--- /dev/null
+++ b/media/libstagefright/VideoFrameScheduler2.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoFrameScheduler2"
+#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+#include <utils/Trace.h>
+
+#include <algorithm>
+#include <jni.h>
+#include <math.h>
+
+#include <android/choreographer.h>
+#include <android/looper.h>
+#include <media/stagefright/VideoFrameScheduler2.h>
+#include <mediaplayer2/JavaVMHelper.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+namespace android {
+
+static void getVsyncOffset(long* appVsyncOffsetPtr, long* sfVsyncOffsetPtr);
+
+/* ======================================================================= */
+/* VsyncTracker */
+/* ======================================================================= */
+
+class VsyncTracker : public RefBase {
+public:
+ VsyncTracker();
+ ~VsyncTracker() {}
+ long getVsyncPeriod();
+ long getVsyncTime(long periodOffset);
+ void addSample(long timestamp);
+
+private:
+ static const int kMaxSamples = 32;
+ static const int kMinSamplesForUpdate = 6;
+ int mNumSamples;
+ int mFirstSample;
+ long mReferenceTime;
+ long mPhase;
+ long mPeriod;
+ long mTimestampSamples[kMaxSamples];
+ Mutex mLock;
+
+ void updateModelLocked();
+};
+
+VsyncTracker::VsyncTracker()
+ : mNumSamples(0),
+ mFirstSample(0),
+ mReferenceTime(0),
+ mPhase(0),
+ mPeriod(0) {
+ for (int i = 0; i < kMaxSamples; i++) {
+ mTimestampSamples[i] = 0;
+ }
+}
+
+long VsyncTracker::getVsyncPeriod() {
+ Mutex::Autolock dataLock(mLock);
+ return mPeriod;
+}
+
+long VsyncTracker::getVsyncTime(long periodOffset) {
+ Mutex::Autolock dataLock(mLock);
+ const long now = systemTime();
+ long phase = mReferenceTime + mPhase;
+ return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
+}
+
+void VsyncTracker::addSample(long timestamp) {
+ Mutex::Autolock dataLock(mLock);
+ if (mNumSamples == 0) {
+ mPhase = 0;
+ mReferenceTime = timestamp;
+ }
+ int idx = (mFirstSample + mNumSamples) % kMaxSamples;
+ mTimestampSamples[idx] = timestamp;
+ if (mNumSamples < kMaxSamples) {
+ mNumSamples++;
+ } else {
+ mFirstSample = (mFirstSample + 1) % kMaxSamples;
+ }
+ updateModelLocked();
+}
+
+void VsyncTracker::updateModelLocked() {
+ if (mNumSamples < kMinSamplesForUpdate) {
+ return;
+ }
+ long durationSum = 0;
+ long minDuration = LONG_MAX;
+ long maxDuration = 0;
+
+ for (int i = 1; i < mNumSamples; i++) {
+ int idx = (mFirstSample + i) % kMaxSamples;
+ int prev = (idx + kMaxSamples - 1) % kMaxSamples;
+ long duration = mTimestampSamples[idx] - mTimestampSamples[prev];
+ durationSum += duration;
+ minDuration = min(minDuration, duration);
+ maxDuration = max(maxDuration, duration);
+ }
+
+ durationSum -= (minDuration + maxDuration);
+ mPeriod = durationSum / (mNumSamples - 3);
+
+ double sampleAvgX = 0.0;
+ double sampleAvgY = 0.0;
+ double scale = 2.0 * M_PI / (double) mPeriod;
+
+ for (int i = 1; i < mNumSamples; i++) {
+ int idx = (mFirstSample + i) % kMaxSamples;
+ long sample = mTimestampSamples[idx] - mReferenceTime;
+ double samplePhase = (double) (sample % mPeriod) * scale;
+ sampleAvgX += cos(samplePhase);
+ sampleAvgY += sin(samplePhase);
+ }
+
+ sampleAvgX /= (double) mNumSamples - 1.0;
+ sampleAvgY /= (double) mNumSamples - 1.0;
+ mPhase = (long) (atan2(sampleAvgY, sampleAvgX) / scale);
+}
+
+static void frameCallback(long frameTimeNanos, void* data) {
+ if (data == NULL) {
+ return;
+ }
+ sp<VsyncTracker> vsyncTracker(static_cast<VsyncTracker*>(data));
+ vsyncTracker->addSample(frameTimeNanos);
+ AChoreographer_postFrameCallback(AChoreographer_getInstance(),
+ frameCallback, static_cast<void*>(vsyncTracker.get()));
+}
+
+/* ======================================================================= */
+/* JNI */
+/* ======================================================================= */
+
+static void getVsyncOffset(long* appVsyncOffsetPtr, long* sfVsyncOffsetPtr) {
+ static const long kOneMillisecInNanosec = 1000000;
+ static const long kOneSecInNanosec = kOneMillisecInNanosec * 1000;
+
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ jclass jDisplayManagerGlobalCls = env->FindClass(
+ "android/hardware/display/DisplayManagerGlobal");
+ jclass jDisplayCls = env->FindClass("android/view/Display");
+
+ jmethodID jGetInstance = env->GetStaticMethodID(jDisplayManagerGlobalCls,
+ "getInstance", "()Landroid/hardware/display/DisplayManagerGlobal;");
+ jobject javaDisplayManagerGlobalObj = env->CallStaticObjectMethod(
+ jDisplayManagerGlobalCls, jGetInstance);
+
+ jfieldID jDEFAULT_DISPLAY = env->GetStaticFieldID(jDisplayCls, "DEFAULT_DISPLAY", "I");
+ jint DEFAULT_DISPLAY = env->GetStaticIntField(jDisplayCls, jDEFAULT_DISPLAY);
+
+ jmethodID jgetRealDisplay = env->GetMethodID(jDisplayManagerGlobalCls,
+ "getRealDisplay", "(I)Landroid/view/Display;");
+ jobject javaDisplayObj = env->CallObjectMethod(
+ javaDisplayManagerGlobalObj, jgetRealDisplay, DEFAULT_DISPLAY);
+
+ jmethodID jGetRefreshRate = env->GetMethodID(jDisplayCls, "getRefreshRate", "()F");
+ jfloat javaRefreshRate = env->CallFloatMethod(javaDisplayObj, jGetRefreshRate);
+ long vsyncPeriod = (long) (kOneSecInNanosec / (float) javaRefreshRate);
+
+ jmethodID jGetAppVsyncOffsetNanos = env->GetMethodID(
+ jDisplayCls, "getAppVsyncOffsetNanos", "()J");
+ jlong javaAppVsyncOffset = env->CallLongMethod(javaDisplayObj, jGetAppVsyncOffsetNanos);
+ *appVsyncOffsetPtr = (long) javaAppVsyncOffset;
+
+ jmethodID jGetPresentationDeadlineNanos = env->GetMethodID(
+ jDisplayCls, "getPresentationDeadlineNanos", "()J");
+ jlong javaPresentationDeadline = env->CallLongMethod(
+ javaDisplayObj, jGetPresentationDeadlineNanos);
+
+ *sfVsyncOffsetPtr = vsyncPeriod - ((long) javaPresentationDeadline - kOneMillisecInNanosec);
+}
+
+/* ======================================================================= */
+/* Choreographer Thread */
+/* ======================================================================= */
+
+struct ChoreographerThread : public Thread {
+ ChoreographerThread(bool canCallJava);
+ status_t init(void* data);
+ virtual status_t readyToRun() override;
+ virtual bool threadLoop() override;
+
+protected:
+ virtual ~ChoreographerThread() {}
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(ChoreographerThread);
+ void* mData;
+};
+
+ChoreographerThread::ChoreographerThread(bool canCallJava) : Thread(canCallJava) {
+}
+
+status_t ChoreographerThread::init(void* data) {
+ if (data == NULL) {
+ return NO_INIT;
+ }
+ mData = data;
+ return OK;
+}
+
+status_t ChoreographerThread::readyToRun() {
+ ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+ if (AChoreographer_getInstance() == NULL) {
+ return NO_INIT;
+ }
+ AChoreographer_postFrameCallback(AChoreographer_getInstance(), frameCallback, mData);
+ return OK;
+}
+
+bool ChoreographerThread::threadLoop() {
+ ALooper_pollOnce(-1, nullptr, nullptr, nullptr);
+ return true;
+}
+
+/* ======================================================================= */
+/* Frame Scheduler */
+/* ======================================================================= */
+
+VideoFrameScheduler2::VideoFrameScheduler2() : VideoFrameSchedulerBase() {
+
+ getVsyncOffset(&mAppVsyncOffset, &mSfVsyncOffset);
+
+ Mutex::Autolock threadLock(mLock);
+ mChoreographerThread = new ChoreographerThread(true);
+
+ mVsyncTracker = new VsyncTracker();
+ if (mChoreographerThread->init(static_cast<void*>(mVsyncTracker.get())) != OK) {
+ mChoreographerThread.clear();
+ }
+ if (mChoreographerThread != NULL && mChoreographerThread->run("Choreographer") != OK) {
+ mChoreographerThread.clear();
+ }
+}
+
+void VideoFrameScheduler2::updateVsync() {
+ mVsyncTime = 0;
+ mVsyncPeriod = 0;
+
+ if (mVsyncTracker != NULL) {
+ mVsyncPeriod = mVsyncTracker->getVsyncPeriod();
+ mVsyncTime = mVsyncTracker->getVsyncTime(mSfVsyncOffset - mAppVsyncOffset);
+ }
+ mVsyncRefreshAt = systemTime(SYSTEM_TIME_MONOTONIC) + kVsyncRefreshPeriod;
+}
+
+void VideoFrameScheduler2::release() {
+ // Do not change order: stop the choreographer thread before clearing the tracker its frame callback points at.
+ {
+ Mutex::Autolock threadLock(mLock);
+ mChoreographerThread->requestExitAndWait();
+ mChoreographerThread.clear();
+ }
+
+ mVsyncTracker.clear();
+}
+
+VideoFrameScheduler2::~VideoFrameScheduler2() {
+ release();
+}
+
+} // namespace android
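For reference, VsyncTracker::updateModelLocked() above estimates the display period as a trimmed mean of the sampled vsync deltas (smallest and largest dropped) and the phase as a circular mean of each sample's offset within that period. A standalone sketch of the same arithmetic on a plain vector, with no AOSP dependencies; names and the minimum-sample guard mirror the code above:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

struct VsyncModel { int64_t period; int64_t phase; };

static VsyncModel estimateVsync(const std::vector<int64_t> &ts) {
    VsyncModel m{0, 0};
    if (ts.size() < 6) return m;                       // kMinSamplesForUpdate
    std::vector<int64_t> deltas;
    for (size_t i = 1; i < ts.size(); ++i) deltas.push_back(ts[i] - ts[i - 1]);
    int64_t sum = 0, lo = deltas[0], hi = deltas[0];
    for (int64_t d : deltas) { sum += d; lo = std::min(lo, d); hi = std::max(hi, d); }
    m.period = (sum - lo - hi) / (int64_t)(deltas.size() - 2);   // trimmed mean
    if (m.period <= 0) return m;                       // degenerate input, no model

    double scale = 2.0 * M_PI / (double)m.period, x = 0.0, y = 0.0;
    for (size_t i = 1; i < ts.size(); ++i) {
        double ph = (double)((ts[i] - ts[0]) % m.period) * scale;  // offset within one period
        x += std::cos(ph);
        y += std::sin(ph);
    }
    m.phase = (int64_t)(std::atan2(y, x) / scale);     // circular mean, back to nanoseconds
    return m;
}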
diff --git a/media/libstagefright/VideoFrameSchedulerBase.cpp b/media/libstagefright/VideoFrameSchedulerBase.cpp
new file mode 100644
index 0000000..77107ff
--- /dev/null
+++ b/media/libstagefright/VideoFrameSchedulerBase.cpp
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoFrameSchedulerBase"
+#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Trace.h>
+#include <utils/Vector.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/VideoFrameSchedulerBase.h>
+
+namespace android {
+
+template<class T>
+static int compare(const T *lhs, const T *rhs) {
+ if (*lhs < *rhs) {
+ return -1;
+ } else if (*lhs > *rhs) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* ======================================================================= */
+/* PLL */
+/* ======================================================================= */
+
+static const size_t kMinSamplesToStartPrime = 3;
+static const size_t kMinSamplesToStopPrime = VideoFrameSchedulerBase::kHistorySize;
+static const size_t kMinSamplesToEstimatePeriod = 3;
+static const size_t kMaxSamplesToEstimatePeriod = VideoFrameSchedulerBase::kHistorySize;
+
+static const size_t kPrecision = 12;
+static const int64_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
+static const int64_t kMultiplesThresholdDiv = 4; // 25%
+static const int64_t kReFitThresholdDiv = 100; // 1%
+static const nsecs_t kMaxAllowedFrameSkip = VideoFrameSchedulerBase::kNanosIn1s; // 1 sec
+static const nsecs_t kMinPeriod = VideoFrameSchedulerBase::kNanosIn1s / 120; // 120Hz
+static const nsecs_t kRefitRefreshPeriod = 10 * VideoFrameSchedulerBase::kNanosIn1s; // 10 sec
+
+VideoFrameSchedulerBase::PLL::PLL()
+ : mPeriod(-1),
+ mPhase(0),
+ mPrimed(false),
+ mSamplesUsedForPriming(0),
+ mLastTime(-1),
+ mNumSamples(0) {
+}
+
+void VideoFrameSchedulerBase::PLL::reset(float fps) {
+ //test();
+
+ mSamplesUsedForPriming = 0;
+ mLastTime = -1;
+
+ // set up or reset video PLL
+ if (fps <= 0.f) {
+ mPeriod = -1;
+ mPrimed = false;
+ } else {
+ ALOGV("reset at %.1f fps", fps);
+ mPeriod = (nsecs_t)(1e9 / fps + 0.5);
+ mPrimed = true;
+ }
+
+ restart();
+}
+
+// reset PLL but keep previous period estimate
+void VideoFrameSchedulerBase::PLL::restart() {
+ mNumSamples = 0;
+ mPhase = -1;
+}
+
+#if 0
+
+void VideoFrameSchedulerBase::PLL::test() {
+ nsecs_t period = VideoFrameSchedulerBase::kNanosIn1s / 60;
+ mTimes[0] = 0;
+ mTimes[1] = period;
+ mTimes[2] = period * 3;
+ mTimes[3] = period * 4;
+ mTimes[4] = period * 7;
+ mTimes[5] = period * 8;
+ mTimes[6] = period * 10;
+ mTimes[7] = period * 12;
+ mNumSamples = 8;
+ int64_t a, b, err;
+ fit(0, period * 12 / 7, 8, &a, &b, &err);
+ // a = 0.8(5)+
+ // b = -0.14097(2)+
+ // err = 0.2750578(703)+
+ ALOGD("a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
+ (long long)a, (a / (float)(1 << kPrecision)),
+ (long long)b, (b / (float)(1 << kPrecision)),
+ (long long)err, (err / (float)(1 << (kPrecision * 2))));
+}
+
+#endif
+
+bool VideoFrameSchedulerBase::PLL::fit(
+ nsecs_t phase, nsecs_t period, size_t numSamplesToUse,
+ int64_t *a, int64_t *b, int64_t *err) {
+ if (numSamplesToUse > mNumSamples) {
+ numSamplesToUse = mNumSamples;
+ }
+
+ if ((period >> kPrecision) == 0) {
+ ALOGW("Period is 0, or after including precision is 0 - would cause div0, returning");
+ return false;
+ }
+
+ int64_t sumX = 0;
+ int64_t sumXX = 0;
+ int64_t sumXY = 0;
+ int64_t sumYY = 0;
+ int64_t sumY = 0;
+
+ int64_t x = 0; // x usually is in [0..numSamplesToUse)
+ nsecs_t lastTime;
+ for (size_t i = 0; i < numSamplesToUse; i++) {
+ size_t ix = (mNumSamples - numSamplesToUse + i) % kHistorySize;
+ nsecs_t time = mTimes[ix];
+ if (i > 0) {
+ x += divRound(time - lastTime, period);
+ }
+ // y is usually in [-numSamplesToUse..numSamplesToUse+kRefitRefreshPeriod/kMinPeriod) << kPrecision
+ // ideally in [0..numSamplesToUse), but shifted by -numSamplesToUse during
+ // priming, and possibly shifted by up to kRefitRefreshPeriod/kMinPeriod
+ // while we are not refitting.
+ int64_t y = divRound(time - phase, period >> kPrecision);
+ sumX += x;
+ sumY += y;
+ sumXX += x * x;
+ sumXY += x * y;
+ sumYY += y * y;
+ lastTime = time;
+ }
+
+ int64_t div = (int64_t)numSamplesToUse * sumXX - sumX * sumX;
+ if (div == 0) {
+ return false;
+ }
+
+ int64_t a_nom = (int64_t)numSamplesToUse * sumXY - sumX * sumY;
+ int64_t b_nom = sumXX * sumY - sumX * sumXY;
+ *a = divRound(a_nom, div);
+ *b = divRound(b_nom, div);
+ // don't use a and b directly as the rounding error is significant
+ *err = sumYY - divRound(a_nom * sumXY + b_nom * sumY, div);
+ ALOGV("fitting[%zu] a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
+ numSamplesToUse,
+ (long long)*a, (*a / (float)(1 << kPrecision)),
+ (long long)*b, (*b / (float)(1 << kPrecision)),
+ (long long)*err, (*err / (float)(1 << (kPrecision * 2))));
+ return true;
+}
+
+void VideoFrameSchedulerBase::PLL::prime(size_t numSamplesToUse) {
+ if (numSamplesToUse > mNumSamples) {
+ numSamplesToUse = mNumSamples;
+ }
+ CHECK(numSamplesToUse >= 3); // must have at least 3 samples
+
+ // estimate video framerate from deltas between timestamps, and
+ // 2nd order deltas
+ Vector<nsecs_t> deltas;
+ nsecs_t lastTime, firstTime;
+ for (size_t i = 0; i < numSamplesToUse; ++i) {
+ size_t index = (mNumSamples - numSamplesToUse + i) % kHistorySize;
+ nsecs_t time = mTimes[index];
+ if (i > 0) {
+ if (time - lastTime > kMinPeriod) {
+ //ALOGV("delta: %lld", (long long)(time - lastTime));
+ deltas.push(time - lastTime);
+ }
+ } else {
+ firstTime = time;
+ }
+ lastTime = time;
+ }
+ deltas.sort(compare<nsecs_t>);
+ size_t numDeltas = deltas.size();
+ if (numDeltas > 1) {
+ nsecs_t deltaMinLimit = max(deltas[0] / kMultiplesThresholdDiv, kMinPeriod);
+ nsecs_t deltaMaxLimit = deltas[numDeltas / 2] * kMultiplesThresholdDiv;
+ for (size_t i = numDeltas / 2 + 1; i < numDeltas; ++i) {
+ if (deltas[i] > deltaMaxLimit) {
+ deltas.resize(i);
+ numDeltas = i;
+ break;
+ }
+ }
+ for (size_t i = 1; i < numDeltas; ++i) {
+ nsecs_t delta2nd = deltas[i] - deltas[i - 1];
+ if (delta2nd >= deltaMinLimit) {
+ //ALOGV("delta2: %lld", (long long)(delta2nd));
+ deltas.push(delta2nd);
+ }
+ }
+ }
+
+ // use the one that yields the best match
+ int64_t bestScore;
+ for (size_t i = 0; i < deltas.size(); ++i) {
+ nsecs_t delta = deltas[i];
+ int64_t score = 0;
+#if 1
+ // simplest score: number of deltas that are near multiples
+ size_t matches = 0;
+ for (size_t j = 0; j < deltas.size(); ++j) {
+ nsecs_t err = periodicError(deltas[j], delta);
+ if (err < delta / kMultiplesThresholdDiv) {
+ ++matches;
+ }
+ }
+ score = matches;
+#if 0
+ // could be weighed by the (1 - normalized error)
+ if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
+ int64_t a, b, err;
+ fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
+ err = (1 << (2 * kPrecision)) - err;
+ score *= max(0, err);
+ }
+#endif
+#else
+ // or use the error as a negative score
+ if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
+ int64_t a, b, err;
+ fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
+ score = -delta * err;
+ }
+#endif
+ if (i == 0 || score > bestScore) {
+ bestScore = score;
+ mPeriod = delta;
+ mPhase = firstTime;
+ }
+ }
+ ALOGV("priming[%zu] phase:%lld period:%lld",
+ numSamplesToUse, (long long)mPhase, (long long)mPeriod);
+}
+
+nsecs_t VideoFrameSchedulerBase::PLL::addSample(nsecs_t time) {
+ if (mLastTime >= 0
+ // if time goes backward, or we skipped rendering
+ && (time > mLastTime + kMaxAllowedFrameSkip || time < mLastTime)) {
+ restart();
+ }
+
+ mLastTime = time;
+ mTimes[mNumSamples % kHistorySize] = time;
+ ++mNumSamples;
+
+ bool doFit = time > mRefitAt;
+ if ((mPeriod <= 0 || !mPrimed) && mNumSamples >= kMinSamplesToStartPrime) {
+ prime(kMinSamplesToStopPrime);
+ ++mSamplesUsedForPriming;
+ doFit = true;
+ }
+ if (mPeriod > 0 && mNumSamples >= kMinSamplesToEstimatePeriod) {
+ if (mPhase < 0) {
+ // initialize phase to the current render time
+ mPhase = time;
+ doFit = true;
+ } else if (!doFit) {
+ int64_t err = periodicError(time - mPhase, mPeriod);
+ doFit = err > mPeriod / kReFitThresholdDiv;
+ }
+
+ if (doFit) {
+ int64_t a, b, err;
+ if (!fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err)) {
+ // samples are not suitable for fitting. this means they are
+ // also not suitable for priming.
+ ALOGV("could not fit - keeping old period:%lld", (long long)mPeriod);
+ return mPeriod;
+ }
+
+ mRefitAt = time + kRefitRefreshPeriod;
+
+ mPhase += (mPeriod * b) >> kPrecision;
+ mPeriod = (mPeriod * a) >> kPrecision;
+ ALOGV("new phase:%lld period:%lld", (long long)mPhase, (long long)mPeriod);
+
+ if (err < kErrorThreshold) {
+ if (!mPrimed && mSamplesUsedForPriming >= kMinSamplesToStopPrime) {
+ mPrimed = true;
+ }
+ } else {
+ mPrimed = false;
+ mSamplesUsedForPriming = 0;
+ }
+ }
+ }
+ return mPeriod;
+}
+
+nsecs_t VideoFrameSchedulerBase::PLL::getPeriod() const {
+ return mPrimed ? mPeriod : 0;
+}
+
+/* ======================================================================= */
+/* Frame Scheduler */
+/* ======================================================================= */
+
+VideoFrameSchedulerBase::VideoFrameSchedulerBase()
+ : mVsyncTime(0),
+ mVsyncPeriod(0),
+ mVsyncRefreshAt(0),
+ mLastVsyncTime(-1),
+ mTimeCorrection(0) {
+}
+
+void VideoFrameSchedulerBase::init(float videoFps) {
+ updateVsync();
+
+ mLastVsyncTime = -1;
+ mTimeCorrection = 0;
+
+ mPll.reset(videoFps);
+}
+
+void VideoFrameSchedulerBase::restart() {
+ mLastVsyncTime = -1;
+ mTimeCorrection = 0;
+
+ mPll.restart();
+}
+
+nsecs_t VideoFrameSchedulerBase::getVsyncPeriod() {
+ if (mVsyncPeriod > 0) {
+ return mVsyncPeriod;
+ }
+ return kDefaultVsyncPeriod;
+}
+
+float VideoFrameSchedulerBase::getFrameRate() {
+ nsecs_t videoPeriod = mPll.getPeriod();
+ if (videoPeriod > 0) {
+ return 1e9 / videoPeriod;
+ }
+ return 0.f;
+}
+
+nsecs_t VideoFrameSchedulerBase::schedule(nsecs_t renderTime) {
+ nsecs_t origRenderTime = renderTime;
+
+ nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+ if (now >= mVsyncRefreshAt) {
+ updateVsync();
+ }
+
+ // without VSYNC info, there is nothing to do
+ if (mVsyncPeriod == 0) {
+ ALOGV("no vsync: render=%lld", (long long)renderTime);
+ return renderTime;
+ }
+
+ // ensure vsync time is well before (corrected) render time
+ if (mVsyncTime > renderTime - 4 * mVsyncPeriod) {
+ mVsyncTime -=
+ ((mVsyncTime - renderTime) / mVsyncPeriod + 5) * mVsyncPeriod;
+ }
+
+ // Video presentation takes place at the VSYNC _after_ renderTime. Adjust renderTime
+ // so this effectively becomes a rounding operation (to the _closest_ VSYNC.)
+ renderTime -= mVsyncPeriod / 2;
+
+ const nsecs_t videoPeriod = mPll.addSample(origRenderTime);
+ if (videoPeriod > 0) {
+ // Smooth out rendering
+ size_t N = 12;
+ nsecs_t fiveSixthDev =
+ abs(((videoPeriod * 5 + mVsyncPeriod) % (mVsyncPeriod * 6)) - mVsyncPeriod)
+ / (mVsyncPeriod / 100);
+ // use 20 samples if we are doing 5:6 ratio +- 1% (e.g. playing 50Hz on 60Hz)
+ if (fiveSixthDev < 12) { /* 12% / 6 = 2% */
+ N = 20;
+ }
+
+ nsecs_t offset = 0;
+ nsecs_t edgeRemainder = 0;
+ for (size_t i = 1; i <= N; i++) {
+ offset +=
+ (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
+ edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
+ }
+ mTimeCorrection += mVsyncPeriod / 2 - offset / (nsecs_t)N;
+ renderTime += mTimeCorrection;
+ nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
+ edgeRemainder = abs(edgeRemainder / (nsecs_t)N - mVsyncPeriod / 2);
+ if (edgeRemainder <= mVsyncPeriod / 3) {
+ correctionLimit /= 2;
+ }
+
+ // estimate how many VSYNCs a frame will spend on the display
+ nsecs_t nextVsyncTime =
+ renderTime + mVsyncPeriod - ((renderTime - mVsyncTime) % mVsyncPeriod);
+ if (mLastVsyncTime >= 0) {
+ size_t minVsyncsPerFrame = videoPeriod / mVsyncPeriod;
+ size_t vsyncsForLastFrame = divRound(nextVsyncTime - mLastVsyncTime, mVsyncPeriod);
+ bool vsyncsPerFrameAreNearlyConstant =
+ periodicError(videoPeriod, mVsyncPeriod) / (mVsyncPeriod / 20) == 0;
+
+ if (mTimeCorrection > correctionLimit &&
+ (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame > minVsyncsPerFrame)) {
+ // remove a VSYNC
+ mTimeCorrection -= mVsyncPeriod / 2;
+ renderTime -= mVsyncPeriod / 2;
+ nextVsyncTime -= mVsyncPeriod;
+ if (vsyncsForLastFrame > 0)
+ --vsyncsForLastFrame;
+ } else if (mTimeCorrection < -correctionLimit &&
+ (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
+ // add a VSYNC
+ mTimeCorrection += mVsyncPeriod / 2;
+ renderTime += mVsyncPeriod / 2;
+ nextVsyncTime += mVsyncPeriod;
+ if (vsyncsForLastFrame < ULONG_MAX)
+ ++vsyncsForLastFrame;
+ } else if (mTimeCorrection < -correctionLimit * 2
+ || mTimeCorrection > correctionLimit * 2) {
+ ALOGW("correction beyond limit: %lld vs %lld (vsyncs for last frame: %zu, min: %zu)"
+ " restarting. render=%lld",
+ (long long)mTimeCorrection, (long long)correctionLimit,
+ vsyncsForLastFrame, minVsyncsPerFrame, (long long)origRenderTime);
+ restart();
+ return origRenderTime;
+ }
+
+ ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
+ }
+ mLastVsyncTime = nextVsyncTime;
+ }
+
+ // align rendertime to the center between VSYNC edges
+ renderTime -= (renderTime - mVsyncTime) % mVsyncPeriod;
+ renderTime += mVsyncPeriod / 2;
+ ALOGV("adjusting render: %lld => %lld", (long long)origRenderTime, (long long)renderTime);
+ ATRACE_INT("FRAME_FLIP_IN(ms)", (renderTime - now) / 1000000);
+ return renderTime;
+}
+
+VideoFrameSchedulerBase::~VideoFrameSchedulerBase() {}
+
+} // namespace android
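For readers following the PLL: fit() above is an ordinary least-squares line fit done in fixed point, where x counts elapsed vsync periods and y is the sample time measured against the current phase, scaled by 1 << kPrecision. In terms of the accumulators in the code (with N = numSamplesToUse and div = N*sumXX - sumX*sumX), it computes

    a   = (N*sumXY    - sumX*sumY)  / div
    b   = (sumXX*sumY - sumX*sumXY) / div
    err = sumYY - (a_nom*sumXY + b_nom*sumY) / div

with divRound() used for the divisions. err is the residual sum of squares of the fit, compared against kErrorThreshold to decide whether the PLL stays primed, and addSample() then applies a as a multiplicative correction to mPeriod and b (times the period) as an additive correction to mPhase.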
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
index 81777f1..218fe15 100644
--- a/media/libstagefright/bqhelper/Android.bp
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -25,7 +25,6 @@
],
shared_libs: [
- "libbase",
"libbinder",
"libcutils",
"libgui",
@@ -38,8 +37,6 @@
"libutils",
"android.hardware.graphics.bufferqueue@1.0",
-
- "libnativewindow", // TODO(b/62923479): use header library
],
export_shared_lib_headers: [
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 379d41e..e0f2683 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -1058,8 +1058,8 @@
}
}
-OMX_ERRORTYPE SoftAVC::setConfig(
- OMX_INDEXTYPE index, const OMX_PTR _params) {
+OMX_ERRORTYPE SoftAVC::internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR _params, bool *frameConfig) {
switch ((int)index) {
case OMX_IndexConfigVideoIntraVOPRefresh:
{
@@ -1125,7 +1125,7 @@
}
default:
- return SimpleSoftOMXComponent::setConfig(index, _params);
+ return SimpleSoftOMXComponent::internalSetConfig(index, _params, frameConfig);
}
}
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index a43cdf1..8253b7d 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -219,8 +219,8 @@
OMX_ERRORTYPE internalSetBitrateParams(
const OMX_VIDEO_PARAM_BITRATETYPE *bitrate);
- OMX_ERRORTYPE setConfig(
- OMX_INDEXTYPE index, const OMX_PTR _params);
+ OMX_ERRORTYPE internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR _params, bool *frameConfig);
OMX_ERRORTYPE getConfig(
OMX_INDEXTYPE index, const OMX_PTR _params);
diff --git a/media/libstagefright/codecs/flac/dec/Android.bp b/media/libstagefright/codecs/flac/dec/Android.bp
index 1674cb2..3d4a44f 100644
--- a/media/libstagefright/codecs/flac/dec/Android.bp
+++ b/media/libstagefright/codecs/flac/dec/Android.bp
@@ -29,7 +29,6 @@
},
shared_libs: [
- "libcutils",
"liblog",
"libstagefright_flacdec",
"libstagefright_omx",
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/rate_control.cpp b/media/libstagefright/codecs/m4v_h263/enc/src/rate_control.cpp
index 53149c1..ecc3217 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/rate_control.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/rate_control.cpp
@@ -377,15 +377,15 @@
/* In/out : Nr, B, Rr */
/* Return : Void */
/* Modified : */
+/* Input argument "video" is guaranteed non-null by caller */
/* ======================================================================== */
-
PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip)
{
rateControl *rc = video->rc[currLayer];
MultiPass *pMP = video->pMP[currLayer];
- if (video == NULL || rc == NULL || pMP == NULL)
+ if (rc == NULL || pMP == NULL)
return PV_FAIL;
rc->VBV_fullness -= (Int)(rc->bitrate / rc->framerate * num_skip); //rc[currLayer]->Rp;
@@ -524,6 +524,7 @@
/* In/out : rc->T */
/* Return : Void */
/* Modified : */
+/* Input argument "input" is guaranteed non-null by caller */
/* ================================================================================ */
void targetBitCalculation(void *input)
@@ -537,7 +538,7 @@
Int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound;
/* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */
- if (video == NULL || currVol == NULL || pMP == NULL || rc == NULL)
+ if (currVol == NULL || pMP == NULL || rc == NULL)
return;
/* some stuff about frame dropping remained here to be done because pMP cannot be inserted into updateRateControl()*/
@@ -693,6 +694,7 @@
/* In/out : rc->T and rc->Qc */
/* Return : Void */
/* Modified : */
+/* Input argument "input" is guaranteed non-null by caller */
/* ================================================================================ */
/* Mad based variable bit allocation + QP calculation with a new quadratic method */
@@ -708,7 +710,7 @@
float curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP;
- if (video == NULL || currVol == NULL || pMP == NULL || rc == NULL)
+ if (currVol == NULL || pMP == NULL || rc == NULL)
return;
/* Mad based variable bit allocation */
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 8d5f3e7..0f2ff17 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -33,6 +33,7 @@
{ OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level5 },
{ OMX_VIDEO_VP9Profile2, OMX_VIDEO_VP9Level5 },
{ OMX_VIDEO_VP9Profile2HDR, OMX_VIDEO_VP9Level5 },
+ { OMX_VIDEO_VP9Profile2HDR10Plus, OMX_VIDEO_VP9Level5 },
};
SoftVPX::SoftVPX(
@@ -84,6 +85,10 @@
return true;
}
+bool SoftVPX::supportDescribeHdr10PlusInfo() {
+ return true;
+}
+
status_t SoftVPX::initDecoder() {
mCtx = new vpx_codec_ctx_t;
vpx_codec_err_t vpx_err;
@@ -167,7 +172,12 @@
outHeader->nOffset = 0;
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * bpp * 3) / 2;
- outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
+ PrivInfo *privInfo = (PrivInfo *)mImg->user_priv;
+ outHeader->nTimeStamp = privInfo->mTimeStamp;
+ if (privInfo->mHdr10PlusInfo != nullptr) {
+ queueOutputFrameConfig(privInfo->mHdr10PlusInfo);
+ }
+
if (outputBufferSafe(outHeader)) {
uint8_t *dst = outHeader->pBuffer;
const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
@@ -275,7 +285,13 @@
}
}
- mTimeStamps[mTimeStampIdx] = inHeader->nTimeStamp;
+ mPrivInfo[mTimeStampIdx].mTimeStamp = inHeader->nTimeStamp;
+
+ if (inInfo->mFrameConfig) {
+ mPrivInfo[mTimeStampIdx].mHdr10PlusInfo = dequeueInputFrameConfig();
+ } else {
+ mPrivInfo[mTimeStampIdx].mHdr10PlusInfo.clear();
+ }
if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
mEOSStatus = INPUT_EOS_SEEN;
@@ -285,7 +301,7 @@
if (inHeader->nFilledLen > 0) {
vpx_codec_err_t err = vpx_codec_decode(
(vpx_codec_ctx_t *)mCtx, inHeader->pBuffer + inHeader->nOffset,
- inHeader->nFilledLen, &mTimeStamps[mTimeStampIdx], 0);
+ inHeader->nFilledLen, &mPrivInfo[mTimeStampIdx], 0);
if (err == VPX_CODEC_OK) {
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index b62b526..0aa8e9c 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -26,6 +26,8 @@
namespace android {
+struct ABuffer;
+
struct SoftVPX : public SoftVideoDecoderOMXComponent {
SoftVPX(const char *name,
const char *componentRole,
@@ -41,6 +43,7 @@
virtual void onPortFlushCompleted(OMX_U32 portIndex);
virtual void onReset();
virtual bool supportDescribeHdrStaticInfo();
+ virtual bool supportDescribeHdr10PlusInfo();
private:
enum {
@@ -60,7 +63,11 @@
void *mCtx;
bool mFrameParallelMode; // Frame parallel is only supported by VP9 decoder.
- OMX_TICKS mTimeStamps[kNumBuffers];
+ struct PrivInfo {
+ OMX_TICKS mTimeStamp;
+ sp<ABuffer> mHdr10PlusInfo;
+ };
+ PrivInfo mPrivInfo[kNumBuffers];
uint8_t mTimeStampIdx;
vpx_image_t *mImg;
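A note on the PrivInfo plumbing above: SoftVPX now hands libvpx a pointer to a per-input PrivInfo slot as user_priv, and reads the same slot back from vpx_image_t::user_priv when the frame comes out, so the timestamp and any queued HDR10+ blob stay paired with the right picture. A standalone sketch of that pattern against the stock libvpx decode API; the ring size and names here are illustrative:

#include <cstdint>
#include <vpx/vpx_decoder.h>

struct PrivInfo {
    int64_t timeStamp;
    const void *hdr10PlusInfo;   // side data queued with this input, if any
};

static const int kNumInFlight = 8;     // illustrative; SoftVPX sizes its ring by kNumBuffers
static PrivInfo sSlots[kNumInFlight];
static int sSlotIdx = 0;

static vpx_codec_err_t decodeWithPrivInfo(vpx_codec_ctx_t *ctx,
                                          const uint8_t *data, unsigned int size,
                                          int64_t timeStamp, const void *hdr10Plus) {
    PrivInfo *slot = &sSlots[sSlotIdx];
    sSlotIdx = (sSlotIdx + 1) % kNumInFlight;
    slot->timeStamp = timeStamp;
    slot->hdr10PlusInfo = hdr10Plus;
    // libvpx surfaces this pointer again on the matching output image's
    // user_priv, which is how the onQueueFilled() changes above recover it.
    return vpx_codec_decode(ctx, data, size, slot, 0 /* deadline */);
}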
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 2dfba13..d0cb071 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -401,8 +401,8 @@
}
}
-OMX_ERRORTYPE SoftVPXEncoder::setConfig(
- OMX_INDEXTYPE index, const OMX_PTR _params) {
+OMX_ERRORTYPE SoftVPXEncoder::internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR _params, bool *frameConfig) {
switch (index) {
case OMX_IndexConfigVideoIntraVOPRefresh:
{
@@ -442,7 +442,7 @@
}
default:
- return SimpleSoftOMXComponent::setConfig(index, _params);
+ return SimpleSoftOMXComponent::internalSetConfig(index, _params, frameConfig);
}
}
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index dd86d36..263d134 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -84,8 +84,8 @@
virtual OMX_ERRORTYPE internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR param);
- virtual OMX_ERRORTYPE setConfig(
- OMX_INDEXTYPE index, const OMX_PTR params);
+ virtual OMX_ERRORTYPE internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig);
// OMX callback when buffers available
// Note that both an input and output buffer
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index f352fba..1d792fd 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -110,15 +110,22 @@
{
initPorts();
- CHECK_EQ(initDecoder(), (status_t)OK);
+ mMemoryVec.clear();
+ mDrcMemoryVec.clear();
+
+ CHECK_EQ(initDecoder(), IA_NO_ERROR);
}
SoftXAAC::~SoftXAAC() {
- int errCode = deInitXAACDecoder();
- if (0 != errCode) {
- ALOGE("deInitXAACDecoder() failed %d", errCode);
+ IA_ERRORCODE err_code = deInitXAACDecoder();
+ if (IA_NO_ERROR != err_code) {
+ ALOGE("deInitXAACDecoder() failed %d", err_code);
}
+ err_code = deInitMPEGDDDrc();
+ if (IA_NO_ERROR != err_code) {
+ ALOGE("deInitMPEGDDDrc() failed %d", err_code);
+ }
mIsCodecInitialized = false;
mIsCodecConfigFlushRequired = false;
}
@@ -164,36 +171,16 @@
addPort(def);
}
-status_t SoftXAAC::initDecoder() {
- status_t status = UNKNOWN_ERROR;
-
+IA_ERRORCODE SoftXAAC::initDecoder() {
int ui_drc_val;
IA_ERRORCODE err_code = IA_NO_ERROR;
int loop = 0;
err_code = initXAACDecoder();
if (err_code != IA_NO_ERROR) {
- if (NULL == mXheaacCodecHandle) {
- ALOGE("AAC decoder handle is null");
- }
- if (NULL == mMpegDDrcHandle) {
- ALOGE("MPEG-D DRC decoder handle is null");
- }
- for (loop = 1; loop < mMallocCount; loop++) {
- if (mMemoryArray[loop] == NULL) {
- ALOGE(" memory allocation error %d\n", loop);
- break;
- }
- }
- ALOGE("initXAACDecoder Failed");
-
- for (loop = 0; loop < mMallocCount; loop++) {
- if (mMemoryArray[loop]) free(mMemoryArray[loop]);
- }
- mMallocCount = 0;
- return status;
- } else {
- status = OK;
+ ALOGE("initXAACDecoder failed with error %d", err_code);
+ deInitXAACDecoder();
+ return err_code;
}
mEndOfInput = false;
@@ -274,7 +261,7 @@
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE");
#endif
- return status;
+ return IA_NO_ERROR;
}
OMX_ERRORTYPE SoftXAAC::internalGetParameter(OMX_INDEXTYPE index, OMX_PTR params) {
@@ -547,9 +534,6 @@
/* sample currently */
if (mIsCodecInitialized) {
numOutBytes = mOutputFrameLength * (mPcmWdSz / 8) * mNumChannels;
- if ((mPcmWdSz / 8) != 2) {
- ALOGE("XAAC assumes 2 bytes per sample! mPcmWdSz %d", mPcmWdSz);
- }
}
while ((!inQueue.empty() || mEndOfInput) && !outQueue.empty()) {
@@ -569,8 +553,8 @@
inBufferLength = inHeader->nFilledLen;
/* GA header configuration sent to Decoder! */
- int err_code = configXAACDecoder(inBuffer, inBufferLength);
- if (0 != err_code) {
+ IA_ERRORCODE err_code = configXAACDecoder(inBuffer, inBufferLength);
+ if (IA_NO_ERROR != err_code) {
ALOGW("configXAACDecoder err_code = %d", err_code);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
@@ -682,8 +666,8 @@
/* which should initialize the codec. Once this state is reached, call the */
/* decodeXAACStream API with same frame to decode! */
if (!mIsCodecInitialized) {
- int err_code = configXAACDecoder(inBuffer, inBufferLength);
- if (0 != err_code) {
+ IA_ERRORCODE err_code = configXAACDecoder(inBuffer, inBufferLength);
+ if (IA_NO_ERROR != err_code) {
ALOGW("configXAACDecoder Failed 2 err_code = %d", err_code);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
@@ -845,7 +829,7 @@
}
}
-int SoftXAAC::configflushDecode() {
+IA_ERRORCODE SoftXAAC::configflushDecode() {
IA_ERRORCODE err_code;
UWORD32 ui_init_done;
uint32_t inBufferLength = 8203;
@@ -871,16 +855,13 @@
"Found Codec with below config---\nsampFreq %d\nnumChannels %d\npcmWdSz "
"%d\nchannelMask %d\noutputFrameLength %d",
mSampFreq, mNumChannels, mPcmWdSz, mChannelMask, mOutputFrameLength);
- if (mNumChannels > MAX_CHANNEL_COUNT) {
- ALOGE(" No of channels are more than max channels\n");
- mIsCodecInitialized = false;
- } else
- mIsCodecInitialized = true;
+
+ mIsCodecInitialized = true;
}
- return err_code;
+ return IA_NO_ERROR;
}
-int SoftXAAC::drainDecoder() {
- return 0;
+IA_ERRORCODE SoftXAAC::drainDecoder() {
+ return IA_NO_ERROR;
}
void SoftXAAC::onReset() {
@@ -921,7 +902,7 @@
}
}
-int SoftXAAC::initXAACDecoder() {
+IA_ERRORCODE SoftXAAC::initXAACDecoder() {
LOOPIDX i;
/* Error code */
@@ -939,11 +920,11 @@
UWORD32 ui_proc_mem_tabs_size;
/* API size */
UWORD32 pui_api_size;
+ pVOID pv_alloc_ptr;
mInputBufferSize = 0;
mInputBuffer = 0;
mOutputBuffer = 0;
- mMallocCount = 0;
/* Process struct initing end */
/* ******************************************************************/
@@ -954,20 +935,13 @@
err_code = ixheaacd_dec_api(NULL, IA_API_CMD_GET_API_SIZE, 0, &pui_api_size);
RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_API_SIZE");
- if (mMallocCount == MAX_MEM_ALLOCS) {
- ALOGE("mMemoryArray is full");
- return IA_FATAL_ERROR;
- }
-
/* Allocate memory for API */
- mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
- if (mMemoryArray[mMallocCount] == NULL) {
+ mXheaacCodecHandle = memalign(4, pui_api_size);
+ if (mXheaacCodecHandle == NULL) {
ALOGE("malloc for pui_api_size + 4 >> %d Failed", pui_api_size + 4);
return IA_FATAL_ERROR;
}
- /* Set API object with the memory allocated */
- mXheaacCodecHandle = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
- mMallocCount++;
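+ /* Track the allocation so deInitXAACDecoder() can free it on teardown */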
+ mMemoryVec.push(mXheaacCodecHandle);
/* Set the config params to default values */
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT,
@@ -979,23 +953,16 @@
RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_API_SIZE");
- if (mMallocCount == MAX_MEM_ALLOCS) {
- ALOGE("mMemoryArray is full");
- return IA_FATAL_ERROR;
- }
-
/* Allocate memory for API */
- mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
+ mMpegDDrcHandle = memalign(4, pui_api_size);
- if (mMemoryArray[mMallocCount] == NULL) {
+ if (mMpegDDrcHandle == NULL) {
ALOGE("malloc for drc api structure Failed");
return IA_FATAL_ERROR;
}
- memset(mMemoryArray[mMallocCount], 0, pui_api_size);
+ mMemoryVec.push(mMpegDDrcHandle);
- /* Set API object with the memory allocated */
- mMpegDDrcHandle = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
- mMallocCount++;
+ memset(mMpegDDrcHandle, 0, pui_api_size);
/* Set the config params to default values */
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
@@ -1021,23 +988,17 @@
&ui_proc_mem_tabs_size);
RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
- if (mMallocCount == MAX_MEM_ALLOCS) {
- ALOGE("mMemoryArray is full");
- return IA_FATAL_ERROR;
- }
-
- mMemoryArray[mMallocCount] = memalign(4, ui_proc_mem_tabs_size);
- if (mMemoryArray[mMallocCount] == NULL) {
+ pv_alloc_ptr = memalign(4, ui_proc_mem_tabs_size);
+ if (pv_alloc_ptr == NULL) {
ALOGE("Malloc for size (ui_proc_mem_tabs_size + 4) = %d failed!",
ui_proc_mem_tabs_size + 4);
return IA_FATAL_ERROR;
}
- mMallocCount++;
- /* Set pointer for process memory tables */
- err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
- (pVOID)((WORD8*)mMemoryArray[mMallocCount - 1]));
- RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+ mMemoryVec.push(pv_alloc_ptr);
+ /* Set pointer for process memory tables */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_MEMTABS_PTR, 0, pv_alloc_ptr);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
/* initialize the API, post config, fill memory tables */
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT,
@@ -1066,17 +1027,12 @@
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_MEM_INFO_TYPE, i, &ui_type);
RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
- if (mMallocCount == MAX_MEM_ALLOCS) {
- ALOGE("mMemoryArray is full");
- return IA_FATAL_ERROR;
- }
- mMemoryArray[mMallocCount] = memalign(ui_alignment, ui_size);
- if (mMemoryArray[mMallocCount] == NULL) {
+ pv_alloc_ptr = memalign(ui_alignment, ui_size);
+ if (pv_alloc_ptr == NULL) {
ALOGE("Malloc for size (ui_size + ui_alignment) = %d failed!", ui_size + ui_alignment);
return IA_FATAL_ERROR;
}
- pv_alloc_ptr = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
- mMallocCount++;
+ mMemoryVec.push(pv_alloc_ptr);
/* Set the buffer pointer */
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_MEM_PTR, i, pv_alloc_ptr);
@@ -1095,7 +1051,7 @@
return IA_NO_ERROR;
}
-int SoftXAAC::configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength) {
+IA_ERRORCODE SoftXAAC::configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength) {
UWORD32 ui_init_done;
int32_t i_bytes_consumed;
@@ -1154,13 +1110,73 @@
return IA_NO_ERROR;
}
-int SoftXAAC::configMPEGDDrc() {
+IA_ERRORCODE SoftXAAC::initMPEGDDDrc() {
+ IA_ERRORCODE err_code = IA_NO_ERROR;
+ int i;
+
+ for (i = 0; i < (WORD32)2; i++) {
+ WORD32 ui_size, ui_alignment, ui_type;
+ pVOID pv_alloc_ptr;
+
+ /* Get memory size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_SIZE, i, &ui_size);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
+
+ /* Get memory alignment */
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_ALIGNMENT, i, &ui_alignment);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
+
+ /* Get memory type */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_TYPE, i, &ui_type);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
+
+ pv_alloc_ptr = memalign(4, ui_size);
+ if (pv_alloc_ptr == NULL) {
+ ALOGE(" Cannot create requested memory %d", ui_size);
+ return IA_FATAL_ERROR;
+ }
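+ /* Track DRC allocations separately so deInitMPEGDDDrc() can release them */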
+ mDrcMemoryVec.push(pv_alloc_ptr);
+
+ /* Set the buffer pointer */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, i, pv_alloc_ptr);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+ }
+
+ WORD32 ui_size;
+ ui_size = 8192 * 2;
+
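+ /* Allocate 16 KB buffers for DRC input (mem index 2) and output (mem index 3) */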
+ mDrcInBuf = (int8_t*)memalign(4, ui_size);
+ if (mDrcInBuf == NULL) {
+ ALOGE(" Cannot create requested memory %d", ui_size);
+ return IA_FATAL_ERROR;
+ }
+ mDrcMemoryVec.push(mDrcInBuf);
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, 2, mDrcInBuf);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+
+ mDrcOutBuf = (int8_t*)memalign(4, ui_size);
+ if (mDrcOutBuf == NULL) {
+ ALOGE(" Cannot create requested memory %d", ui_size);
+ return IA_FATAL_ERROR;
+ }
+ mDrcMemoryVec.push(mDrcOutBuf);
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, 3, mDrcOutBuf);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+
+ return IA_NO_ERROR;
+}
+IA_ERRORCODE SoftXAAC::configMPEGDDrc() {
IA_ERRORCODE err_code = IA_NO_ERROR;
int i_effect_type;
int i_loud_norm;
int i_target_loudness;
unsigned int i_sbr_mode;
- int n_mems;
int i;
#ifdef ENABLE_MPEG_D_DRC
@@ -1217,78 +1233,16 @@
RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS");
- for (i = 0; i < (WORD32)2; i++) {
- WORD32 ui_size, ui_alignment, ui_type;
- pVOID pv_alloc_ptr;
+ /* Free any memory that is allocated for MPEG D Drc so far */
+ deInitMPEGDDDrc();
- /* Get memory size */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_SIZE, i, &ui_size);
-
- RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
-
- /* Get memory alignment */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_ALIGNMENT, i,
- &ui_alignment);
-
- RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
-
- /* Get memory type */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_TYPE, i, &ui_type);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
- if (mMallocCount == MAX_MEM_ALLOCS) {
- ALOGE("mMemoryArray is full");
- return IA_FATAL_ERROR;
- }
-
- mMemoryArray[mMallocCount] = memalign(4, ui_size);
- if (mMemoryArray[mMallocCount] == NULL) {
- ALOGE(" Cannot create requested memory %d", ui_size);
- return IA_FATAL_ERROR;
- }
- pv_alloc_ptr = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
- mMallocCount++;
-
- /* Set the buffer pointer */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, i, pv_alloc_ptr);
-
- RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+ err_code = initMPEGDDDrc();
+ if (err_code != IA_NO_ERROR) {
+ ALOGE("initMPEGDDDrc failed with error %d", err_code);
+ deInitMPEGDDDrc();
+ return err_code;
}
- {
- WORD32 ui_size;
- ui_size = 8192 * 2;
- if (mMallocCount == MAX_MEM_ALLOCS) {
- ALOGE("mMemoryArray is full");
- return IA_FATAL_ERROR;
- }
- mMemoryArray[mMallocCount] = memalign(4, ui_size);
- if (mMemoryArray[mMallocCount] == NULL) {
- ALOGE(" Cannot create requested memory %d", ui_size);
- return IA_FATAL_ERROR;
- }
-
- mDrcInBuf = (int8_t*)mMemoryArray[mMallocCount];
- mMallocCount++;
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, 2,
- /*mOutputBuffer*/ mDrcInBuf);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
-
- if (mMallocCount == MAX_MEM_ALLOCS) {
- ALOGE("mMemoryArray is full");
- return IA_FATAL_ERROR;
- }
- mMemoryArray[mMallocCount] = memalign(4, ui_size);
- if (mMemoryArray[mMallocCount] == NULL) {
- ALOGE(" Cannot create requested memory %d", ui_size);
- return IA_FATAL_ERROR;
- }
-
- mDrcOutBuf = (int8_t*)mMemoryArray[mMallocCount];
- mMallocCount++;
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, 3,
- /*mOutputBuffer*/ mDrcOutBuf);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
- }
/* DRC buffers
buf[0] - contains extension element pay load loudness related
buf[1] - contains extension element pay load*/
@@ -1423,10 +1377,10 @@
}
#endif
- return err_code;
+ return IA_NO_ERROR;
}
-int SoftXAAC::decodeXAACStream(uint8_t* inBuffer, uint32_t inBufferLength, int32_t* bytesConsumed,
- int32_t* outBytes) {
+IA_ERRORCODE SoftXAAC::decodeXAACStream(uint8_t* inBuffer, uint32_t inBufferLength,
+ int32_t* bytesConsumed, int32_t* outBytes) {
if (mInputBufferSize < inBufferLength) {
ALOGE("Cannot config AAC, input buffer size %d < inBufferLength %d", mInputBufferSize,
inBufferLength);
@@ -1516,24 +1470,33 @@
memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
}
#endif
- return err_code;
+ return IA_NO_ERROR;
}
-int SoftXAAC::deInitXAACDecoder() {
+IA_ERRORCODE SoftXAAC::deInitXAACDecoder() {
ALOGI("deInitXAACDecoder");
/* Tell that the input is over in this buffer */
IA_ERRORCODE err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INPUT_OVER, 0, NULL);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_INPUT_OVER");
- for (int i = 0; i < mMallocCount; i++) {
- if (mMemoryArray[i]) free(mMemoryArray[i]);
+ /* Irrespective of error returned in IA_API_CMD_INPUT_OVER, free allocated memory */
+ for (void* buf : mMemoryVec) {
+ free(buf);
}
- mMallocCount = 0;
-
+ mMemoryVec.clear();
return err_code;
}
+IA_ERRORCODE SoftXAAC::deInitMPEGDDDrc() {
+ ALOGI("deInitMPEGDDDrc");
+
+ for (void* buf : mDrcMemoryVec) {
+ free(buf);
+ }
+ mDrcMemoryVec.clear();
+ return IA_NO_ERROR;
+}
+
IA_ERRORCODE SoftXAAC::getXAACStreamInfo() {
IA_ERRORCODE err_code = IA_NO_ERROR;
@@ -1546,11 +1509,19 @@
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS, &mNumChannels);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS");
+ if (mNumChannels > MAX_CHANNEL_COUNT) {
+ ALOGE(" No of channels are more than max channels\n");
+ return IA_FATAL_ERROR;
+ }
/* PCM word size */
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ, &mPcmWdSz);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ");
+ if ((mPcmWdSz / 8) != 2) {
+ ALOGE("Invalid Number of bytes per sample: %d, Expected is 2", mPcmWdSz);
+ return IA_FATAL_ERROR;
+ }
/* channel mask to tell the arrangement of channels in bit stream */
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.h b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
index 6176082..a62a797 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.h
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
@@ -33,8 +33,6 @@
#include "impd_apicmd_standards.h"
#include "impd_drc_config_params.h"
-#define MAX_MEM_ALLOCS 100
-
extern "C" IA_ERRORCODE ixheaacd_dec_api(pVOID p_ia_module_obj, WORD32 i_cmd, WORD32 i_idx,
pVOID pv_value);
extern "C" IA_ERRORCODE ia_drc_dec_api(pVOID p_ia_module_obj, WORD32 i_cmd, WORD32 i_idx,
@@ -80,18 +78,19 @@
enum { NONE, AWAITING_DISABLED, AWAITING_ENABLED } mOutputPortSettingsChange;
void initPorts();
- status_t initDecoder();
+ IA_ERRORCODE initDecoder();
bool isConfigured() const;
- int drainDecoder();
- int initXAACDecoder();
- int deInitXAACDecoder();
+ IA_ERRORCODE drainDecoder();
+ IA_ERRORCODE initXAACDecoder();
+ IA_ERRORCODE deInitXAACDecoder();
+ IA_ERRORCODE initMPEGDDDrc();
+ IA_ERRORCODE deInitMPEGDDDrc();
+ IA_ERRORCODE configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength);
+ IA_ERRORCODE configMPEGDDrc();
+ IA_ERRORCODE decodeXAACStream(uint8_t* inBuffer, uint32_t inBufferLength,
+ int32_t* bytesConsumed, int32_t* outBytes);
- int configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength);
- int configMPEGDDrc();
- int decodeXAACStream(uint8_t* inBuffer, uint32_t inBufferLength, int32_t* bytesConsumed,
- int32_t* outBytes);
-
- int configflushDecode();
+ IA_ERRORCODE configflushDecode();
IA_ERRORCODE getXAACStreamInfo();
IA_ERRORCODE setXAACDRCInfo(int32_t drcCut, int32_t drcBoost, int32_t drcRefLevel,
int32_t drcHeavyCompression
@@ -120,9 +119,8 @@
int8_t* mDrcOutBuf;
int32_t mMpegDDRCPresent;
int32_t mDRCFlag;
-
- void* mMemoryArray[MAX_MEM_ALLOCS];
- int32_t mMallocCount;
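+ // Buffers allocated for the codec and the MPEG-D DRC module; released in
+ // deInitXAACDecoder() / deInitMPEGDDDrc()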
+ Vector<void*> mMemoryVec;
+ Vector<void*> mDrcMemoryVec;
DISALLOW_EVIL_CONSTRUCTORS(SoftXAAC);
};
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 86bd9d6..d136d9e 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -27,6 +27,7 @@
#include "libyuv/convert_from.h"
#include "libyuv/convert_argb.h"
+#include "libyuv/planar_functions.h"
#include "libyuv/video_common.h"
#include <functional>
#include <sys/time.h>
@@ -91,10 +92,17 @@
case OMX_COLOR_FormatCbYCrY:
case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
- case OMX_COLOR_FormatYUV420SemiPlanar:
case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
return mDstFormat == OMX_COLOR_Format16bitRGB565;
+ case OMX_COLOR_FormatYUV420SemiPlanar:
+#ifdef USE_LIBYUV
+ return mDstFormat == OMX_COLOR_Format16bitRGB565
+ || mDstFormat == OMX_COLOR_Format32BitRGBA8888;
+#else
+ return mDstFormat == OMX_COLOR_Format16bitRGB565;
+#endif
+
default:
return false;
}
@@ -236,7 +244,11 @@
break;
case OMX_COLOR_FormatYUV420SemiPlanar:
+#ifdef USE_LIBYUV
+ err = convertYUV420SemiPlanarUseLibYUV(src, dst);
+#else
err = convertYUV420SemiPlanar(src, dst);
+#endif
break;
case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
@@ -365,6 +377,36 @@
return OK;
}
+status_t ColorConverter::convertYUV420SemiPlanarUseLibYUV(
+ const BitmapParams &src, const BitmapParams &dst) {
+ uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
+
+ const uint8_t *src_y =
+ (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
+
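+ // For NV12 the interleaved UV plane starts right after the full-height Y plane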
+ const uint8_t *src_u =
+ (const uint8_t *)src.mBits + src.mStride * src.mHeight
+ + (src.mCropTop / 2) * src.mStride + src.mCropLeft;
+
+ switch (mDstFormat) {
+ case OMX_COLOR_Format16bitRGB565:
+ libyuv::NV12ToRGB565(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+ dst.mStride, src.cropWidth(), src.cropHeight());
+ break;
+
+ case OMX_COLOR_Format32BitRGBA8888:
+ libyuv::NV12ToARGB(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+ dst.mStride, src.cropWidth(), src.cropHeight());
+ break;
+
+ default:
+ return ERROR_UNSUPPORTED;
+ }
+
+ return OK;
+}
+
std::function<void (void *, void *, void *, size_t,
signed *, signed *, signed *, signed *)>
getReadFromSrc(OMX_COLOR_FORMATTYPE srcFormat) {
@@ -852,7 +894,7 @@
const uint8_t *src_u =
(const uint8_t *)src.mBits + src.mHeight * src.mStride +
- src.mCropTop * src.mStride / 2 + src.mCropLeft;
+ (src.mCropTop / 2) * src.mStride + src.mCropLeft;
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index 6bfab16..751b053 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -36,8 +36,6 @@
shared_libs: [
"liblog",
- "libstagefright_foundation",
- "libutils",
],
header_libs: ["libmedia_headers"],
}
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 78d410a..c4a072b 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -29,7 +29,6 @@
shared_libs: [
"liblog",
- "libbinder",
"libcrypto",
"libcutils",
"libmedia",
@@ -38,10 +37,11 @@
"libstagefright",
"libstagefright_foundation",
"libutils",
- "libhidlallocatorutils",
"libhidlbase",
+ "libhidlmemory",
"android.hardware.cas@1.0",
"android.hardware.cas.native@1.0",
+ "android.hidl.allocator@1.0",
],
header_libs: [
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 80125d4..9b2853e 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -237,6 +237,8 @@
android_native_rect_t mLastNativeWindowCrop;
int32_t mLastNativeWindowDataSpace;
HDRStaticInfo mLastHDRStaticInfo;
+ sp<ABuffer> mHdr10PlusScratchBuffer;
+ sp<ABuffer> mLastHdr10PlusBuffer;
sp<AMessage> mConfigFormat;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
@@ -290,6 +292,7 @@
OMX_INDEXTYPE mDescribeColorAspectsIndex;
OMX_INDEXTYPE mDescribeHDRStaticInfoIndex;
+ OMX_INDEXTYPE mDescribeHDR10PlusInfoIndex;
std::shared_ptr<ACodecBufferChannel> mBufferChannel;
@@ -424,6 +427,11 @@
// unspecified values.
void onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects);
+ // Notifies the codec that the config with |configIndex| has changed. The new value
+ // can be queried via OMX getConfig, and the config should be applied to the next
+ // output buffer notified after this callback.
+ void onConfigUpdate(OMX_INDEXTYPE configIndex);
+
// gets index or sets it to 0 on error. Returns error from codec.
status_t initDescribeHDRStaticInfoIndex();
@@ -435,12 +443,22 @@
// sets |params|. Returns the codec error.
status_t setHDRStaticInfo(const DescribeHDRStaticInfoParams ¶ms);
+ // sets |hdr10PlusInfo|. Returns the codec error.
+ status_t setHdr10PlusInfo(const sp<ABuffer> &hdr10PlusInfo);
+
// gets |params|. Returns the codec error.
status_t getHDRStaticInfo(DescribeHDRStaticInfoParams ¶ms);
// gets HDR static information for the video encoder/decoder port and sets them into |format|.
status_t getHDRStaticInfoForVideoCodec(OMX_U32 portIndex, sp<AMessage> &format);
+ // gets DescribeHDR10PlusInfoParams. If |paramSizeUsed| is zero, it is possible
+ // that the returned DescribeHDR10PlusInfoParams only has the nParamSizeUsed
+ // field updated, because the size of the provided storage is insufficient.
+ // In this case, getHDR10PlusInfo() should be called again with |paramSizeUsed|
+ // set to the previously returned value.
+ DescribeHDR10PlusInfoParams* getHDR10PlusInfo(size_t paramSizeUsed = 0);
+
typedef struct drcParams {
int32_t drcCut;
int32_t drcBoost;
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 206d322..b0e32d0 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -69,6 +69,7 @@
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+ status_t getPortId(audio_port_handle_t *portId) const;
protected:
virtual ~AudioSource();
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 6d4c1bf..75b0d8e 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -90,6 +90,9 @@
status_t convertYUV420PlanarUseLibYUV(
const BitmapParams &src, const BitmapParams &dst);
+ status_t convertYUV420SemiPlanarUseLibYUV(
+ const BitmapParams &src, const BitmapParams &dst);
+
status_t convertYUV420Planar16(
const BitmapParams &src, const BitmapParams &dst);
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index f18940d..1abef8c 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -110,6 +110,7 @@
uint32_t mInterleaveDurationUs;
int32_t mTimeScale;
int64_t mStartTimestampUs;
+ int32_t mStartTimeOffsetBFramesUs; // Start time offset when B Frames are present
int mLatitudex10000;
int mLongitudex10000;
bool mAreGeoTagsAvailable;
@@ -129,6 +130,7 @@
void setStartTimestampUs(int64_t timeUs);
int64_t getStartTimestampUs(); // Not const
+ int32_t getStartTimeOffsetBFramesUs();
status_t startTracks(MetaData *params);
size_t numTracks();
int64_t estimateMoovBoxSize(int32_t bitRate);
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractor.h b/media/libstagefright/include/media/stagefright/MediaExtractor.h
index 6f3e57e..79f18d5 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractor.h
@@ -90,22 +90,7 @@
class MediaExtractorCUnwrapper : public MediaExtractor {
public:
- MediaExtractorCUnwrapper() {};
- virtual size_t countTracks() = 0;
- virtual MediaTrack *getTrack(size_t index) = 0;
- virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0) = 0;
- virtual status_t getMetaData(MetaDataBase& meta) = 0;
- virtual const char * name() = 0;
- virtual uint32_t flags() const = 0;
- virtual status_t setMediaCas(const uint8_t* casToken, size_t size) = 0;
-protected:
- virtual ~MediaExtractorCUnwrapper() {};
-};
-
-
-class MediaExtractorCUnwrapperV1 : public MediaExtractorCUnwrapper {
-public:
- explicit MediaExtractorCUnwrapperV1(CMediaExtractor *plugin);
+ explicit MediaExtractorCUnwrapper(CMediaExtractor *plugin);
virtual size_t countTracks();
virtual MediaTrack *getTrack(size_t index);
virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
@@ -114,43 +99,11 @@
virtual uint32_t flags() const;
virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
protected:
- virtual ~MediaExtractorCUnwrapperV1();
+ virtual ~MediaExtractorCUnwrapper();
private:
CMediaExtractor *plugin;
};
-class MediaExtractorCUnwrapperV2 : public MediaExtractorCUnwrapper {
-public:
- explicit MediaExtractorCUnwrapperV2(CMediaExtractorV2 *plugin);
- virtual size_t countTracks();
- virtual MediaTrack *getTrack(size_t index);
- virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
- virtual status_t getMetaData(MetaDataBase& meta);
- virtual const char * name();
- virtual uint32_t flags() const;
- virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
-protected:
- virtual ~MediaExtractorCUnwrapperV2();
-private:
- CMediaExtractorV2 *plugin;
-};
-
-class MediaExtractorCUnwrapperV3 : public MediaExtractorCUnwrapper {
-public:
- explicit MediaExtractorCUnwrapperV3(CMediaExtractorV3 *plugin);
- virtual size_t countTracks();
- virtual MediaTrack *getTrack(size_t index);
- virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
- virtual status_t getMetaData(MetaDataBase& meta);
- virtual const char * name();
- virtual uint32_t flags() const;
- virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
-protected:
- virtual ~MediaExtractorCUnwrapperV3();
-private:
- CMediaExtractorV3 *plugin;
-};
-
} // namespace android
#endif // MEDIA_EXTRACTOR_H_
diff --git a/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h b/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h
index 9d97dfd..fcfcbec 100644
--- a/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h
+++ b/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014, The Android Open Source Project
+ * Copyright 2018, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,87 +17,24 @@
#ifndef VIDEO_FRAME_SCHEDULER_H_
#define VIDEO_FRAME_SCHEDULER_H_
-#include <utils/RefBase.h>
-#include <utils/Timers.h>
-
-#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/VideoFrameSchedulerBase.h>
namespace android {
class ISurfaceComposer;
-struct VideoFrameScheduler : public RefBase {
+struct VideoFrameScheduler : public VideoFrameSchedulerBase {
VideoFrameScheduler();
-
- // (re)initialize scheduler
- void init(float videoFps = -1);
- // use in case of video render-time discontinuity, e.g. seek
- void restart();
- // get adjusted nanotime for a video frame render at renderTime
- nsecs_t schedule(nsecs_t renderTime);
-
- // returns the vsync period for the main display
- nsecs_t getVsyncPeriod();
-
- // returns the current frames-per-second, or 0.f if not primed
- float getFrameRate();
-
- void release();
-
- static const size_t kHistorySize = 8;
+ void release() override;
protected:
virtual ~VideoFrameScheduler();
private:
- struct PLL {
- PLL();
-
- // reset PLL to new PLL
- void reset(float fps = -1);
- // keep current estimate, but restart phase
- void restart();
- // returns period or 0 if not yet primed
- nsecs_t addSample(nsecs_t time);
- nsecs_t getPeriod() const;
-
- private:
- nsecs_t mPeriod;
- nsecs_t mPhase;
-
- bool mPrimed; // have an estimate for the period
- size_t mSamplesUsedForPriming;
-
- nsecs_t mLastTime; // last input time
- nsecs_t mRefitAt; // next input time to fit at
-
- size_t mNumSamples; // can go past kHistorySize
- nsecs_t mTimes[kHistorySize];
-
- void test();
- // returns whether fit was successful
- bool fit(nsecs_t phase, nsecs_t period, size_t numSamples,
- int64_t *a, int64_t *b, int64_t *err);
- void prime(size_t numSamples);
- };
-
- void updateVsync();
-
- nsecs_t mVsyncTime; // vsync timing from display
- nsecs_t mVsyncPeriod;
- nsecs_t mVsyncRefreshAt; // next time to refresh timing info
-
- nsecs_t mLastVsyncTime; // estimated vsync time for last frame
- nsecs_t mTimeCorrection; // running adjustment
-
- PLL mPll; // PLL for video frame rate based on render time
-
+ void updateVsync() override;
sp<ISurfaceComposer> mComposer;
-
- DISALLOW_EVIL_CONSTRUCTORS(VideoFrameScheduler);
};
} // namespace android
#endif // VIDEO_FRAME_SCHEDULER_H_
-
diff --git a/media/libstagefright/include/media/stagefright/VideoFrameScheduler2.h b/media/libstagefright/include/media/stagefright/VideoFrameScheduler2.h
new file mode 100644
index 0000000..be911cc
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/VideoFrameScheduler2.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VIDEO_FRAME_SCHEDULER_2_H_
+#define VIDEO_FRAME_SCHEDULER_2_H_
+
+#include <media/stagefright/VideoFrameSchedulerBase.h>
+
+namespace android {
+
+class VsyncTracker;
+struct ChoreographerThread;
+
+struct VideoFrameScheduler2 : public VideoFrameSchedulerBase {
+ VideoFrameScheduler2();
+ void release() override;
+
+protected:
+ virtual ~VideoFrameScheduler2();
+
+private:
+ void updateVsync() override;
+
+ long mAppVsyncOffset;
+ long mSfVsyncOffset;
+ sp<VsyncTracker> mVsyncTracker;
+ sp<ChoreographerThread> mChoreographerThread;
+ Mutex mLock;
+};
+
+} // namespace android
+
+#endif // VIDEO_FRAME_SCHEDULER_2_H_
diff --git a/media/libstagefright/include/media/stagefright/VideoFrameSchedulerBase.h b/media/libstagefright/include/media/stagefright/VideoFrameSchedulerBase.h
new file mode 100644
index 0000000..ff5f716
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/VideoFrameSchedulerBase.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VIDEO_FRAME_SCHEDULER_BASE_H_
+#define VIDEO_FRAME_SCHEDULER_BASE_H_
+
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct VideoFrameSchedulerBase : public RefBase {
+ VideoFrameSchedulerBase();
+
+ // (re)initialize scheduler
+ void init(float videoFps = -1);
+ // use in case of video render-time discontinuity, e.g. seek
+ void restart();
+ // get adjusted nanotime for a video frame render at renderTime
+ nsecs_t schedule(nsecs_t renderTime);
+
+ // returns the vsync period for the main display
+ nsecs_t getVsyncPeriod();
+
+ // returns the current frames-per-second, or 0.f if not primed
+ float getFrameRate();
+
+ virtual void release() = 0;
+
+ static const size_t kHistorySize = 8;
+ static const nsecs_t kNanosIn1s = 1000000000;
+ static const nsecs_t kDefaultVsyncPeriod = kNanosIn1s / 60; // 60Hz
+ static const nsecs_t kVsyncRefreshPeriod = kNanosIn1s; // 1 sec
+
+protected:
+ virtual ~VideoFrameSchedulerBase();
+
+ nsecs_t mVsyncTime; // vsync timing from display
+ nsecs_t mVsyncPeriod;
+ nsecs_t mVsyncRefreshAt; // next time to refresh timing info
+
+private:
+ struct PLL {
+ PLL();
+
+ // reset PLL to new PLL
+ void reset(float fps = -1);
+ // keep current estimate, but restart phase
+ void restart();
+ // returns period or 0 if not yet primed
+ nsecs_t addSample(nsecs_t time);
+ nsecs_t getPeriod() const;
+
+ private:
+ nsecs_t mPeriod;
+ nsecs_t mPhase;
+
+ bool mPrimed; // have an estimate for the period
+ size_t mSamplesUsedForPriming;
+
+ nsecs_t mLastTime; // last input time
+ nsecs_t mRefitAt; // next input time to fit at
+
+ size_t mNumSamples; // can go past kHistorySize
+ nsecs_t mTimes[kHistorySize];
+
+ void test();
+ // returns whether fit was successful
+ bool fit(nsecs_t phase, nsecs_t period, size_t numSamples,
+ int64_t *a, int64_t *b, int64_t *err);
+ void prime(size_t numSamples);
+ };
+
+ virtual void updateVsync() = 0;
+
+ nsecs_t mLastVsyncTime; // estimated vsync time for last frame
+ nsecs_t mTimeCorrection; // running adjustment
+ PLL mPll; // PLL for video frame rate based on render time
+
+ DISALLOW_EVIL_CONSTRUCTORS(VideoFrameSchedulerBase);
+};
+
+} // namespace android
+
+#endif // VIDEO_FRAME_SCHEDULER_BASE_H_
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 590131e..345f85d 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -23,10 +23,10 @@
#include "ESQueue.h"
#include <android/hardware/cas/native/1.0/IDescrambler.h>
-#include <binder/IMemory.h>
-#include <binder/MemoryDealer.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
#include <cutils/native_handle.h>
-#include <hidlmemory/FrameworkUtils.h>
+#include <hidlmemory/mapping.h>
#include <media/cas/DescramblerAPI.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -46,12 +46,13 @@
#include <inttypes.h>
namespace android {
-using hardware::fromHeap;
using hardware::hidl_string;
using hardware::hidl_vec;
-using hardware::HidlMemory;
+using hardware::hidl_memory;
using namespace hardware::cas::V1_0;
using namespace hardware::cas::native::V1_0;
+typedef hidl::allocator::V1_0::IAllocator TAllocator;
+typedef hidl::memory::V1_0::IMemory TMemory;
// I want the expression "y" evaluated even if verbose logging is off.
#define MY_LOGV(x, y) \
@@ -208,9 +209,8 @@
bool mScrambled;
bool mSampleEncrypted;
sp<AMessage> mSampleAesKeyItem;
- sp<IMemory> mMem;
- sp<MemoryDealer> mDealer;
- sp<HidlMemory> mHidlMemory;
+ sp<TMemory> mHidlMemory;
+ sp<TAllocator> mHidlAllocator;
hardware::cas::native::V1_0::SharedBuffer mDescramblerSrcBuffer;
sp<ABuffer> mDescrambledBuffer;
List<SubSampleInfo> mSubSamples;
@@ -975,16 +975,43 @@
mBuffer == NULL ? 0 : mBuffer->capacity(), neededSize, mScrambled);
sp<ABuffer> newBuffer, newScrambledBuffer;
- sp<IMemory> newMem;
- sp<MemoryDealer> newDealer;
+ sp<TMemory> newMem;
if (mScrambled) {
- size_t alignment = MemoryDealer::getAllocationAlignment();
- neededSize = (neededSize + (alignment - 1)) & ~(alignment - 1);
- // Align to multiples of 64K.
- neededSize = (neededSize + 65535) & ~65535;
- newDealer = new MemoryDealer(neededSize, "ATSParser");
- newMem = newDealer->allocate(neededSize);
- newScrambledBuffer = new ABuffer(newMem->pointer(), newMem->size());
+ if (mHidlAllocator == nullptr) {
+ mHidlAllocator = TAllocator::getService("ashmem");
+ if (mHidlAllocator == nullptr) {
+ ALOGE("[stream %d] can't get hidl allocator", mElementaryPID);
+ return false;
+ }
+ }
+
+ hidl_memory hidlMemToken;
+ bool success;
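+ // IAllocator::allocate() reports its status and the hidl_memory token through a callback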
+ auto transStatus = mHidlAllocator->allocate(
+ neededSize,
+ [&success, &hidlMemToken](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ hidlMemToken = m;
+ });
+
+ if (!transStatus.isOk()) {
+ ALOGE("[stream %d] hidl allocator failed at the transport: %s",
+ mElementaryPID, transStatus.description().c_str());
+ return false;
+ }
+ if (!success) {
+ ALOGE("[stream %d] hidl allocator failed", mElementaryPID);
+ return false;
+ }
+ newMem = mapMemory(hidlMemToken);
+ if (newMem == nullptr || newMem->getPointer() == nullptr) {
+ ALOGE("[stream %d] hidl failed to map memory", mElementaryPID);
+ return false;
+ }
+
+ newScrambledBuffer = new ABuffer(newMem->getPointer(), newMem->getSize());
if (mDescrambledBuffer != NULL) {
memcpy(newScrambledBuffer->data(),
@@ -993,24 +1020,15 @@
} else {
newScrambledBuffer->setRange(0, 0);
}
- mMem = newMem;
- mDealer = newDealer;
+ mHidlMemory = newMem;
mDescrambledBuffer = newScrambledBuffer;
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = newMem->getMemory(&offset, &size);
- if (heap == NULL) {
- return false;
- }
+ mDescramblerSrcBuffer.heapBase = hidlMemToken;
+ mDescramblerSrcBuffer.offset = 0ULL;
+ mDescramblerSrcBuffer.size = (uint64_t)neededSize;
- mHidlMemory = fromHeap(heap);
- mDescramblerSrcBuffer.heapBase = *mHidlMemory;
- mDescramblerSrcBuffer.offset = (uint64_t) offset;
- mDescramblerSrcBuffer.size = (uint64_t) size;
-
- ALOGD("[stream %d] created shared buffer for descrambling, offset %zd, size %zu",
- mElementaryPID, offset, size);
+ ALOGD("[stream %d] created shared buffer for descrambling, size %zu",
+ mElementaryPID, neededSize);
} else {
// Align to multiples of 64K.
neededSize = (neededSize + 65535) & ~65535;
@@ -1498,7 +1516,7 @@
return UNKNOWN_ERROR;
}
- if (mDescrambledBuffer == NULL || mMem == NULL) {
+ if (mDescrambledBuffer == NULL || mHidlMemory == NULL) {
ALOGE("received scrambled packets without shared memory!");
return UNKNOWN_ERROR;
@@ -1592,9 +1610,9 @@
detailedError = _detailedError;
});
- if (!returnVoid.isOk()) {
- ALOGE("[stream %d] descramble failed, trans=%s",
- mElementaryPID, returnVoid.description().c_str());
+ if (!returnVoid.isOk() || status != Status::OK) {
+ ALOGE("[stream %d] descramble failed, trans=%s, status=%d",
+ mElementaryPID, returnVoid.description().c_str(), status);
return UNKNOWN_ERROR;
}
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index e516cf1..a507b91 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -30,9 +30,10 @@
shared_libs: [
"libcrypto",
"libmedia",
- "libhidlallocatorutils",
+ "libhidlmemory",
"android.hardware.cas.native@1.0",
"android.hidl.memory@1.0",
+ "android.hidl.allocator@1.0",
],
header_libs: [
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 8a76de3..362b7f5 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -53,7 +53,6 @@
"libhidlbase",
"libhidlmemory",
"libhidltransport",
- "libnativewindow", // TODO(b/62923479): use header library
"libvndksupport",
"android.hardware.media.omx@1.0",
"android.hardware.graphics.bufferqueue@1.0",
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 55afe04..ddb459f 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -34,7 +34,8 @@
mLooper(new ALooper),
mHandler(new AHandlerReflector<SimpleSoftOMXComponent>(this)),
mState(OMX_StateLoaded),
- mTargetState(OMX_StateLoaded) {
+ mTargetState(OMX_StateLoaded),
+ mFrameConfig(false) {
mLooper->setName(name);
mLooper->registerHandler(mHandler);
@@ -204,6 +205,21 @@
}
}
+OMX_ERRORTYPE SimpleSoftOMXComponent::internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig) {
+ return OMX_ErrorUndefined;
+}
+
+OMX_ERRORTYPE SimpleSoftOMXComponent::setConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params) {
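+ // Delegate to internalSetConfig(); on success, remember whether this config
+ // should be applied to the next queued input frame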
+ bool frameConfig = mFrameConfig;
+ OMX_ERRORTYPE err = internalSetConfig(index, params, &frameConfig);
+ if (err == OMX_ErrorNone) {
+ mFrameConfig = frameConfig;
+ }
+ return err;
+}
+
OMX_ERRORTYPE SimpleSoftOMXComponent::useBuffer(
OMX_BUFFERHEADERTYPE **header,
OMX_U32 portIndex,
@@ -336,6 +352,10 @@
OMX_BUFFERHEADERTYPE *buffer) {
sp<AMessage> msg = new AMessage(kWhatEmptyThisBuffer, mHandler);
msg->setPointer("header", buffer);
+ if (mFrameConfig) {
+ msg->setInt32("frame-config", mFrameConfig);
+ mFrameConfig = false;
+ }
msg->post();
return OMX_ErrorNone;
@@ -378,6 +398,10 @@
{
OMX_BUFFERHEADERTYPE *header;
CHECK(msg->findPointer("header", (void **)&header));
+ int32_t frameConfig;
+ if (!msg->findInt32("frame-config", &frameConfig)) {
+ frameConfig = 0;
+ }
CHECK(mState == OMX_StateExecuting && mTargetState == mState);
@@ -393,6 +417,7 @@
CHECK(!buffer->mOwnedByUs);
buffer->mOwnedByUs = true;
+ buffer->mFrameConfig = (bool)frameConfig;
CHECK((msgType == kWhatEmptyThisBuffer
&& port->mDef.eDir == OMX_DirInput)
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index f9f7ec2..e853da9 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -602,13 +602,40 @@
return OMX_ErrorNone;
}
+ case kDescribeHdr10PlusInfoIndex:
+ {
+ if (!supportDescribeHdr10PlusInfo()) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ if (mHdr10PlusOutputs.size() > 0) {
+ auto it = mHdr10PlusOutputs.begin();
+
+ auto info = (*it).get();
+
+ DescribeHDR10PlusInfoParams* outParams =
+ (DescribeHDR10PlusInfoParams *)params;
+
+ outParams->nParamSizeUsed = info->size();
+
+ // If the buffer provided by the client does not have enough
+ // storage, return the size only and do not remove the param yet.
+ if (outParams->nParamSize >= info->size()) {
+ memcpy(outParams->nValue, info->data(), info->size());
+ mHdr10PlusOutputs.erase(it);
+ }
+ return OMX_ErrorNone;
+ }
+ return OMX_ErrorUnderflow;
+ }
+
default:
return OMX_ErrorUnsupportedIndex;
}
}
-OMX_ERRORTYPE SoftVideoDecoderOMXComponent::setConfig(
- OMX_INDEXTYPE index, const OMX_PTR params){
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig){
switch ((int)index) {
case kDescribeColorAspectsIndex:
{
@@ -658,11 +685,55 @@
return OMX_ErrorNone;
}
+ case kDescribeHdr10PlusInfoIndex:
+ {
+ if (!supportDescribeHdr10PlusInfo()) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ const DescribeHDR10PlusInfoParams* inParams =
+ (DescribeHDR10PlusInfoParams *)params;
+
+ if (*frameConfig) {
+ // This is a request to append to the current frame config set.
+ // For now, we only support kDescribeHdr10PlusInfoIndex, which
+ // we simply replace with the last set value.
+ if (mHdr10PlusInputs.size() > 0) {
+ *(--mHdr10PlusInputs.end()) = ABuffer::CreateAsCopy(
+ inParams->nValue, inParams->nParamSizeUsed);
+ } else {
+ ALOGW("Ignoring kDescribeHdr10PlusInfoIndex: append to "
+ "frame config while no frame config is present");
+ }
+ } else {
+ // This is a frame config; set *frameConfig to true so that the
+ // client marks the next queued input frame to apply it.
+ *frameConfig = true;
+ mHdr10PlusInputs.push_back(ABuffer::CreateAsCopy(
+ inParams->nValue, inParams->nParamSizeUsed));
+ }
+ return OMX_ErrorNone;
+ }
+
default:
return OMX_ErrorUnsupportedIndex;
}
}
+sp<ABuffer> SoftVideoDecoderOMXComponent::dequeueInputFrameConfig() {
+ auto it = mHdr10PlusInputs.begin();
+ sp<ABuffer> info = *it;
+ mHdr10PlusInputs.erase(it);
+ return info;
+}
+
+void SoftVideoDecoderOMXComponent::queueOutputFrameConfig(const sp<ABuffer> &info) {
+ mHdr10PlusOutputs.push_back(info);
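+ // Tell the client a config update is available so it can fetch the queued
+ // info via getConfig(kDescribeHdr10PlusInfoIndex)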
+ notify(OMX_EventConfigUpdate,
+ kOutputPortIndex,
+ kDescribeHdr10PlusInfoIndex,
+ NULL);
+}
OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getExtensionIndex(
const char *name, OMX_INDEXTYPE *index) {
@@ -677,6 +748,10 @@
&& supportDescribeHdrStaticInfo()) {
*(int32_t*)index = kDescribeHdrStaticInfoIndex;
return OMX_ErrorNone;
+ } else if (!strcmp(name, "OMX.google.android.index.describeHDR10PlusInfo")
+ && supportDescribeHdr10PlusInfo()) {
+ *(int32_t*)index = kDescribeHdr10PlusInfoIndex;
+ return OMX_ErrorNone;
}
return SimpleSoftOMXComponent::getExtensionIndex(name, index);
@@ -694,6 +769,10 @@
return false;
}
+bool SoftVideoDecoderOMXComponent::supportDescribeHdr10PlusInfo() {
+ return false;
+}
+
void SoftVideoDecoderOMXComponent::onReset() {
mOutputPortSettingsChange = NONE;
}
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
index 1d1f2bd..6bbedda 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
@@ -20,6 +20,7 @@
#include "SoftOMXComponent.h"
+#include <atomic>
#include <media/stagefright/foundation/AHandlerReflector.h>
#include <utils/RefBase.h>
#include <utils/threads.h>
@@ -28,6 +29,7 @@
namespace android {
struct ALooper;
+struct ABuffer;
struct CodecProfileLevel {
OMX_U32 mProfile;
@@ -49,6 +51,7 @@
struct BufferInfo {
OMX_BUFFERHEADERTYPE *mHeader;
bool mOwnedByUs;
+ bool mFrameConfig;
};
struct PortInfo {
@@ -76,6 +79,9 @@
virtual OMX_ERRORTYPE internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params);
+ virtual OMX_ERRORTYPE internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig);
+
virtual void onQueueFilled(OMX_U32 portIndex);
List<BufferInfo *> &getPortQueue(OMX_U32 portIndex);
@@ -101,6 +107,7 @@
OMX_STATETYPE mTargetState;
Vector<PortInfo> mPorts;
+ std::atomic_bool mFrameConfig;
bool isSetParameterAllowed(
OMX_INDEXTYPE index, const OMX_PTR params) const;
@@ -114,6 +121,9 @@
virtual OMX_ERRORTYPE setParameter(
OMX_INDEXTYPE index, const OMX_PTR params);
+ virtual OMX_ERRORTYPE setConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
virtual OMX_ERRORTYPE useBuffer(
OMX_BUFFERHEADERTYPE **buffer,
OMX_U32 portIndex,
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
index 56fc691..3b381ce 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
@@ -20,6 +20,7 @@
#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AHandlerReflector.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/IOMX.h>
@@ -28,6 +29,7 @@
#include <utils/RefBase.h>
#include <utils/threads.h>
#include <utils/Vector.h>
+#include <utils/List.h>
namespace android {
@@ -48,6 +50,7 @@
enum {
kDescribeColorAspectsIndex = kPrepareForAdaptivePlaybackIndex + 1,
kDescribeHdrStaticInfoIndex = kPrepareForAdaptivePlaybackIndex + 2,
+ kDescribeHdr10PlusInfoIndex = kPrepareForAdaptivePlaybackIndex + 3,
};
enum {
@@ -68,8 +71,8 @@
virtual OMX_ERRORTYPE getConfig(
OMX_INDEXTYPE index, OMX_PTR params);
- virtual OMX_ERRORTYPE setConfig(
- OMX_INDEXTYPE index, const OMX_PTR params);
+ virtual OMX_ERRORTYPE internalSetConfig(
+ OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig);
virtual OMX_ERRORTYPE getExtensionIndex(
const char *name, OMX_INDEXTYPE *index);
@@ -80,6 +83,8 @@
virtual bool supportDescribeHdrStaticInfo();
+ virtual bool supportDescribeHdr10PlusInfo();
+
// This function sets both minimum buffer count and actual buffer count of
// input port to be |numInputBuffers|. It will also set both minimum buffer
// count and actual buffer count of output port to be |numOutputBuffers|.
@@ -166,6 +171,9 @@
// Helper function to dump the ColorAspects.
void dumpColorAspects(const ColorAspects &colorAspects);
+ sp<ABuffer> dequeueInputFrameConfig();
+ void queueOutputFrameConfig(const sp<ABuffer> &info);
+
private:
uint32_t mMinInputBufferSize;
uint32_t mMinCompressionRatio;
@@ -174,6 +182,9 @@
OMX_VIDEO_CODINGTYPE mCodingType;
const CodecProfileLevel *mProfileLevels;
size_t mNumProfileLevels;
+ typedef List<sp<ABuffer> > Hdr10PlusInfoList;
+ Hdr10PlusInfoList mHdr10PlusInputs;
+ Hdr10PlusInfoList mHdr10PlusOutputs;
DISALLOW_EVIL_CONSTRUCTORS(SoftVideoDecoderOMXComponent);
};
diff --git a/media/libstagefright/xmlparser/Android.bp b/media/libstagefright/xmlparser/Android.bp
index b55dbb0..bebfb3b 100644
--- a/media/libstagefright/xmlparser/Android.bp
+++ b/media/libstagefright/xmlparser/Android.bp
@@ -1,3 +1,9 @@
+cc_library_headers {
+ name: "libstagefright_xmlparser_headers",
+ export_include_dirs: ["include"],
+ vendor_available: true,
+}
+
cc_library_shared {
name: "libstagefright_xmlparser",
vendor_available: true,
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 6976950..73bd2ca 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -88,7 +88,6 @@
"libandroid",
"libandroid_runtime",
"libbinder",
- "libhwbinder",
"libhidlbase",
"libgui",
"libui",
@@ -141,10 +140,6 @@
],
shared_libs: [
- "libstagefright_foundation",
- "liblog",
- "libutils",
- "libcutils",
],
sanitize: {
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index e0af80d..fcb706d 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -294,9 +294,11 @@
EXPORT const char* AMEDIAFORMAT_KEY_COMPOSER = "composer";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE = "crypto-default-iv-size";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK = "crypto-encrypted-byte-block";
+EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES = "crypto-encrypted-sizes";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_IV = "crypto-iv";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_KEY = "crypto-key";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_MODE = "crypto-mode";
+EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES = "crypto-plain-sizes";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_SKIP_BYTE_BLOCK = "crypto-skip-byte-block";
EXPORT const char* AMEDIAFORMAT_KEY_CSD = "csd";
EXPORT const char* AMEDIAFORMAT_KEY_CSD_0 = "csd-0";
@@ -361,6 +363,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_SEI = "sei";
EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
+EXPORT const char* AMEDIAFORMAT_KEY_TARGET_TIME = "target-time";
EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT = "temporal-layer-count";
EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID = "temporal-layer-id";
EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING = "ts-schema";
@@ -374,6 +377,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_TITLE = "title";
EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
EXPORT const char* AMEDIAFORMAT_KEY_TRACK_INDEX = "track-index";
+EXPORT const char* AMEDIAFORMAT_KEY_VALID_SAMPLES = "valid-samples";
EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
EXPORT const char* AMEDIAFORMAT_KEY_YEAR = "year";
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 2cd1d04..2551228 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -190,9 +190,11 @@
extern const char* AMEDIAFORMAT_KEY_COMPOSER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_IV __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_KEY __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_MODE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_SKIP_BYTE_BLOCK __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CSD_AVC __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CSD_HEVC __INTRODUCED_IN(29);
@@ -217,12 +219,14 @@
extern const char* AMEDIAFORMAT_KEY_PSSH __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_SAR_HEIGHT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_SAR_WIDTH __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_TARGET_TIME __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_THUMBNAIL_HEIGHT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_THUMBNAIL_TIME __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_THUMBNAIL_WIDTH __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_TITLE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_VALID_SAMPLES __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_YEAR __INTRODUCED_IN(29);
#endif /* __ANDROID_API__ >= 29 */
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 3567899..c50084e 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -58,9 +58,11 @@
AMEDIAFORMAT_KEY_COMPOSER; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK; # var introduced=29
+ AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_IV; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_KEY; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_MODE; # var introduced=29
+ AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_SKIP_BYTE_BLOCK; # var introduced=29
AMEDIAFORMAT_KEY_CSD; # var introduced=28
AMEDIAFORMAT_KEY_CSD_0; # var introduced=28
@@ -124,6 +126,7 @@
AMEDIAFORMAT_KEY_SEI; # var introduced=28
AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
+ AMEDIAFORMAT_KEY_TARGET_TIME; # var introduced=29
AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT; # var introduced=29
AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID; # var introduced=28
AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
@@ -137,6 +140,7 @@
AMEDIAFORMAT_KEY_TIME_US; # var introduced=28
AMEDIAFORMAT_KEY_TRACK_INDEX; # var introduced=28
AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
+ AMEDIAFORMAT_KEY_VALID_SAMPLES; # var introduced=29
AMEDIAFORMAT_KEY_WIDTH; # var introduced=21
AMEDIAFORMAT_KEY_YEAR; # var introduced=29
AMediaCodecActionCode_isRecoverable; # introduced=28
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index b05e022..a11602b 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -27,7 +27,6 @@
],
shared_libs: [
"libbinder",
- "libcutils",
"liblog",
"libutils",
"libmemunreachable",
diff --git a/packages/MediaComponents/apex/Android.bp b/packages/MediaComponents/apex/Android.bp
index e797e14..d89eb77 100644
--- a/packages/MediaComponents/apex/Android.bp
+++ b/packages/MediaComponents/apex/Android.bp
@@ -9,6 +9,8 @@
// "Refusing to generate code with unstructured parcelables."
"java/android/media/MediaDescription.aidl",
"java/android/media/MediaMetadata.aidl",
+ // TODO(insun): check why MediaParceledListSlice.aidl should be added here
+ "java/android/media/MediaParceledListSlice.aidl",
"java/android/media/Rating.aidl",
"java/android/media/browse/MediaBrowser.aidl",
"java/android/media/session/MediaSession.aidl",
diff --git a/packages/MediaComponents/apex/java/android/media/MediaMetadata.java b/packages/MediaComponents/apex/java/android/media/MediaMetadata.java
index 33e6916..adfd20b 100644
--- a/packages/MediaComponents/apex/java/android/media/MediaMetadata.java
+++ b/packages/MediaComponents/apex/java/android/media/MediaMetadata.java
@@ -422,9 +422,7 @@
}
private MediaMetadata(Parcel in) {
- //TODO(b/119789387): Resolve hidden API usage: Bundle#setDefusable
- //mBundle = Bundle.setDefusable(in.readBundle(), true);
- mBundle = new Bundle(); //TODO:remove this.
+ mBundle = in.readBundle();
}
/**
diff --git a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl
new file mode 100644
index 0000000..228ea9c
--- /dev/null
+++ b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl
@@ -0,0 +1,19 @@
+/* Copyright (C) 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/** @hide */
+parcelable MediaParceledListSlice;
\ No newline at end of file
diff --git a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java
new file mode 100644
index 0000000..ec3fdb7
--- /dev/null
+++ b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.annotation.UnsupportedAppUsage;
+import android.os.Binder;
+import android.os.Build;
+import android.os.IBinder;
+import android.os.Parcel;
+import android.os.Parcelable;
+import android.os.RemoteException;
+import android.util.Log;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Transfer a large list of objects across an IPC. Splits into multiple transactions if needed.
+ * Note: Only use classes declared final in order to avoid subclasses overriding reading/writing
+ * parcel logic.
+ *
+ * TODO: Add test for sending large data
+ * @hide
+ */
+public class MediaParceledListSlice<T extends Parcelable> implements Parcelable {
+ private static final String TAG = "MediaParceledListSlice";
+ private static final boolean DEBUG = false;
+
+ private static final int MAX_IPC_SIZE = 64 * 1024; // IBinder.MAX_IPC_SIZE
+
+ final List<T> mList;
+
+ public MediaParceledListSlice(List<T> list) {
+ if (list == null) {
+ throw new IllegalArgumentException("list shouldn't be null");
+ }
+ mList = list;
+ }
+
+ MediaParceledListSlice(Parcel p) {
+ final int itemCount = p.readInt();
+ mList = new ArrayList<>(itemCount);
+ if (DEBUG) {
+ Log.d(TAG, "Retrieving " + itemCount + " items");
+ }
+ if (itemCount <= 0) {
+ return;
+ }
+
+ int i = 0;
+ while (i < itemCount) {
+ if (p.readInt() == 0) {
+ break;
+ }
+
+ final T parcelable = p.readParcelable(null);
+ mList.add(parcelable);
+
+ if (DEBUG) {
+ Log.d(TAG, "Read inline #" + i + ": " + mList.get(mList.size() - 1));
+ }
+ i++;
+ }
+ if (i >= itemCount) {
+ return;
+ }
+ final IBinder retriever = p.readStrongBinder();
+ while (i < itemCount) {
+ if (DEBUG) {
+ Log.d(TAG, "Reading more @" + i + " of " + itemCount + ": retriever=" + retriever);
+ }
+ Parcel data = Parcel.obtain();
+ Parcel reply = Parcel.obtain();
+ data.writeInt(i);
+ try {
+ retriever.transact(IBinder.FIRST_CALL_TRANSACTION, data, reply, 0);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Failure retrieving array; only received " + i + " of " + itemCount, e);
+ return;
+ }
+ while (i < itemCount && reply.readInt() != 0) {
+ final T parcelable = reply.readParcelable(null);
+ mList.add(parcelable);
+
+ if (DEBUG) {
+ Log.d(TAG, "Read extra #" + i + ": " + mList.get(mList.size() - 1));
+ }
+ i++;
+ }
+ reply.recycle();
+ data.recycle();
+ }
+ }
+
+ public List<T> getList() {
+ return mList;
+ }
+
+ /**
+ * Write this list to another Parcel. Items that fit within the IPC size limit are
+ * written inline; the rest are served lazily through a Binder object written into the
+ * Parcel, which the reading side transacts with to retrieve the remaining items.
+ */
+ @Override
+ public void writeToParcel(Parcel dest, int flags) {
+ final int itemCount = mList.size();
+ dest.writeInt(itemCount);
+ if (DEBUG) {
+ Log.d(TAG, "Writing " + itemCount + " items");
+ }
+ if (itemCount > 0) {
+ int i = 0;
+ while (i < itemCount && dest.dataSize() < MAX_IPC_SIZE) {
+ dest.writeInt(1);
+
+ final T parcelable = mList.get(i);
+ dest.writeParcelable(parcelable, flags);
+
+ if (DEBUG) {
+ Log.d(TAG, "Wrote inline #" + i + ": " + mList.get(i));
+ }
+ i++;
+ }
+ if (i < itemCount) {
+ dest.writeInt(0);
+ Binder retriever = new Binder() {
+ @Override
+ protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
+ throws RemoteException {
+ if (code != FIRST_CALL_TRANSACTION) {
+ return super.onTransact(code, data, reply, flags);
+ }
+ int i = data.readInt();
+ if (DEBUG) {
+ Log.d(TAG, "Writing more @" + i + " of " + itemCount);
+ }
+ while (i < itemCount && reply.dataSize() < MAX_IPC_SIZE) {
+ reply.writeInt(1);
+
+ final T parcelable = mList.get(i);
+ reply.writeParcelable(parcelable, flags);
+
+ if (DEBUG) {
+ Log.d(TAG, "Wrote extra #" + i + ": " + mList.get(i));
+ }
+ i++;
+ }
+ if (i < itemCount) {
+ if (DEBUG) {
+ Log.d(TAG, "Breaking @" + i + " of " + itemCount);
+ }
+ reply.writeInt(0);
+ }
+ return true;
+ }
+ };
+ if (DEBUG) {
+ Log.d(TAG, "Breaking @" + i + " of " + itemCount + ": retriever=" + retriever);
+ }
+ dest.writeStrongBinder(retriever);
+ }
+ }
+ }
+
+ @Override
+ public int describeContents() {
+ int contents = 0;
+ final List<T> list = getList();
+ for (int i = 0; i < list.size(); i++) {
+ contents |= list.get(i).describeContents();
+ }
+ return contents;
+ }
+
+ public static final Parcelable.Creator<MediaParceledListSlice> CREATOR =
+ new Parcelable.Creator<MediaParceledListSlice>() {
+ @Override
+ public MediaParceledListSlice createFromParcel(Parcel in) {
+ return new MediaParceledListSlice(in);
+ }
+
+ @Override
+ public MediaParceledListSlice[] newArray(int size) {
+ return new MediaParceledListSlice[size];
+ }
+ };
+}
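
For reference, a minimal usage sketch of the new class (illustrative only: it assumes an Android runtime, buildQueue() is a hypothetical helper, and in practice the slice is handed across the AIDL interfaces changed below rather than parceled by hand):

    // Sketch: round-trip a list through a Parcel. Small lists are written inline;
    // once writeToParcel() reaches MAX_IPC_SIZE it appends a Binder that
    // createFromParcel() transacts with to fetch the remaining items.
    List<MediaSession.QueueItem> queue = buildQueue();  // hypothetical helper
    MediaParceledListSlice<MediaSession.QueueItem> slice = new MediaParceledListSlice<>(queue);

    Parcel parcel = Parcel.obtain();
    try {
        slice.writeToParcel(parcel, 0);
        parcel.setDataPosition(0);
        MediaParceledListSlice<?> copy = MediaParceledListSlice.CREATOR.createFromParcel(parcel);
        List<?> items = copy.getList();  // same contents as queue
    } finally {
        parcel.recycle();
    }

The same pattern is what the ISession.setQueue() and IMediaBrowserServiceCallbacks.onLoadChildren() signatures below rely on.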
diff --git a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java b/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
index 4e091ad..b1b14c6 100644
--- a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
+++ b/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
@@ -23,8 +23,8 @@
import android.content.Context;
import android.content.Intent;
import android.content.ServiceConnection;
-//import android.content.pm.ParceledListSlice;
import android.media.MediaDescription;
+import android.media.MediaParceledListSlice;
import android.media.session.MediaController;
import android.media.session.MediaSession;
import android.os.Binder;
@@ -652,10 +652,8 @@
});
}
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- /*
private final void onLoadChildren(final IMediaBrowserServiceCallbacks callback,
- final String parentId, final ParceledListSlice list, final Bundle options) {
+ final String parentId, final MediaParceledListSlice list, final Bundle options) {
mHandler.post(new Runnable() {
@Override
public void run() {
@@ -699,7 +697,6 @@
}
});
}
- */
/**
* Return true if {@code callback} is the current ServiceCallbacks. Also logs if it's not.
@@ -1109,22 +1106,19 @@
}
}
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- /*
@Override
- public void onLoadChildren(String parentId, ParceledListSlice list) {
+ public void onLoadChildren(String parentId, MediaParceledListSlice list) {
onLoadChildrenWithOptions(parentId, list, null);
}
@Override
- public void onLoadChildrenWithOptions(String parentId, ParceledListSlice list,
+ public void onLoadChildrenWithOptions(String parentId, MediaParceledListSlice list,
final Bundle options) {
MediaBrowser mediaBrowser = mMediaBrowser.get();
if (mediaBrowser != null) {
mediaBrowser.onLoadChildren(this, parentId, list, options);
}
}
- */
}
private static class Subscription {
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISession.aidl b/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
index cbd93cb..14b1c64 100644
--- a/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
+++ b/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
@@ -16,9 +16,9 @@
package android.media.session;
import android.app.PendingIntent;
-import android.content.pm.ParceledListSlice;
//import android.media.AudioAttributes;
import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
import android.media.session.ISessionController;
import android.media.session.PlaybackState;
import android.media.session.MediaSession;
@@ -39,10 +39,9 @@
void destroy();
// These commands are for the TransportPerformer
- void setMetadata(in MediaMetadata metadata);
+ void setMetadata(in MediaMetadata metadata, long duration, String metadataDescription);
void setPlaybackState(in PlaybackState state);
- //TODO(b/119750807): Resolve hidden API usage ParceledListSlice.
- //void setQueue(in ParceledListSlice queue);
+ void setQueue(in MediaParceledListSlice queue);
void setQueueTitle(CharSequence title);
void setExtras(in Bundle extras);
void setRatingType(int type);
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
index 031a388..433b12f 100644
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
+++ b/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
@@ -17,8 +17,8 @@
import android.app.PendingIntent;
import android.content.Intent;
-//import android.content.pm.ParceledListSlice;
import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
import android.media.Rating;
import android.media.session.ISessionControllerCallback;
import android.media.session.MediaSession;
@@ -48,9 +48,9 @@
PendingIntent getLaunchPendingIntent();
long getFlags();
ParcelableVolumeInfo getVolumeAttributes();
- void adjustVolume(String packageName, ISessionControllerCallback caller,
+ void adjustVolume(String packageName, String opPackageName, ISessionControllerCallback caller,
boolean asSystemService, int direction, int flags);
- void setVolumeTo(String packageName, ISessionControllerCallback caller,
+ void setVolumeTo(String packageName, String opPackageName, ISessionControllerCallback caller,
int value, int flags);
// These commands are for the TransportControls
@@ -81,8 +81,7 @@
String action, in Bundle args);
MediaMetadata getMetadata();
PlaybackState getPlaybackState();
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- //ParceledListSlice getQueue();
+ MediaParceledListSlice getQueue();
CharSequence getQueueTitle();
Bundle getExtras();
int getRatingType();
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
index 173504b..f5cc4f6 100644
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
+++ b/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
@@ -15,8 +15,8 @@
package android.media.session;
-//import android.content.pm.ParceledListSlice;
import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
import android.media.session.ParcelableVolumeInfo;
import android.media.session.PlaybackState;
import android.media.session.MediaSession;
@@ -32,8 +32,7 @@
// These callbacks are for the TransportController
void onPlaybackStateChanged(in PlaybackState state);
void onMetadataChanged(in MediaMetadata metadata);
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- //void onQueueChanged(in ParceledListSlice queue);
+ void onQueueChanged(in MediaParceledListSlice queue);
void onQueueTitleChanged(CharSequence title);
void onExtrasChanged(in Bundle extras);
void onVolumeInfoChanged(in ParcelableVolumeInfo info);
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionManager.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionManager.aidl
index 3578c16..d6c226f 100644
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionManager.aidl
+++ b/packages/MediaComponents/apex/java/android/media/session/ISessionManager.aidl
@@ -17,7 +17,6 @@
import android.content.ComponentName;
import android.media.IRemoteVolumeController;
-import android.media.ISessionTokensListener;
import android.media.session.IActiveSessionsListener;
import android.media.session.ICallback;
import android.media.session.IOnMediaKeyListener;
@@ -36,9 +35,10 @@
List<IBinder> getSessions(in ComponentName compName, int userId);
void dispatchMediaKeyEvent(String packageName, boolean asSystemService, in KeyEvent keyEvent,
boolean needWakeLock);
- void dispatchVolumeKeyEvent(String packageName, boolean asSystemService, in KeyEvent keyEvent,
- int stream, boolean musicOnly);
- void dispatchAdjustVolume(String packageName, int suggestedStream, int delta, int flags);
+ void dispatchVolumeKeyEvent(String packageName, String opPackageName, boolean asSystemService,
+ in KeyEvent keyEvent, int stream, boolean musicOnly);
+ void dispatchAdjustVolume(String packageName, String opPackageName, int suggestedStream,
+ int delta, int flags);
void addSessionsListener(in IActiveSessionsListener listener, in ComponentName compName,
int userId);
void removeSessionsListener(in IActiveSessionsListener listener);
@@ -55,12 +55,4 @@
// MediaSession2
boolean isTrusted(String controllerPackageName, int controllerPid, int controllerUid);
- boolean createSession2(in Bundle sessionToken);
- void destroySession2(in Bundle sessionToken);
- List<Bundle> getSessionTokens(boolean activeSessionOnly, boolean sessionServiceOnly,
- String packageName);
-
- void addSessionTokensListener(in ISessionTokensListener listener, int userId,
- String packageName);
- void removeSessionTokensListener(in ISessionTokensListener listener, String packageName);
}
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaController.java b/packages/MediaComponents/apex/java/android/media/session/MediaController.java
index 60f74ab..eaebcfb 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaController.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaController.java
@@ -21,10 +21,10 @@
import android.annotation.UnsupportedAppUsage;
import android.app.PendingIntent;
import android.content.Context;
-//import android.content.pm.ParceledListSlice;
import android.media.AudioAttributes;
import android.media.AudioManager;
import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
import android.media.Rating;
import android.media.VolumeProvider;
import android.net.Uri;
@@ -153,9 +153,7 @@
return false;
}
try {
- //TODO(b/119748678): Resolve mContext.getOpPackageName() through this file.
- // Temporarilly it's replaced with "mContext.getOpPackageName()" for compiling.
- return mSessionBinder.sendMediaButton("mContext.getOpPackageName()", mCbStub,
+ return mSessionBinder.sendMediaButton(mContext.getPackageName(), mCbStub,
asSystemService, keyEvent);
} catch (RemoteException e) {
// System is dead. =(
@@ -188,8 +186,9 @@
break;
}
try {
- mSessionBinder.adjustVolume("mContext.getOpPackageName()", mCbStub, true,
- direction, AudioManager.FLAG_SHOW_UI);
+ mSessionBinder.adjustVolume(mContext.getPackageName(),
+ mContext.getOpPackageName(), mCbStub, true, direction,
+ AudioManager.FLAG_SHOW_UI);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling adjustVolumeBy", e);
}
@@ -199,8 +198,8 @@
final int flags = AudioManager.FLAG_PLAY_SOUND | AudioManager.FLAG_VIBRATE
| AudioManager.FLAG_FROM_KEY;
try {
- mSessionBinder.adjustVolume("mContext.getOpPackageName()", mCbStub, true, 0,
- flags);
+ mSessionBinder.adjustVolume(mContext.getPackageName(),
+ mContext.getOpPackageName(), mCbStub, true, 0, flags);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling adjustVolumeBy", e);
}
@@ -243,17 +242,14 @@
* @return The current play queue or null.
*/
public @Nullable List<MediaSession.QueueItem> getQueue() {
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- /*
try {
- ParceledListSlice queue = mSessionBinder.getQueue();
+ MediaParceledListSlice queue = mSessionBinder.getQueue();
if (queue != null) {
return queue.getList();
}
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling getQueue.", e);
}
- */
return null;
}
@@ -372,7 +368,8 @@
*/
public void setVolumeTo(int value, int flags) {
try {
- mSessionBinder.setVolumeTo("mContext.getOpPackageName()", mCbStub, value, flags);
+ mSessionBinder.setVolumeTo(mContext.getPackageName(), mContext.getOpPackageName(),
+ mCbStub, value, flags);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling setVolumeTo.", e);
}
@@ -393,8 +390,8 @@
*/
public void adjustVolume(int direction, int flags) {
try {
- mSessionBinder.adjustVolume("mContext.getOpPackageName()", mCbStub, false, direction,
- flags);
+ mSessionBinder.adjustVolume(mContext.getPackageName(), mContext.getOpPackageName(),
+ mCbStub, false, direction, flags);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling adjustVolumeBy.", e);
}
@@ -460,7 +457,7 @@
throw new IllegalArgumentException("command cannot be null or empty");
}
try {
- mSessionBinder.sendCommand("mContext.getOpPackageName()", mCbStub, command, args, cb);
+ mSessionBinder.sendCommand(mContext.getPackageName(), mCbStub, command, args, cb);
} catch (RemoteException e) {
Log.d(TAG, "Dead object in sendCommand.", e);
}
@@ -526,7 +523,7 @@
if (!mCbRegistered) {
try {
- mSessionBinder.registerCallbackListener("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.registerCallbackListener(mContext.getPackageName(), mCbStub);
mCbRegistered = true;
} catch (RemoteException e) {
Log.e(TAG, "Dead object in registerCallback", e);
@@ -673,7 +670,7 @@
*/
public void prepare() {
try {
- mSessionBinder.prepare("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.prepare(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling prepare.", e);
}
@@ -697,7 +694,7 @@
"You must specify a non-empty String for prepareFromMediaId.");
}
try {
- mSessionBinder.prepareFromMediaId("mContext.getOpPackageName()", mCbStub, mediaId,
+ mSessionBinder.prepareFromMediaId(mContext.getPackageName(), mCbStub, mediaId,
extras);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling prepare(" + mediaId + ").", e);
@@ -724,7 +721,7 @@
query = "";
}
try {
- mSessionBinder.prepareFromSearch("mContext.getOpPackageName()", mCbStub, query,
+ mSessionBinder.prepareFromSearch(mContext.getPackageName(), mCbStub, query,
extras);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling prepare(" + query + ").", e);
@@ -749,7 +746,7 @@
"You must specify a non-empty Uri for prepareFromUri.");
}
try {
- mSessionBinder.prepareFromUri("mContext.getOpPackageName()", mCbStub, uri, extras);
+ mSessionBinder.prepareFromUri(mContext.getPackageName(), mCbStub, uri, extras);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling prepare(" + uri + ").", e);
}
@@ -760,7 +757,7 @@
*/
public void play() {
try {
- mSessionBinder.play("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.play(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling play.", e);
}
@@ -779,7 +776,7 @@
"You must specify a non-empty String for playFromMediaId.");
}
try {
- mSessionBinder.playFromMediaId("mContext.getOpPackageName()", mCbStub, mediaId,
+ mSessionBinder.playFromMediaId(mContext.getPackageName(), mCbStub, mediaId,
extras);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling play(" + mediaId + ").", e);
@@ -802,7 +799,7 @@
query = "";
}
try {
- mSessionBinder.playFromSearch("mContext.getOpPackageName()", mCbStub, query, extras);
+ mSessionBinder.playFromSearch(mContext.getPackageName(), mCbStub, query, extras);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling play(" + query + ").", e);
}
@@ -821,7 +818,7 @@
"You must specify a non-empty Uri for playFromUri.");
}
try {
- mSessionBinder.playFromUri("mContext.getOpPackageName()", mCbStub, uri, extras);
+ mSessionBinder.playFromUri(mContext.getPackageName(), mCbStub, uri, extras);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling play(" + uri + ").", e);
}
@@ -833,7 +830,7 @@
*/
public void skipToQueueItem(long id) {
try {
- mSessionBinder.skipToQueueItem("mContext.getOpPackageName()", mCbStub, id);
+ mSessionBinder.skipToQueueItem(mContext.getPackageName(), mCbStub, id);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling skipToItem(" + id + ").", e);
}
@@ -845,7 +842,7 @@
*/
public void pause() {
try {
- mSessionBinder.pause("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.pause(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling pause.", e);
}
@@ -857,7 +854,7 @@
*/
public void stop() {
try {
- mSessionBinder.stop("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.stop(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling stop.", e);
}
@@ -870,7 +867,7 @@
*/
public void seekTo(long pos) {
try {
- mSessionBinder.seekTo("mContext.getOpPackageName()", mCbStub, pos);
+ mSessionBinder.seekTo(mContext.getPackageName(), mCbStub, pos);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling seekTo.", e);
}
@@ -882,7 +879,7 @@
*/
public void fastForward() {
try {
- mSessionBinder.fastForward("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.fastForward(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling fastForward.", e);
}
@@ -893,7 +890,7 @@
*/
public void skipToNext() {
try {
- mSessionBinder.next("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.next(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling next.", e);
}
@@ -905,7 +902,7 @@
*/
public void rewind() {
try {
- mSessionBinder.rewind("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.rewind(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling rewind.", e);
}
@@ -916,7 +913,7 @@
*/
public void skipToPrevious() {
try {
- mSessionBinder.previous("mContext.getOpPackageName()", mCbStub);
+ mSessionBinder.previous(mContext.getPackageName(), mCbStub);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling previous.", e);
}
@@ -931,7 +928,7 @@
*/
public void setRating(Rating rating) {
try {
- mSessionBinder.rate("mContext.getOpPackageName()", mCbStub, rating);
+ mSessionBinder.rate(mContext.getPackageName(), mCbStub, rating);
} catch (RemoteException e) {
Log.wtf(TAG, "Error calling rate.", e);
}
@@ -966,7 +963,7 @@
throw new IllegalArgumentException("CustomAction cannot be null.");
}
try {
- mSessionBinder.sendCustomAction("mContext.getOpPackageName()", mCbStub, action, args);
+ mSessionBinder.sendCustomAction(mContext.getPackageName(), mCbStub, action, args);
} catch (RemoteException e) {
Log.d(TAG, "Dead object in sendCustomAction.", e);
}
@@ -1102,10 +1099,8 @@
}
}
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- /*
@Override
- public void onQueueChanged(ParceledListSlice parceledQueue) {
+ public void onQueueChanged(MediaParceledListSlice parceledQueue) {
List<MediaSession.QueueItem> queue = parceledQueue == null ? null : parceledQueue
.getList();
MediaController controller = mController.get();
@@ -1113,7 +1108,6 @@
controller.postMessage(MSG_UPDATE_QUEUE, queue, null);
}
}
- */
@Override
public void onQueueTitleChanged(CharSequence title) {
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
index 04dc0b8..943843d 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
@@ -24,12 +24,13 @@
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
-//import android.content.pm.ParceledListSlice;
import android.media.AudioAttributes;
import android.media.MediaDescription;
import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
import android.media.Rating;
import android.media.VolumeProvider;
+import android.media.session.MediaSessionManager.RemoteUserInfo;
import android.net.Uri;
import android.os.Bundle;
import android.os.Handler;
@@ -40,7 +41,6 @@
import android.os.RemoteException;
import android.os.ResultReceiver;
import android.os.UserHandle;
-import android.media.session.MediaSessionManager.RemoteUserInfo;
import android.service.media.MediaBrowserService;
import android.text.TextUtils;
import android.util.Log;
@@ -171,10 +171,8 @@
if (TextUtils.isEmpty(tag)) {
throw new IllegalArgumentException("tag cannot be null or empty");
}
- //TODO(b/119749798): Resolve hidden API usage. com.android.internal.R
- //mMaxBitmapSize = context.getResources().getDimensionPixelSize(
- //com.android.internal.R.dimen.config_mediaMetadataBitmapMaxSize);
- mMaxBitmapSize = 1024; //TODO: remove this.
+ mMaxBitmapSize = context.getResources().getDimensionPixelSize(
+ android.R.dimen.config_mediaMetadataBitmapMaxSize);
mCbStub = new CallbackStub(this);
MediaSessionManager manager = (MediaSessionManager) context
.getSystemService(Context.MEDIA_SESSION_SERVICE);
@@ -441,11 +439,21 @@
* @see android.media.MediaMetadata.Builder#putBitmap
*/
public void setMetadata(@Nullable MediaMetadata metadata) {
+ long duration = -1;
+ int fields = 0;
+ MediaDescription description = null;
if (metadata != null) {
metadata = (new MediaMetadata.Builder(metadata, mMaxBitmapSize)).build();
+ if (metadata.containsKey(MediaMetadata.METADATA_KEY_DURATION)) {
+ duration = metadata.getLong(MediaMetadata.METADATA_KEY_DURATION);
+ }
+ fields = metadata.size();
+ description = metadata.getDescription();
}
+ String metadataDescription = "size=" + fields + ", description=" + description;
+
try {
- mBinder.setMetadata(metadata);
+ mBinder.setMetadata(metadata, duration, metadataDescription);
} catch (RemoteException e) {
Log.wtf(TAG, "Dead object in setPlaybackState.", e);
}
@@ -463,14 +471,11 @@
* @param queue A list of items in the play queue.
*/
public void setQueue(@Nullable List<QueueItem> queue) {
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- /*
try {
- mBinder.setQueue(queue == null ? null : new ParceledListSlice<QueueItem>(queue));
+ mBinder.setQueue(queue == null ? null : new MediaParceledListSlice<QueueItem>(queue));
} catch (RemoteException e) {
Log.wtf("Dead object in setQueue.", e);
}
- */
}
/**
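
A short sketch of the values the reworked setMetadata() path above derives before hitting the binder (illustrative only; the exact description text depends on MediaDescription#toString()):

    // Sketch: a duration and a compact, log-friendly summary are now sent alongside
    // the metadata itself via ISession.setMetadata(metadata, duration, metadataDescription).
    MediaMetadata metadata = new MediaMetadata.Builder()
            .putString(MediaMetadata.METADATA_KEY_TITLE, "Track A")
            .putLong(MediaMetadata.METADATA_KEY_DURATION, 183000L)
            .build();

    long duration = metadata.containsKey(MediaMetadata.METADATA_KEY_DURATION)
            ? metadata.getLong(MediaMetadata.METADATA_KEY_DURATION) : -1;  // 183000
    String metadataDescription = "size=" + metadata.size()
            + ", description=" + metadata.getDescription();  // e.g. "size=2, description=Track A, null, null"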
diff --git a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl b/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
index bcc2826..8dc480d 100644
--- a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
+++ b/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
@@ -2,8 +2,8 @@
package android.service.media;
-//import android.content.pm.ParceledListSlice;
import android.graphics.Bitmap;
+import android.media.MediaParceledListSlice;
import android.media.session.MediaSession;
import android.os.Bundle;
@@ -22,7 +22,7 @@
*/
void onConnect(String root, in MediaSession.Token session, in Bundle extras);
void onConnectFailed();
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- //void onLoadChildren(String mediaId, in ParceledListSlice list);
- //void onLoadChildrenWithOptions(String mediaId, in ParceledListSlice list, in Bundle options);
+ void onLoadChildren(String mediaId, in MediaParceledListSlice list);
+ void onLoadChildrenWithOptions(String mediaId, in MediaParceledListSlice list,
+ in Bundle options);
}
diff --git a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
index fa7696e..a66ec35 100644
--- a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
+++ b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
@@ -25,7 +25,7 @@
import android.app.Service;
import android.content.Intent;
import android.content.pm.PackageManager;
-//import android.content.pm.ParceledListSlice;
+import android.media.MediaParceledListSlice;
import android.media.browse.MediaBrowser;
import android.media.browse.MediaBrowserUtils;
import android.media.session.MediaSession;
@@ -687,10 +687,8 @@
List<MediaBrowser.MediaItem> filteredList =
(flag & RESULT_FLAG_OPTION_NOT_HANDLED) != 0
? applyOptions(list, options) : list;
- //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
- /*
- final ParceledListSlice<MediaBrowser.MediaItem> pls =
- filteredList == null ? null : new ParceledListSlice<>(filteredList);
+ final MediaParceledListSlice<MediaBrowser.MediaItem> pls =
+ filteredList == null ? null : new MediaParceledListSlice<>(filteredList);
try {
connection.callbacks.onLoadChildrenWithOptions(parentId, pls, options);
} catch (RemoteException ex) {
@@ -698,7 +696,6 @@
Log.w(TAG, "Calling onLoadChildren() failed for id=" + parentId
+ " package=" + connection.pkg);
}
- */
}
};
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 02ab8ad..d71aa15 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -25,7 +25,11 @@
libmedia_helper \
libmediametrics \
libmediautils \
- libeffectsconfig
+ libeffectsconfig \
+ libsensorprivacy
+
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := \
+ libsensorprivacy
LOCAL_STATIC_LIBRARIES := \
libaudiopolicycomponents
@@ -47,6 +51,7 @@
libcutils \
libutils \
liblog \
+ libaudioclient \
libsoundtrigger
ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index ea6389c..1c2b9d7 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -210,6 +210,10 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes) = 0;
virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes) = 0;
+ virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+ = 0;
+ virtual status_t removeUidDeviceAffinities(uid_t uid) = 0;
+
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
audio_port_handle_t *portId,
@@ -332,10 +336,13 @@
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
virtual void onRecordingConfigurationUpdate(int event,
- const record_client_info_t *clientInfo,
- const struct audio_config_base *clientConfig,
- const struct audio_config_base *deviceConfig,
- audio_patch_handle_t patchHandle) = 0;
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source) = 0;
};
extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 9f8b8c0..fa9ba0b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -23,6 +23,7 @@
#include "AudioIODescriptorInterface.h"
#include "AudioPort.h"
#include "ClientDescriptor.h"
+#include "EffectDescriptor.h"
namespace android {
@@ -62,7 +63,8 @@
bool isSoundTrigger() const;
void setClientActive(const sp<RecordClientDescriptor>& client, bool active);
int32_t activeCount() { return mGlobalActiveCount; }
-
+ void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
+ EffectDescriptorCollection getEnabledEffects() const;
// implementation of AudioIODescriptorInterface
audio_config_base_t getConfig() const override;
audio_patch_handle_t getPatchHandle() const override;
@@ -86,6 +88,11 @@
RecordClientVector clientsList(bool activeOnly = false,
audio_source_t source = AUDIO_SOURCE_DEFAULT, bool preferredDeviceOnly = false) const;
+ void setAppState(uid_t uid, app_state_t state);
+
+ // implementation of ClientMapHandler<RecordClientDescriptor>
+ void addClient(const sp<RecordClientDescriptor> &client) override;
+
private:
void updateClientRecordingConfiguration(int event, const sp<RecordClientDescriptor>& client);
@@ -101,6 +108,7 @@
SortedVector<audio_session_t> mPreemptedSessions;
AudioPolicyClientInterface * const mClientInterface;
int32_t mGlobalActiveCount = 0; // non-client-specific activity ref count
+ EffectDescriptorCollection mEnabledEffects;
};
class AudioInputCollection :
@@ -126,6 +134,8 @@
sp<AudioInputDescriptor> getInputForClient(audio_port_handle_t portId);
+ void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
+
void dump(String8 *dst) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index 96c00ea..955e87b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -80,6 +80,10 @@
status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
+ status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
+ status_t removeUidDeviceAffinities(uid_t uid);
+ status_t getDevicesForUid(uid_t uid, Vector<AudioDeviceTypeAddr>& devices) const;
+
void dump(String8 *dst) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index ebb9352..bb9cad8 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -116,6 +116,7 @@
audio_module_handle_t getModuleHandle() const;
uint32_t getModuleVersionMajor() const;
const char *getModuleName() const;
+ sp<HwModule> getModule() const { return mModule; }
bool useInputChannelMask() const
{
@@ -137,12 +138,12 @@
void log(const char* indent) const;
AudioGainCollection mGains; // gain controllers
- sp<HwModule> mModule; // audio HW module exposing this I/O stream
private:
void pickChannelMask(audio_channel_mask_t &channelMask, const ChannelsVector &channelMasks) const;
void pickSamplingRate(uint32_t &rate,const SampleRateVector &samplingRates) const;
+ sp<HwModule> mModule; // audio HW module exposing this I/O stream
String8 mName;
audio_port_type_t mType;
audio_port_role_t mRole;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
index 330f1d4..0357ff4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
@@ -46,6 +46,19 @@
audio_route_type_t getType() const { return mType; }
+ /**
+ * @brief supportsPatch checks if an audio patch is supported by a Route declared in
+ * the audio_policy_configuration.xml file.
+ * If the patch is supported natively by an AudioHAL (i.e. one that supports Routing API 3.0),
+ * audiopolicy will not request AudioFlinger to use a software bridge to realize a patch
+ * between 2 ports.
+ * @param srcPort (aka the source) to be considered
+ * @param dstPort (aka the sink) to be considered
+ * @return true if the audio route supports the connection between the sink and the source,
+ * false otherwise
+ */
+ bool supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const;
+
void dump(String8 *dst, int spaces) const;
private:
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 986d109..a187029 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -28,6 +28,7 @@
#include <utils/RefBase.h>
#include <utils/String8.h>
#include "AudioPatch.h"
+#include "EffectDescriptor.h"
#include "RoutingStrategy.h"
namespace android {
@@ -119,13 +120,15 @@
void setAppState(app_state_t appState) { mAppState = appState; }
app_state_t appState() { return mAppState; }
bool isSilenced() const { return mAppState == APP_STATE_IDLE; }
+ void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
+ EffectDescriptorCollection getEnabledEffects() const { return mEnabledEffects; }
private:
const audio_source_t mSource;
const audio_input_flags_t mFlags;
const bool mIsSoundTrigger;
app_state_t mAppState;
-
+ EffectDescriptorCollection mEnabledEffects;
};
class SourceClientDescriptor: public TrackClientDescriptor
@@ -172,7 +175,7 @@
virtual ~ClientMapHandler() = default;
// Track client management
- void addClient(const sp<T> &client) {
+ virtual void addClient(const sp<T> &client) {
const audio_port_handle_t portId = client->portId();
LOG_ALWAYS_FATAL_IF(!mClients.emplace(portId, client).second,
"%s(%d): attempting to add client that already exists", __func__, portId);
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 6f99bf3..d02123c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -39,6 +39,8 @@
virtual const String8 getTagName() const { return mTagName; }
audio_devices_t type() const { return mDeviceType; }
+ String8 address() const { return mAddress; }
+ void setAddress(const String8 &address) { mAddress = address; }
const FormatVector& encodedFormats() const { return mEncodedFormats; }
@@ -57,39 +59,113 @@
audio_port_handle_t getId() const;
void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
void log() const;
-
- String8 mAddress;
+ std::string toString() const;
private:
+ String8 mAddress{""};
String8 mTagName; // Unique human readable identifier for a device port found in conf file.
audio_devices_t mDeviceType;
FormatVector mEncodedFormats;
- audio_port_handle_t mId;
-
-friend class DeviceVector;
+ audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
};
class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
{
public:
DeviceVector() : SortedVector(), mDeviceTypes(AUDIO_DEVICE_NONE) {}
+ explicit DeviceVector(const sp<DeviceDescriptor>& item) : DeviceVector()
+ {
+ add(item);
+ }
ssize_t add(const sp<DeviceDescriptor>& item);
void add(const DeviceVector &devices);
ssize_t remove(const sp<DeviceDescriptor>& item);
+ void remove(const DeviceVector &devices);
ssize_t indexOf(const sp<DeviceDescriptor>& item) const;
audio_devices_t types() const { return mDeviceTypes; }
// If 'address' is empty, a device with a non-empty address may be returned
// if there is no device with the specified 'type' and empty address.
- sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address) const;
+ sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address = {}) const;
DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
+
+ /**
+ * @brief getDeviceFromId
+ * @param id of the DeviceDescriptor to search for (aka Port handle).
+ * @return DeviceDescriptor associated with the port id if found, nullptr otherwise. If the id is
+ * equal to AUDIO_PORT_HANDLE_NONE, nullptr is also returned.
+ */
sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
DeviceVector getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
audio_devices_t getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const;
+ bool contains(const sp<DeviceDescriptor>& item) const { return indexOf(item) >= 0; }
+
+ /**
+ * @brief containsAtLeastOne
+ * @param devices vector of devices to check against.
+ * @return true if the DeviceVector contains at least one of the devices from the given vector.
+ */
+ bool containsAtLeastOne(const DeviceVector &devices) const;
+
+ /**
+ * @brief containsAllDevices
+ * @param devices vector of devices to check against.
+ * @return true if the DeviceVector contains all the devices from the given vector
+ */
+ bool containsAllDevices(const DeviceVector &devices) const;
+
+ /**
+ * @brief filter the devices supported by this collection against another collection
+ * @param devices to filter against
+ * @return a DeviceVector containing only the devices that are present in both collections.
+ */
+ DeviceVector filter(const DeviceVector &devices) const;
+
+ /**
+ * @brief merge two vectors. As the SortedVector implementation is buggy (it does not check the
+ * size of the destination vector, only that of the source), this provides a safe implementation.
+ * @param devices source device vector to merge with
+ * @return size of the merged vector.
+ */
+ ssize_t merge(const DeviceVector &devices)
+ {
+ if (isEmpty()) {
+ add(devices);
+ return size();
+ }
+ return SortedVector::merge(devices);
+ }
+
+ /**
+ * @brief operator == Two DeviceVectors are equal if every DeviceDescriptor (i.e. same type
+ * and address) can be found in both and the vectors have the same size.
+ * @param right DeviceVector to compare to.
+ * @return true if right contains the same devices and has the same size.
+ */
+ bool operator==(const DeviceVector &right) const
+ {
+ if (size() != right.size()) {
+ return false;
+ }
+ for (const auto &device : *this) {
+ if (right.indexOf(device) < 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const DeviceVector &right) const
+ {
+ return !operator==(right);
+ }
+
+ std::string toString() const;
+
void dump(String8 *dst, const String8 &tag, int spaces = 0, bool verbose = true) const;
private:
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index 9fa7486..2dc33ab 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -25,12 +25,12 @@
namespace android {
-
class EffectDescriptor : public RefBase
{
public:
- void dump(String8 *dst) const;
+ void dump(String8 *dst, int spaces = 0) const;
+ int mId; // effect unique ID
int mIo; // io the effect is attached to
routing_strategy mStrategy; // routing strategy the effect is associated to
int mSession; // audio session the effect is on
@@ -46,12 +46,14 @@
status_t registerEffect(const effect_descriptor_t *desc, audio_io_handle_t io,
uint32_t strategy, int session, int id);
status_t unregisterEffect(int id);
+ sp<EffectDescriptor> getEffect(int id) const;
status_t setEffectEnabled(int id, bool enabled);
+ bool isEffectEnabled(int id) const;
uint32_t getMaxEffectsCpuLoad() const;
uint32_t getMaxEffectsMemory() const;
- bool isNonOffloadableEffectEnabled();
+ bool isNonOffloadableEffectEnabled() const;
- void dump(String8 *dst) const;
+ void dump(String8 *dst, int spaces = 0, bool verbose = true) const;
private:
status_t setEffectEnabled(const sp<EffectDescriptor> &effectDesc, bool enabled);
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 6560431..2b57fa9 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -81,6 +81,17 @@
return mPorts.findByTagName(tagName);
}
+ /**
+ * @brief supportsPatch checks if an audio patch between two ports belonging to this HwModule
+ * is supported. The ports and the route shall be declared in the
+ * audio_policy_configuration.xml file.
+ * @param srcPort (aka the source) to be considered
+ * @param dstPort (aka the sink) to be considered
+ * @return true if the HwModule supports the connection between the sink and the source,
+ * false otherwise
+ */
+ bool supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const;
+
// TODO remove from here (split serialization)
void dump(String8 *dst) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 8ff8238..ca6ca56 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -88,7 +88,7 @@
bool supportDeviceAddress(const String8 &address) const
{
- return mSupportedDevices[0]->mAddress == address;
+ return mSupportedDevices[0]->address() == address;
}
// chose first device present in mSupportedDevices also part of deviceType
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index 63c19d1..6b0476c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -23,26 +23,10 @@
namespace android {
-struct DeviceCategoryTraits
-{
- typedef device_category Type;
- typedef Vector<Type> Collection;
-};
-struct MixTypeTraits
-{
- typedef int32_t Type;
- typedef Vector<Type> Collection;
-};
-struct RouteFlagTraits
-{
- typedef uint32_t Type;
- typedef Vector<Type> Collection;
-};
-struct RuleTraits
-{
- typedef uint32_t Type;
- typedef Vector<Type> Collection;
-};
+struct RuleTraits : public DefaultTraits<uint32_t> {};
+using DeviceCategoryTraits = DefaultTraits<device_category>;
+struct MixTypeTraits : public DefaultTraits<int32_t> {};
+struct RouteFlagTraits : public DefaultTraits<uint32_t> {};
typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
typedef TypeConverter<MixTypeTraits> MixTypeConverter;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 559274f..0bc88a5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -269,6 +269,16 @@
}
}
+void AudioInputDescriptor::addClient(const sp<RecordClientDescriptor> &client) {
+ ClientMapHandler<RecordClientDescriptor>::addClient(client);
+
+ for (size_t i = 0; i < mEnabledEffects.size(); i++) {
+ if (mEnabledEffects.valueAt(i)->mSession == client->session()) {
+ client->trackEffectEnabled(mEnabledEffects.valueAt(i), true);
+ }
+ }
+}
+
void AudioInputDescriptor::setClientActive(const sp<RecordClientDescriptor>& client, bool active)
{
LOG_ALWAYS_FATAL_IF(getClient(client->portId()) == nullptr,
@@ -312,11 +322,26 @@
int event, const sp<RecordClientDescriptor>& client)
{
const audio_config_base_t sessionConfig = client->config();
- const record_client_info_t recordClientInfo{client->uid(), client->session(), client->source()};
+ const record_client_info_t recordClientInfo{client->uid(), client->session(),
+ client->source(), client->portId(),
+ client->isSilenced()};
const audio_config_base_t config = getConfig();
- mClientInterface->onRecordingConfigurationUpdate(event,
- &recordClientInfo, &sessionConfig,
- &config, mPatchHandle);
+
+ std::vector<effect_descriptor_t> clientEffects;
+ EffectDescriptorCollection effectsList = client->getEnabledEffects();
+ for (size_t i = 0; i < effectsList.size(); i++) {
+ clientEffects.push_back(effectsList.valueAt(i)->mDesc);
+ }
+
+ std::vector<effect_descriptor_t> effects;
+ effectsList = getEnabledEffects();
+ for (size_t i = 0; i < effectsList.size(); i++) {
+ effects.push_back(effectsList.valueAt(i)->mDesc);
+ }
+
+ mClientInterface->onRecordingConfigurationUpdate(event, &recordClientInfo, &sessionConfig,
+ clientEffects, &config, effects,
+ mPatchHandle, source());
}
RecordClientVector AudioInputDescriptor::getClientsForSession(
@@ -345,6 +370,53 @@
return clients;
}
+void AudioInputDescriptor::trackEffectEnabled(const sp<EffectDescriptor> &effect,
+ bool enabled)
+{
+ if (enabled) {
+ mEnabledEffects.replaceValueFor(effect->mId, effect);
+ } else {
+ mEnabledEffects.removeItem(effect->mId);
+ }
+
+ RecordClientVector clients = getClientsForSession((audio_session_t)effect->mSession);
+ for (const auto& client : clients) {
+ sp<EffectDescriptor> clientEffect = client->getEnabledEffects().getEffect(effect->mId);
+ bool changed = (enabled && clientEffect == nullptr)
+ || (!enabled && clientEffect != nullptr);
+ client->trackEffectEnabled(effect, enabled);
+ if (changed && client->active()) {
+ updateClientRecordingConfiguration(RECORD_CONFIG_EVENT_START, client);
+ }
+ }
+}
+
+EffectDescriptorCollection AudioInputDescriptor::getEnabledEffects() const
+{
+ EffectDescriptorCollection enabledEffects;
+ // report effects for highest priority active source as applied to all clients
+ RecordClientVector clients =
+ clientsList(true /*activeOnly*/, source(), false /*preferredDeviceOnly*/);
+ if (clients.size() > 0) {
+ enabledEffects = clients[0]->getEnabledEffects();
+ }
+ return enabledEffects;
+}
+
+void AudioInputDescriptor::setAppState(uid_t uid, app_state_t state) {
+ RecordClientVector clients = clientsList(false /*activeOnly*/);
+
+ for (const auto& client : clients) {
+ if (uid == client->uid()) {
+ bool wasSilenced = client->isSilenced();
+ client->setAppState(state);
+ if (client->active() && wasSilenced != client->isSilenced()) {
+ updateClientRecordingConfiguration(RECORD_CONFIG_EVENT_START, client);
+ }
+ }
+ }
+}
+
void AudioInputDescriptor::dump(String8 *dst) const
{
dst->appendFormat(" ID: %d\n", getId());
@@ -352,6 +424,7 @@
dst->appendFormat(" Format: %d\n", mFormat);
dst->appendFormat(" Channels: %08x\n", mChannelMask);
dst->appendFormat(" Devices %08x\n", mDevice);
+ getEnabledEffects().dump(dst, 1 /*spaces*/, false /*verbose*/);
dst->append(" AudioRecord Clients:\n");
ClientMapHandler<RecordClientDescriptor>::dump(dst);
dst->append("\n");
@@ -424,6 +497,17 @@
return 0;
}
+void AudioInputCollection::trackEffectEnabled(const sp<EffectDescriptor> &effect,
+ bool enabled)
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioInputDescriptor> inputDesc = valueAt(i);
+ if (inputDesc->mIoHandle == effect->mIo) {
+ return inputDesc->trackEffectEnabled(effect, enabled);
+ }
+ }
+}
+
void AudioInputCollection::dump(String8 *dst) const
{
dst->append("\nInputs dump:\n");
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 4ce6b08..97504ab 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -698,8 +698,8 @@
sp<SwAudioOutputDescriptor> primaryOutput = getPrimaryOutput();
if ((primaryOutput != NULL) && (primaryOutput->mProfile != NULL)
- && (primaryOutput->mProfile->mModule != NULL)) {
- sp<HwModule> primaryHwModule = primaryOutput->mProfile->mModule;
+ && (primaryOutput->mProfile->getModule() != NULL)) {
+ sp<HwModule> primaryHwModule = primaryOutput->mProfile->getModule();
Vector <sp<IOProfile>> primaryHwModuleOutputProfiles =
primaryHwModule->getOutputProfiles();
for (size_t i = 0; i < primaryHwModuleOutputProfiles.size(); i++) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 3cf8014..776d98f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -340,6 +340,87 @@
return NO_ERROR;
}
+status_t AudioPolicyMixCollection::setUidDeviceAffinities(uid_t uid,
+ const Vector<AudioDeviceTypeAddr>& devices) {
+ // remove existing rules for this uid
+ removeUidDeviceAffinities(uid);
+
+ // for each player mix: add a rule to match or exclude the uid based on the device
+ for (size_t i = 0; i < size(); i++) {
+ const AudioMix *mix = valueAt(i)->getMix();
+ if (mix->mMixType != MIX_TYPE_PLAYERS) {
+ continue;
+ }
+ // check if this mix goes to a device in the list of devices
+ bool deviceMatch = false;
+ for (size_t j = 0; j < devices.size(); j++) {
+ if (devices[j].mType == mix->mDeviceType
+ && devices[j].mAddress == mix->mDeviceAddress) {
+ deviceMatch = true;
+ break;
+ }
+ }
+ if (!deviceMatch) {
+ // this mix doesn't go to one of the listed devices for the given uid,
+ // modify its rules to exclude the uid
+ mix->excludeUid(uid);
+ }
+ }
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyMixCollection::removeUidDeviceAffinities(uid_t uid) {
+ // for each player mix: remove existing rules that match or exclude this uid
+ for (size_t i = 0; i < size(); i++) {
+ bool foundUidRule = false;
+ AudioMix *mix = valueAt(i)->getMix();
+ if (mix->mMixType != MIX_TYPE_PLAYERS) {
+ continue;
+ }
+ std::vector<size_t> criteriaToRemove;
+ for (size_t j = 0; j < mix->mCriteria.size(); j++) {
+ const uint32_t rule = mix->mCriteria[j].mRule;
+ // is this rule affecting the uid?
+ if (rule == RULE_EXCLUDE_UID
+ && uid == mix->mCriteria[j].mValue.mUid) {
+ foundUidRule = true;
+ criteriaToRemove.push_back(j);
+ }
+ }
+ if (foundUidRule) {
+ for (ssize_t j = (ssize_t)criteriaToRemove.size() - 1; j >= 0; j--) {
+ mix->mCriteria.removeAt(criteriaToRemove[j]);
+ }
+ }
+ }
+ return NO_ERROR;
+}
+
+status_t AudioPolicyMixCollection::getDevicesForUid(uid_t uid,
+ Vector<AudioDeviceTypeAddr>& devices) const {
+ // for each player mix: find rules that don't exclude this uid, and add the device to the list
+ for (size_t i = 0; i < size(); i++) {
+ bool ruleAllowsUid = true;
+ AudioMix *mix = valueAt(i)->getMix();
+ if (mix->mMixType != MIX_TYPE_PLAYERS) {
+ continue;
+ }
+ for (size_t j = 0; j < mix->mCriteria.size(); j++) {
+ const uint32_t rule = mix->mCriteria[j].mRule;
+ if (rule == RULE_EXCLUDE_UID
+ && uid == mix->mCriteria[j].mValue.mUid) {
+ ruleAllowsUid = false;
+ break;
+ }
+ }
+ if (ruleAllowsUid) {
+ devices.add(AudioDeviceTypeAddr(mix->mDeviceType, mix->mDeviceAddress));
+ }
+ }
+ return NO_ERROR;
+}
+
void AudioPolicyMixCollection::dump(String8 *dst) const
{
dst->append("\nAudio Policy Mix:\n");
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index c1fe5b0..79f0919 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -37,4 +37,19 @@
dst->append("\n");
}
+bool AudioRoute::supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const
+{
+ if (mSink == 0 || dstPort == 0 || dstPort != mSink) {
+ return false;
+ }
+ ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().string());
+ for (const auto &sourcePort : mSources) {
+ if (sourcePort == srcPort) {
+ ALOGV("%s: sources %s matching", __FUNCTION__, sourcePort->getTagName().string());
+ return true;
+ }
+ }
+ return false;
+}
+
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 815612d..82d64c9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -63,10 +63,20 @@
return ss.str();
}
+void RecordClientDescriptor::trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled)
+{
+ if (enabled) {
+ mEnabledEffects.replaceValueFor(effect->mId, effect);
+ } else {
+ mEnabledEffects.removeItem(effect->mId);
+ }
+}
+
void RecordClientDescriptor::dump(String8 *dst, int spaces, int index) const
{
ClientDescriptor::dump(dst, spaces, index);
dst->appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+ mEnabledEffects.dump(dst, spaces + 2 /*spaces*/, false /*verbose*/);
}
SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 9e5f944..04cbcd1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -35,7 +35,7 @@
AudioPort(String8(""), AUDIO_PORT_TYPE_DEVICE,
audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
AUDIO_PORT_ROLE_SOURCE),
- mAddress(""), mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats), mId(0)
+ mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats)
{
if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
mAddress = String8("0");
@@ -132,6 +132,13 @@
return ret;
}
+void DeviceVector::remove(const DeviceVector &devices)
+{
+ for (const auto& device : devices) {
+ remove(device);
+ }
+}
+
DeviceVector DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
{
DeviceVector devices;
@@ -159,9 +166,9 @@
sp<DeviceDescriptor> device;
for (size_t i = 0; i < size(); i++) {
if (itemAt(i)->type() == type) {
- if (address == "" || itemAt(i)->mAddress == address) {
+ if (address == "" || itemAt(i)->address() == address) {
device = itemAt(i);
- if (itemAt(i)->mAddress == address) {
+ if (itemAt(i)->address() == address) {
break;
}
}
@@ -174,9 +181,11 @@
sp<DeviceDescriptor> DeviceVector::getDeviceFromId(audio_port_handle_t id) const
{
- for (const auto& device : *this) {
- if (device->getId() == id) {
- return device;
+ if (id != AUDIO_PORT_HANDLE_NONE) {
+ for (const auto& device : *this) {
+ if (device->getId() == id) {
+ return device;
+ }
}
}
return nullptr;
@@ -188,8 +197,8 @@
bool isOutput = audio_is_output_devices(type);
type &= ~AUDIO_DEVICE_BIT_IN;
for (size_t i = 0; (i < size()) && (type != AUDIO_DEVICE_NONE); i++) {
- bool curIsOutput = audio_is_output_devices(itemAt(i)->mDeviceType);
- audio_devices_t curType = itemAt(i)->mDeviceType & ~AUDIO_DEVICE_BIT_IN;
+ bool curIsOutput = audio_is_output_devices(itemAt(i)->type());
+ audio_devices_t curType = itemAt(i)->type() & ~AUDIO_DEVICE_BIT_IN;
if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
devices.add(itemAt(i));
type &= ~curType;
@@ -251,8 +260,7 @@
// without the test?
// This has been demonstrated to NOT be true (at start up)
// ALOG_ASSERT(mModule != NULL);
- dstConfig->ext.device.hw_module =
- mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
+ dstConfig->ext.device.hw_module = getModuleHandle();
(void)audio_utils_strlcpy_zerofill(dstConfig->ext.device.address, mAddress.string());
}
@@ -263,7 +271,7 @@
port->id = mId;
toAudioPortConfig(&port->active_config);
port->ext.device.type = mDeviceType;
- port->ext.device.hw_module = mModule->getHandle();
+ port->ext.device.hw_module = getModuleHandle();
(void)audio_utils_strlcpy_zerofill(port->ext.device.address, mAddress.string());
}
@@ -294,6 +302,49 @@
AudioPort::dump(dst, spaces, verbose);
}
+std::string DeviceDescriptor::toString() const
+{
+ std::stringstream sstream;
+ sstream << "type:0x" << std::hex << type() << ",@:" << mAddress;
+ return sstream.str();
+}
+
+std::string DeviceVector::toString() const
+{
+ if (isEmpty()) {
+ return {"AUDIO_DEVICE_NONE"};
+ }
+ std::string result = {"{"};
+ for (const auto &device : *this) {
+ if (device != *begin()) {
+ result += ";";
+ }
+ result += device->toString();
+ }
+ return result + "}";
+}
+
+DeviceVector DeviceVector::filter(const DeviceVector &devices) const
+{
+ DeviceVector filteredDevices;
+ for (const auto &device : *this) {
+ if (devices.contains(device)) {
+ filteredDevices.add(device);
+ }
+ }
+ return filteredDevices;
+}
+
+bool DeviceVector::containsAtLeastOne(const DeviceVector &devices) const
+{
+ return !filter(devices).isEmpty();
+}
+
+bool DeviceVector::containsAllDevices(const DeviceVector &devices) const
+{
+ return filter(devices).size() == devices.size();
+}
+
void DeviceDescriptor::log() const
{
std::string device;
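Reviewer note: the new DeviceVector helpers above (filter, containsAtLeastOne, containsAllDevices) implement plain set semantics (intersection, non-empty intersection, and superset test) on top of the existing DeviceVector::contains(). A minimal sketch of the same semantics, illustrative only and not part of the patch, using a stand-in std::vector<std::string> in place of the real SortedVector<sp<DeviceDescriptor>>:

    #include <algorithm>
    #include <string>
    #include <vector>

    // Stand-in for DeviceVector; "devices" are represented by address strings here.
    using Devices = std::vector<std::string>;

    // filter(): keep only the elements also present in 'other' (set intersection).
    Devices filter(const Devices &self, const Devices &other) {
        Devices out;
        for (const auto &d : self) {
            if (std::find(other.begin(), other.end(), d) != other.end()) {
                out.push_back(d);
            }
        }
        return out;
    }

    // containsAtLeastOne(): true if the intersection is non-empty.
    bool containsAtLeastOne(const Devices &self, const Devices &other) {
        return !filter(self, other).empty();
    }

    // containsAllDevices(): true if every element of 'other' is also in 'self';
    // like the patch, this assumes 'other' holds no duplicates.
    bool containsAllDevices(const Devices &self, const Devices &other) {
        return filter(self, other).size() == other.size();
    }
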
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 8bbb798..40c49e7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -22,13 +22,13 @@
namespace android {
-void EffectDescriptor::dump(String8 *dst) const
+void EffectDescriptor::dump(String8 *dst, int spaces) const
{
- dst->appendFormat(" I/O: %d\n", mIo);
- dst->appendFormat(" Strategy: %d\n", mStrategy);
- dst->appendFormat(" Session: %d\n", mSession);
- dst->appendFormat(" Name: %s\n", mDesc.name);
- dst->appendFormat(" %s\n", mEnabled ? "Enabled" : "Disabled");
+ dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
+ dst->appendFormat("%*sStrategy: %d\n", spaces, "", mStrategy);
+ dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
+ dst->appendFormat("%*sName: %s\n", spaces, "", mDesc.name);
+ dst->appendFormat("%*s%s\n", spaces, "", mEnabled ? "Enabled" : "Disabled");
}
EffectDescriptorCollection::EffectDescriptorCollection() :
@@ -45,6 +45,11 @@
int session,
int id)
{
+ if (getEffect(id) != nullptr) {
+ ALOGW("%s effect %s already registered", __FUNCTION__, desc->name);
+ return INVALID_OPERATION;
+ }
+
if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
desc->name, desc->memoryUsage);
@@ -60,6 +65,7 @@
sp<EffectDescriptor> effectDesc = new EffectDescriptor();
memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
+ effectDesc->mId = id;
effectDesc->mIo = io;
effectDesc->mStrategy = static_cast<routing_strategy>(strategy);
effectDesc->mSession = session;
@@ -70,18 +76,23 @@
return NO_ERROR;
}
-status_t EffectDescriptorCollection::unregisterEffect(int id)
+sp<EffectDescriptor> EffectDescriptorCollection::getEffect(int id) const
{
ssize_t index = indexOfKey(id);
if (index < 0) {
- ALOGW("unregisterEffect() unknown effect ID %d", id);
+ return nullptr;
+ }
+ return valueAt(index);
+}
+
+status_t EffectDescriptorCollection::unregisterEffect(int id)
+{
+ sp<EffectDescriptor> effectDesc = getEffect(id);
+ if (effectDesc == nullptr) {
+ ALOGW("%s unknown effect ID %d", __FUNCTION__, id);
return INVALID_OPERATION;
}
- sp<EffectDescriptor> effectDesc = valueAt(index);
-
- setEffectEnabled(effectDesc, false);
-
if (mTotalEffectsMemory < effectDesc->mDesc.memoryUsage) {
ALOGW("unregisterEffect() memory %d too big for total %d",
effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
@@ -107,6 +118,14 @@
return setEffectEnabled(valueAt(index), enabled);
}
+bool EffectDescriptorCollection::isEffectEnabled(int id) const
+{
+ ssize_t index = indexOfKey(id);
+ if (index < 0) {
+ return false;
+ }
+ return valueAt(index)->mEnabled;
+}
status_t EffectDescriptorCollection::setEffectEnabled(const sp<EffectDescriptor> &effectDesc,
bool enabled)
@@ -138,7 +157,7 @@
return NO_ERROR;
}
-bool EffectDescriptorCollection::isNonOffloadableEffectEnabled()
+bool EffectDescriptorCollection::isNonOffloadableEffectEnabled() const
{
for (size_t i = 0; i < size(); i++) {
sp<EffectDescriptor> effectDesc = valueAt(i);
@@ -162,15 +181,21 @@
return MAX_EFFECTS_MEMORY;
}
-void EffectDescriptorCollection::dump(String8 *dst) const
+void EffectDescriptorCollection::dump(String8 *dst, int spaces, bool verbose) const
{
- dst->appendFormat(
- "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB, Max memory used: %d KB\n",
- (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory, mTotalEffectsMemoryMaxUsed);
- dst->append("Registered effects:\n");
+ if (verbose) {
+ dst->appendFormat(
+ "\n%*sTotal Effects CPU: %f MIPS, "
+ "Total Effects memory: %d KB, Max memory used: %d KB\n",
+ spaces, "",
+ (float) mTotalEffectsCpuLoad / 10,
+ mTotalEffectsMemory,
+ mTotalEffectsMemoryMaxUsed);
+ }
+ dst->appendFormat("%*sEffects:\n", spaces, "");
for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("- Effect %d dump:\n", keyAt(i));
- valueAt(i)->dump(dst);
+ dst->appendFormat("%*s- Effect %d:\n", spaces, "", keyAt(i));
+ valueAt(i)->dump(dst, spaces + 2);
}
}
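The reworked dump() methods above (EffectDescriptor, EffectDescriptorCollection, and the RecordClientDescriptor change earlier in this patch) all rely on the printf "%*s" idiom for variable indentation: the '*' takes the field width from an int argument, and applying it to an empty string emits that many spaces. A tiny standalone illustration, not part of the patch:

    #include <cstdio>

    int main() {
        int spaces = 4;
        // "%*s" consumes two arguments, the width and the string; with an empty
        // string it simply prints 'spaces' space characters before the rest.
        std::printf("%*sI/O: %d\n", spaces, "", 7);          // "    I/O: 7"
        std::printf("%*sSession: %d\n", spaces + 2, "", 3);  // "      Session: 3"
        return 0;
    }
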
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 92bc595..80af88d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -51,7 +51,7 @@
config->sample_rate));
sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
- devDesc->mAddress = address;
+ devDesc->setAddress(address);
profile->addSupportedDevice(devDesc);
return addOutputProfile(profile);
@@ -113,7 +113,7 @@
config->sample_rate));
sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
- devDesc->mAddress = address;
+ devDesc->setAddress(address);
profile->addSupportedDevice(devDesc);
ALOGV("addInputProfile() name %s rate %d mask 0x%08x",
@@ -218,6 +218,15 @@
mHandle = handle;
}
+bool HwModule::supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const {
+ for (const auto &route : mRoutes) {
+ if (route->supportsPatch(srcPort, dstPort)) {
+ return true;
+ }
+ }
+ return false;
+}
+
void HwModule::dump(String8 *dst) const
{
dst->appendFormat(" - name: %s\n", getName());
@@ -287,7 +296,7 @@
sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
devDesc->setName(String8(device_name));
- devDesc->mAddress = address;
+ devDesc->setAddress(address);
return devDesc;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 179a678..1154654 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -516,7 +516,7 @@
std::string address = getXmlAttribute(cur, Attributes::address);
if (!address.empty()) {
ALOGV("%s: address=%s for %s", __func__, address.c_str(), name.c_str());
- deviceDesc->mAddress = String8(address.c_str());
+ deviceDesc->setAddress(String8(address.c_str()));
}
AudioProfileTraits::Collection profiles;
@@ -535,7 +535,7 @@
return Status::fromStatusT(status);
}
ALOGV("%s: adding device tag %s type %08x address %s", __func__,
- deviceDesc->getName().string(), type, deviceDesc->mAddress.string());
+ deviceDesc->getName().string(), type, deviceDesc->address().string());
return deviceDesc;
}
@@ -742,7 +742,7 @@
}
ALOGV("%s: %s=%s",
__func__, tag, reinterpret_cast<const char*>(pointDefinition.get()));
- Vector<int32_t> point;
+ std::vector<int32_t> point;
collectionFromString<DefaultTraits<int32_t>>(
reinterpret_cast<const char*>(pointDefinition.get()), point, ",");
if (point.size() != 2) {
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index b5ecbf9..42c52de 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -198,27 +198,10 @@
<!-- End of Volume section -->
- <?disabledUntilHalV5_0
- <!-- Surround configuration -->
+ <!-- Surround Sound configuration -->
- <surroundSound>
- <!-- Each of the listed formats gets an entry in Surround Settings dialog.
- There must be a corresponding Java ENCODING_... contant defined in AudioFormat.java,
- and a display name defined in AudioFormat.toDisplayName. For the formats that don't
- need a dedicated Surrond Settings dialog entry, a subformats list should be used. -->
- <formats>
- <format name="AUDIO_FORMAT_AC3" />
- <format name="AUDIO_FORMAT_E_AC3" />
- <format name="AUDIO_FORMAT_E_AC3_JOC" />
- <format name="AUDIO_FORMAT_DOLBY_TRUEHD" />
- <format name="AUDIO_FORMAT_DTS" />
- <format name="AUDIO_FORMAT_DTS_HD" />
- <format name="AUDIO_FORMAT_AAC_LC" subformats="AUDIO_FORMAT_AAC_HE_V1 AUDIO_FORMAT_AAC_HE_V2 AUDIO_FORMAT_AAC_ELD AUDIO_FORMAT_AAC_XHE" />
- <format name="AUDIO_FORMAT_AC4" />
- </formats>
- </surroundSound>
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
- <!-- End of Surround configuration -->
- ?>
+ <!-- End of Surround Sound configuration -->
</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/surround_sound_configuration_5_0.xml b/services/audiopolicy/config/surround_sound_configuration_5_0.xml
new file mode 100644
index 0000000..590a181
--- /dev/null
+++ b/services/audiopolicy/config/surround_sound_configuration_5_0.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<surroundSound>
+ <!-- Each of the listed formats gets an entry in the Surround Settings dialog on TV devices.
+ There must be a corresponding Java ENCODING_... constant defined in AudioFormat.java,
+ and a display name defined in AudioFormat.toDisplayName. For the formats that don't
+ need a dedicated Surround Settings dialog entry, a subformats list has to be used. -->
+ <formats>
+ <format name="AUDIO_FORMAT_AC3" />
+ <format name="AUDIO_FORMAT_E_AC3" />
+ <format name="AUDIO_FORMAT_E_AC3_JOC" />
+ <format name="AUDIO_FORMAT_DOLBY_TRUEHD" />
+ <format name="AUDIO_FORMAT_DTS" />
+ <format name="AUDIO_FORMAT_DTS_HD" />
+ <format name="AUDIO_FORMAT_AAC_LC" subformats="AUDIO_FORMAT_AAC_HE_V1 AUDIO_FORMAT_AAC_HE_V2 AUDIO_FORMAT_AAC_ELD AUDIO_FORMAT_AAC_XHE" />
+ <format name="AUDIO_FORMAT_AC4" />
+ </formats>
+</surroundSound>
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index f07b797..5544821 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -149,13 +149,13 @@
// Before checking outputs, broadcast connect event to allow HAL to retrieve dynamic
// parameters on newly connected devices (instead of opening the outputs...)
- broadcastDeviceConnectionState(device, state, devDesc->mAddress);
+ broadcastDeviceConnectionState(device, state, devDesc->address());
- if (checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress) != NO_ERROR) {
+ if (checkOutputsForDevice(devDesc, state, outputs, devDesc->address()) != NO_ERROR) {
mAvailableOutputDevices.remove(devDesc);
broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- devDesc->mAddress);
+ devDesc->address());
return INVALID_OPERATION;
}
// Propagate device availability to Engine
@@ -178,12 +178,12 @@
ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
// Send Disconnect to HALs
- broadcastDeviceConnectionState(device, state, devDesc->mAddress);
+ broadcastDeviceConnectionState(device, state, devDesc->address());
// remove device from available output devices
mAvailableOutputDevices.remove(devDesc);
- checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress);
+ checkOutputsForDevice(devDesc, state, outputs, devDesc->address());
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
@@ -265,11 +265,11 @@
// Before checking intputs, broadcast connect event to allow HAL to retrieve dynamic
// parameters on newly connected devices (instead of opening the inputs...)
- broadcastDeviceConnectionState(device, state, devDesc->mAddress);
+ broadcastDeviceConnectionState(device, state, devDesc->address());
- if (checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress) != NO_ERROR) {
+ if (checkInputsForDevice(devDesc, state, inputs, devDesc->address()) != NO_ERROR) {
broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- devDesc->mAddress);
+ devDesc->address());
return INVALID_OPERATION;
}
@@ -294,9 +294,9 @@
ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
// Set Disconnect to HALs
- broadcastDeviceConnectionState(device, state, devDesc->mAddress);
+ broadcastDeviceConnectionState(device, state, devDesc->address());
- checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress);
+ checkInputsForDevice(devDesc, state, inputs, devDesc->address());
mAvailableInputDevices.remove(devDesc);
// Propagate device availability to Engine
@@ -780,17 +780,39 @@
return output;
}
-status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session,
- audio_stream_type_t *stream,
- uid_t uid,
- const audio_config_t *config,
- audio_output_flags_t *flags,
- audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId)
+status_t AudioPolicyManager::getAudioAttributes(audio_attributes_t *dstAttr,
+ const audio_attributes_t *srcAttr,
+ audio_stream_type_t srcStream)
{
- audio_attributes_t attributes;
+ if (srcAttr != NULL) {
+ if (!isValidAttributes(srcAttr)) {
+ ALOGE("%s invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
+ __func__,
+ srcAttr->usage, srcAttr->content_type, srcAttr->flags,
+ srcAttr->tags);
+ return BAD_VALUE;
+ }
+ *dstAttr = *srcAttr;
+ } else {
+ if (srcStream < AUDIO_STREAM_MIN || srcStream >= AUDIO_STREAM_PUBLIC_CNT) {
+ ALOGE("%s: invalid stream type", __func__);
+ return BAD_VALUE;
+ }
+ stream_type_to_audio_attributes(srcStream, dstAttr);
+ }
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::getOutputForAttrInt(audio_attributes_t *resultAttr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ const audio_attributes_t *attr,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t *flags,
+ audio_port_handle_t *selectedDeviceId)
+{
DeviceVector outputDevices;
routing_strategy strategy;
audio_devices_t device;
@@ -798,35 +820,20 @@
audio_devices_t msdDevice =
getModuleDeviceTypes(mAvailableOutputDevices, AUDIO_HARDWARE_MODULE_ID_MSD);
- // The supplied portId must be AUDIO_PORT_HANDLE_NONE
- if (*portId != AUDIO_PORT_HANDLE_NONE) {
- return INVALID_OPERATION;
+ status_t status = getAudioAttributes(resultAttr, attr, *stream);
+ if (status != NO_ERROR) {
+ return status;
}
- if (attr != NULL) {
- if (!isValidAttributes(attr)) {
- ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
- attr->usage, attr->content_type, attr->flags,
- attr->tags);
- return BAD_VALUE;
- }
- attributes = *attr;
- } else {
- if (*stream < AUDIO_STREAM_MIN || *stream >= AUDIO_STREAM_PUBLIC_CNT) {
- ALOGE("getOutputForAttr(): invalid stream type");
- return BAD_VALUE;
- }
- stream_type_to_audio_attributes(*stream, &attributes);
- }
-
- ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
+ ALOGV("%s usage=%d, content=%d, tag=%s flags=%08x"
" session %d selectedDeviceId %d",
- attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
+ __func__,
+ resultAttr->usage, resultAttr->content_type, resultAttr->tags, resultAttr->flags,
session, requestedDeviceId);
- *stream = streamTypefromAttributesInt(&attributes);
+ *stream = streamTypefromAttributesInt(resultAttr);
- strategy = getStrategyForAttr(&attributes);
+ strategy = getStrategyForAttr(resultAttr);
// First check for explicit routing (eg. setPreferredDevice)
if (requestedDeviceId != AUDIO_PORT_HANDLE_NONE) {
@@ -836,30 +843,30 @@
} else {
// If no explict route, is there a matching dynamic policy that applies?
sp<SwAudioOutputDescriptor> desc;
- if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
+ if (mPolicyMixes.getOutputForAttr(*resultAttr, uid, desc) == NO_ERROR) {
ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
if (!audio_has_proportional_frames(config->format)) {
return BAD_VALUE;
}
- *stream = streamTypefromAttributesInt(&attributes);
+ *stream = streamTypefromAttributesInt(resultAttr);
*output = desc->mIoHandle;
AudioMix *mix = desc->mPolicyMix;
sp<DeviceDescriptor> deviceDesc =
mAvailableOutputDevices.getDevice(mix->mDeviceType, mix->mDeviceAddress);
*selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
- ALOGV("getOutputForAttr() returns output %d", *output);
- goto exit;
+ ALOGV("%s returns output %d", __func__, *output);
+ return NO_ERROR;
}
// Virtual sources must always be dynamicaly or explicitly routed
- if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
- ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("%s no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE", __func__);
return BAD_VALUE;
}
device = getDeviceForStrategy(strategy, false /*fromCache*/);
}
- if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+ if ((resultAttr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
@@ -869,7 +876,7 @@
// to getOutputForDevice.
// TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side.
if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
- (*stream == AUDIO_STREAM_MUSIC || attributes.usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
+ (*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
audio_is_linear_pcm(config->format) &&
isInCall()) {
if (requestedDeviceId != AUDIO_PORT_HANDLE_NONE) {
@@ -880,9 +887,9 @@
}
}
- ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
+ ALOGV("%s device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
"flags %#x",
- device, config->sample_rate, config->format, config->channel_mask, *flags);
+ __func__, device, config->sample_rate, config->format, config->channel_mask, *flags);
*output = AUDIO_IO_HANDLE_NONE;
if (msdDevice != AUDIO_DEVICE_NONE) {
@@ -903,25 +910,50 @@
}
outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
- *selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
- : AUDIO_PORT_HANDLE_NONE;
+ *selectedDeviceId = getFirstDeviceId(outputDevices);
-exit:
+ ALOGV("%s returns output %d selectedDeviceId %d", __func__, *output, *selectedDeviceId);
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t *flags,
+ audio_port_handle_t *selectedDeviceId,
+ audio_port_handle_t *portId)
+{
+ // The supplied portId must be AUDIO_PORT_HANDLE_NONE
+ if (*portId != AUDIO_PORT_HANDLE_NONE) {
+ return INVALID_OPERATION;
+ }
+ const audio_port_handle_t requestedDeviceId = *selectedDeviceId;
+ audio_attributes_t resultAttr;
+ status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid,
+ config, flags, selectedDeviceId);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
.format = config->format,
.channel_mask = config->channel_mask };
*portId = AudioPort::getNextUniqueId();
sp<TrackClientDescriptor> clientDesc =
- new TrackClientDescriptor(*portId, uid, session, attributes, clientConfig,
+ new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
requestedDeviceId, *stream,
- getStrategyForAttr(&attributes),
+ getStrategyForAttr(&resultAttr),
*flags);
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
outputDesc->addClient(clientDesc);
- ALOGV(" getOutputForAttr() returns output %d selectedDeviceId %d for port ID %d",
- *output, *selectedDeviceId, *portId);
+ ALOGV("%s returns output %d selectedDeviceId %d for port ID %d",
+ __func__, *output, *selectedDeviceId, *portId);
return NO_ERROR;
}
@@ -1020,8 +1052,7 @@
new SwAudioOutputDescriptor(profile, mpClientInterface);
DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
- String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
- : String8("");
+ String8 address = getFirstDeviceAddress(outputDevices);
// MSD patch may be using the only output stream that can service this request. Release
// MSD patch to prioritize this request over any active output on MSD.
@@ -1282,6 +1313,10 @@
audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
+ // Flags which must be present on both the request and the selected output
+ static const audio_output_flags_t kMandatedFlags = (audio_output_flags_t)
+ (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
+
for (audio_io_handle_t output : outputs) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (!outputDesc->isDuplicated()) {
@@ -1305,6 +1340,10 @@
continue;
}
}
+ if ((kMandatedFlags & flags) !=
+ (kMandatedFlags & outputDesc->mProfile->getFlags())) {
+ continue;
+ }
// if a valid format is specified, skip output if not compatible
if (format != AUDIO_FORMAT_INVALID) {
@@ -1722,10 +1761,7 @@
}
// Explicit routing?
- sp<DeviceDescriptor> deviceDesc;
- if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
- deviceDesc = mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
- }
+ sp<DeviceDescriptor> deviceDesc = mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
// special case for mmap capture: if an input IO handle is specified, we reuse this input if
// possible
@@ -1831,8 +1867,7 @@
exit:
inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
- *selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
- : AUDIO_PORT_HANDLE_NONE;
+ *selectedDeviceId = getFirstDeviceId(inputDevices);
isSoundTrigger = inputSource == AUDIO_SOURCE_HOTWORD &&
mSoundTriggerSessions.indexOfKey(session) > 0;
@@ -1963,7 +1998,7 @@
if (address == "") {
DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
// the inputs vector must be of size >= 1, but we don't want to crash here
- address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
+ address = getFirstDeviceAddress(inputDevices);
}
status_t status = inputDesc->open(&lConfig, device, address,
@@ -2396,6 +2431,33 @@
return mEffects.registerEffect(desc, io, strategy, session, id);
}
+status_t AudioPolicyManager::unregisterEffect(int id)
+{
+ if (mEffects.getEffect(id) == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ if (mEffects.isEffectEnabled(id)) {
+ ALOGW("%s effect %d enabled", __FUNCTION__, id);
+ setEffectEnabled(id, false);
+ }
+ return mEffects.unregisterEffect(id);
+}
+
+status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled)
+{
+ sp<EffectDescriptor> effect = mEffects.getEffect(id);
+ if (effect == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ status_t status = mEffects.setEffectEnabled(id, enabled);
+ if (status == NO_ERROR) {
+ mInputs.trackEffectEnabled(effect, enabled);
+ }
+ return status;
+}
+
bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
{
bool active = false;
@@ -2612,6 +2674,59 @@
}
}
+status_t AudioPolicyManager::setUidDeviceAffinities(uid_t uid,
+ const Vector<AudioDeviceTypeAddr>& devices) {
+ ALOGV("%s() uid=%d num devices %zu", __FUNCTION__, uid, devices.size());
+ // uid/device affinity is only for output devices
+ for (size_t i = 0; i < devices.size(); i++) {
+ if (!audio_is_output_device(devices[i].mType)) {
+ ALOGE("setUidDeviceAffinities() device=%08x is NOT an output device",
+ devices[i].mType);
+ return BAD_VALUE;
+ }
+ }
+ status_t res = mPolicyMixes.setUidDeviceAffinities(uid, devices);
+ if (res == NO_ERROR) {
+ // reevaluate outputs for all given devices
+ for (size_t i = 0; i < devices.size(); i++) {
+ sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
+ devices[i].mType, devices[i].mAddress, String8());
+ SortedVector<audio_io_handle_t> outputs;
+ if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ outputs,
+ devDesc->address()) != NO_ERROR) {
+ ALOGE("setUidDeviceAffinities() error in checkOutputsForDevice for device=%08x"
+ " addr=%s", devices[i].mType, devices[i].mAddress.string());
+ return INVALID_OPERATION;
+ }
+ }
+ }
+ return res;
+}
+
+status_t AudioPolicyManager::removeUidDeviceAffinities(uid_t uid) {
+ ALOGV("%s() uid=%d", __FUNCTION__, uid);
+ Vector<AudioDeviceTypeAddr> devices;
+ status_t res = mPolicyMixes.getDevicesForUid(uid, devices);
+ if (res == NO_ERROR) {
+ // reevaluate outputs for all found devices
+ for (size_t i = 0; i < devices.size(); i++) {
+ sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
+ devices[i].mType, devices[i].mAddress, String8());
+ SortedVector<audio_io_handle_t> outputs;
+ if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ outputs,
+ devDesc->address()) != NO_ERROR) {
+ ALOGE("%s() error in checkOutputsForDevice for device=%08x addr=%s",
+ __FUNCTION__, devices[i].mType, devices[i].mAddress.string());
+ return INVALID_OPERATION;
+ }
+ }
+ }
+
+ return res;
+}
+
void AudioPolicyManager::dump(String8 *dst) const
{
dst->appendFormat("\nAudioPolicyManager Dump: %p\n", this);
@@ -2930,7 +3045,7 @@
}
if (!outputDesc->mProfile->isCompatibleProfile(devDesc->type(),
- devDesc->mAddress,
+ devDesc->address(),
patch->sources[0].sample_rate,
NULL, // updatedSamplingRate
patch->sources[0].format,
@@ -2987,7 +3102,7 @@
}
if (!inputDesc->mProfile->isCompatibleProfile(devDesc->type(),
- devDesc->mAddress,
+ devDesc->address(),
patch->sinks[0].sample_rate,
NULL, /*updatedSampleRate*/
patch->sinks[0].format,
@@ -3050,8 +3165,10 @@
// create a software bridge in PatchPanel if:
// - source and sink devices are on different HW modules OR
// - audio HAL version is < 3.0
+ // - audio HAL version is >= 3.0 but no route has been declared between devices
if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
- (srcDeviceDesc->mModule->getHalVersionMajor() < 3)) {
+ (srcDeviceDesc->getModuleVersionMajor() < 3) ||
+ !srcDeviceDesc->getModule()->supportsPatch(srcDeviceDesc, sinkDeviceDesc)) {
// support only one sink device for now to simplify output selection logic
if (patch->num_sinks > 1) {
return INVALID_OPERATION;
@@ -3395,16 +3512,25 @@
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- if (srcDeviceDesc->getAudioPort()->mModule->getHandle() ==
- sinkDeviceDesc->getAudioPort()->mModule->getHandle() &&
- srcDeviceDesc->getAudioPort()->mModule->getHalVersionMajor() >= 3 &&
+ if (srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) &&
+ srcDeviceDesc->getModuleVersionMajor() >= 3 &&
+ sinkDeviceDesc->getModule()->supportsPatch(srcDeviceDesc, sinkDeviceDesc) &&
srcDeviceDesc->getAudioPort()->mGains.size() > 0) {
- ALOGV("%s AUDIO_DEVICE_API_VERSION_3_0", __FUNCTION__);
+ ALOGV("%s Device to Device route supported by >=3.0 HAL", __FUNCTION__);
+ // TODO: may explicitly specify whether we should use HW or SW patch
// create patch between src device and output device
// create Hwoutput and add to mHwOutputs
} else {
- SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(sinkDevice, mOutputs);
- audio_io_handle_t output = selectOutput(outputs);
+ audio_attributes_t resultAttr;
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ config.sample_rate = sourceDesc->config().sample_rate;
+ config.channel_mask = sourceDesc->config().channel_mask;
+ config.format = sourceDesc->config().format;
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ getOutputForAttrInt(&resultAttr, &output, AUDIO_SESSION_NONE,
+ &attributes, &stream, sourceDesc->uid(), &config, &flags, &selectedDeviceId);
if (output == AUDIO_IO_HANDLE_NONE) {
ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevice);
return INVALID_OPERATION;
@@ -3437,6 +3563,13 @@
__FUNCTION__, status);
return INVALID_OPERATION;
}
+
+ if (outputDesc->getClient(sourceDesc->portId()) != nullptr) {
+ ALOGW("%s source portId has already been attached to outputDesc", __func__);
+ return INVALID_OPERATION;
+ }
+ outputDesc->addClient(sourceDesc);
+
uint32_t delayMs = 0;
status = startSource(outputDesc, sourceDesc, &delayMs);
@@ -3615,7 +3748,7 @@
AUDIO_DEVICE_OUT_HDMI);
for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
- String8 address = hdmiOutputDevices[i]->mAddress;
+ String8 address = hdmiOutputDevices[i]->address();
String8 name = hdmiOutputDevices[i]->getName();
status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
@@ -3635,7 +3768,7 @@
AUDIO_DEVICE_IN_HDMI);
for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
- String8 address = hdmiInputDevices[i]->mAddress;
+ String8 address = hdmiInputDevices[i]->address();
String8 name = hdmiInputDevices[i]->getName();
status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
@@ -3661,13 +3794,11 @@
void AudioPolicyManager::setAppState(uid_t uid, app_state_t state)
{
- Vector<sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
-
ALOGV("%s(uid:%d, state:%d)", __func__, uid, state);
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
- RecordClientVector clients = activeDesc->clientsList(true /*activeOnly*/);
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
+ RecordClientVector clients = inputDesc->clientsList(false /*activeOnly*/);
for (const auto& client : clients) {
if (uid == client->uid()) {
client->setAppState(state);
@@ -3893,8 +4024,7 @@
const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
const DeviceVector &devicesForType = supportedDevices.getDevicesFromTypeMask(
profileType);
- String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
- : String8("");
+ String8 address = getFirstDeviceAddress(devicesForType);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status = outputDesc->open(nullptr, profileType, address,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
@@ -3948,8 +4078,7 @@
DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(profileType);
// the inputs vector must be of size >= 1, but we don't want to crash here
- String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
- : String8("");
+ String8 address = getFirstDeviceAddress(inputDevices);
ALOGV(" for input device 0x%x using address %s", profileType, address.string());
ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
@@ -4011,11 +4140,11 @@
}
// If microphones address is empty, set it according to device type
for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->mAddress.isEmpty()) {
+ if (mAvailableInputDevices[i]->address().isEmpty()) {
if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
- mAvailableInputDevices[i]->mAddress = String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+ mAvailableInputDevices[i]->setAddress(String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS));
} else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
- mAvailableInputDevices[i]->mAddress = String8(AUDIO_BACK_MICROPHONE_ADDRESS);
+ mAvailableInputDevices[i]->setAddress(String8(AUDIO_BACK_MICROPHONE_ADDRESS));
}
}
}
@@ -5184,8 +5313,9 @@
if (!deviceList.isEmpty()) {
PatchBuilder patchBuilder;
patchBuilder.addSource(outputDesc);
- for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++) {
- patchBuilder.addSink(deviceList.itemAt(i));
+ ALOG_ASSERT(deviceList.size() <= AUDIO_PATCH_PORTS_MAX, "Too many sink ports");
+ for (const auto &device : deviceList) {
+ patchBuilder.addSink(device);
}
installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index d0708b8..9eb1dcf 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -164,14 +164,8 @@
uint32_t strategy,
int session,
int id);
- virtual status_t unregisterEffect(int id)
- {
- return mEffects.unregisterEffect(id);
- }
- virtual status_t setEffectEnabled(int id, bool enabled)
- {
- return mEffects.setEffectEnabled(id, enabled);
- }
+ virtual status_t unregisterEffect(int id);
+ virtual status_t setEffectEnabled(int id, bool enabled);
virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
// return whether a stream is playing remotely, override to change the definition of
@@ -224,6 +218,9 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes);
virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes);
+ virtual status_t setUidDeviceAffinities(uid_t uid,
+ const Vector<AudioDeviceTypeAddr>& devices);
+ virtual status_t removeUidDeviceAffinities(uid_t uid);
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
@@ -519,6 +516,19 @@
return mAvailableInputDevices.getDeviceTypesFromHwModule(
mPrimaryOutput->getModuleHandle());
}
+ /**
+ * @brief getFirstDeviceId of the device vector
+ * @return the id of the first device if the collection is not empty,
+ * otherwise AUDIO_PORT_HANDLE_NONE
+ */
+ audio_port_handle_t getFirstDeviceId(const DeviceVector &devices) const
+ {
+ return (devices.size() > 0) ? devices.itemAt(0)->getId() : AUDIO_PORT_HANDLE_NONE;
+ }
+ String8 getFirstDeviceAddress(const DeviceVector &devices) const
+ {
+ return (devices.size() > 0) ? devices.itemAt(0)->address() : String8("");
+ }
uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
@@ -661,6 +671,21 @@
const String8& address /*in*/,
SortedVector<audio_io_handle_t>& outputs /*out*/);
uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; }
+ // internal method, get audio_attributes_t from either a source audio_attributes_t
+ // or audio_stream_type_t, respectively.
+ status_t getAudioAttributes(audio_attributes_t *dstAttr,
+ const audio_attributes_t *srcAttr,
+ audio_stream_type_t srcStream);
+ // internal method, called by getOutputForAttr() and connectAudioSource.
+ status_t getOutputForAttrInt(audio_attributes_t *resultAttr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ const audio_attributes_t *attr,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t *flags,
+ audio_port_handle_t *selectedDeviceId);
// internal method to return the output handle for the given device and format
audio_io_handle_t getOutputForDevice(
audio_devices_t device,
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 21fffec..d826192 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -209,12 +209,17 @@
}
void AudioPolicyService::AudioPolicyClient::onRecordingConfigurationUpdate(
- int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle)
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source)
{
mAudioPolicyService->onRecordingConfigurationUpdate(event, clientInfo,
- clientConfig, deviceConfig, patchHandle);
+ clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
}
audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId(audio_unique_id_use_t use)
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 439764b..80503fd 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -1037,6 +1037,31 @@
}
}
+status_t AudioPolicyService::setUidDeviceAffinities(uid_t uid,
+ const Vector<AudioDeviceTypeAddr>& devices) {
+ Mutex::Autolock _l(mLock);
+ if (!modifyAudioRoutingAllowed()) {
+ return PERMISSION_DENIED;
+ }
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ AutoCallerClear acc;
+ return mAudioPolicyManager->setUidDeviceAffinities(uid, devices);
+}
+
+status_t AudioPolicyService::removeUidDeviceAffinities(uid_t uid) {
+ Mutex::Autolock _l(mLock);
+ if (!modifyAudioRoutingAllowed()) {
+ return PERMISSION_DENIED;
+ }
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ AutoCallerClear acc;
+ return mAudioPolicyManager->removeUidDeviceAffinities(uid);
+}
+
status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
audio_port_handle_t *portId)
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index ee5d6ff..416817f 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -38,6 +38,7 @@
#include <media/AudioEffect.h>
#include <media/AudioParameter.h>
#include <mediautils/ServiceUtilities.h>
+#include <sensorprivacy/SensorPrivacyManager.h>
#include <system/audio.h>
#include <system/audio_policy.h>
@@ -84,6 +85,9 @@
mUidPolicy = new UidPolicy(this);
mUidPolicy->registerSelf();
+
+ mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
+ mSensorPrivacyPolicy->registerSelf();
}
AudioPolicyService::~AudioPolicyService()
@@ -99,6 +103,9 @@
mUidPolicy->unregisterSelf();
mUidPolicy.clear();
+
+ mSensorPrivacyPolicy->unregisterSelf();
+ mSensorPrivacyPolicy.clear();
}
// A notification client is always registered by AudioSystem when the client process
@@ -208,22 +215,34 @@
}
}
-void AudioPolicyService::onRecordingConfigurationUpdate(int event,
- const record_client_info_t *clientInfo, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
+void AudioPolicyService::onRecordingConfigurationUpdate(
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source)
{
mOutputCommandThread->recordingConfigurationUpdateCommand(event, clientInfo,
- clientConfig, deviceConfig, patchHandle);
+ clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
}
-void AudioPolicyService::doOnRecordingConfigurationUpdate(int event,
- const record_client_info_t *clientInfo, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
+void AudioPolicyService::doOnRecordingConfigurationUpdate(
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source)
{
Mutex::Autolock _l(mNotificationClientsLock);
for (size_t i = 0; i < mNotificationClients.size(); i++) {
mNotificationClients.valueAt(i)->onRecordingConfigurationUpdate(event, clientInfo,
- clientConfig, deviceConfig, patchHandle);
+ clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
}
}
@@ -291,13 +310,18 @@
}
void AudioPolicyService::NotificationClient::onRecordingConfigurationUpdate(
- int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle)
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source)
{
if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, clientInfo,
- clientConfig, deviceConfig, patchHandle);
+ clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
}
}
@@ -375,6 +399,12 @@
bool isAssistantOnTop = false;
bool isSensitiveActive = false;
+ // if Sensor Privacy is enabled then all recordings should be silenced.
+ if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) {
+ silenceAllRecordings_l();
+ return;
+ }
+
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!current->active) continue;
@@ -445,6 +475,13 @@
}
}
+void AudioPolicyService::silenceAllRecordings_l() {
+ for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
+ sp<AudioRecordClient> current = mAudioRecordClients[i];
+ setAppState_l(current->uid, APP_STATE_IDLE);
+ }
+}
+
/* static */
app_state_t AudioPolicyService::apmStatFromAmState(int amState) {
switch (amState) {
@@ -858,6 +895,31 @@
return it != mA11yUids.end();
}
+// ----------- AudioPolicyService::SensorPrivacyService implementation ----------
+void AudioPolicyService::SensorPrivacyPolicy::registerSelf() {
+ SensorPrivacyManager spm;
+ mSensorPrivacyEnabled = spm.isSensorPrivacyEnabled();
+ spm.addSensorPrivacyListener(this);
+}
+
+void AudioPolicyService::SensorPrivacyPolicy::unregisterSelf() {
+ SensorPrivacyManager spm;
+ spm.removeSensorPrivacyListener(this);
+}
+
+bool AudioPolicyService::SensorPrivacyPolicy::isSensorPrivacyEnabled() {
+ return mSensorPrivacyEnabled;
+}
+
+binder::Status AudioPolicyService::SensorPrivacyPolicy::onSensorPrivacyChanged(bool enabled) {
+ mSensorPrivacyEnabled = enabled;
+ sp<AudioPolicyService> service = mService.promote();
+ if (service != nullptr) {
+ service->updateUidStates();
+ }
+ return binder::Status::ok();
+}
+
// ----------- AudioPolicyService::AudioCommandThread implementation ----------
AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name,
@@ -1026,8 +1088,9 @@
}
mLock.unlock();
svc->doOnRecordingConfigurationUpdate(data->mEvent, &data->mClientInfo,
- &data->mClientConfig, &data->mDeviceConfig,
- data->mPatchHandle);
+ &data->mClientConfig, data->mClientEffects,
+ &data->mDeviceConfig, data->mEffects,
+ data->mPatchHandle, data->mSource);
mLock.lock();
} break;
default:
@@ -1262,9 +1325,14 @@
}
void AudioPolicyService::AudioCommandThread::recordingConfigurationUpdateCommand(
- int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle)
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source)
{
sp<AudioCommand>command = new AudioCommand();
command->mCommand = RECORDING_CONFIGURATION_UPDATE;
@@ -1272,8 +1340,11 @@
data->mEvent = event;
data->mClientInfo = *clientInfo;
data->mClientConfig = *clientConfig;
+ data->mClientEffects = clientEffects;
data->mDeviceConfig = *deviceConfig;
+ data->mEffects = effects;
data->mPatchHandle = patchHandle;
+ data->mSource = source;
command->mParam = data;
ALOGV("AudioCommandThread() adding recording configuration update event %d, source %d uid %u",
event, clientInfo->source, clientInfo->uid);
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 23c3daa..959e757 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -33,6 +33,7 @@
#include <media/AudioPolicy.h>
#include "AudioPolicyEffects.h"
#include "managerdefault/AudioPolicyManager.h"
+#include <android/hardware/BnSensorPrivacyListener.h>
#include <unordered_map>
@@ -198,6 +199,10 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
+ virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
+
+ virtual status_t removeUidDeviceAffinities(uid_t uid);
+
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
audio_port_handle_t *portId);
@@ -239,12 +244,22 @@
void onDynamicPolicyMixStateUpdate(const String8& regId, int32_t state);
void doOnDynamicPolicyMixStateUpdate(const String8& regId, int32_t state);
- void onRecordingConfigurationUpdate(int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
- void doOnRecordingConfigurationUpdate(int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+ void onRecordingConfigurationUpdate(int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source);
+ void doOnRecordingConfigurationUpdate(int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source);
private:
AudioPolicyService() ANDROID_API;
@@ -279,6 +294,8 @@
void updateUidStates();
void updateUidStates_l();
+ void silenceAllRecordings_l();
+
static bool isPrivacySensitive(audio_source_t source);
// If recording we need to make sure the UID is allowed to do that. If the UID is idle
@@ -334,6 +351,27 @@
std::vector<uid_t> mA11yUids;
};
+ // If sensor privacy is enabled then all apps, including those that are active, should be
+ // prevented from recording. This is handled similarly to idle UIDs: any app that attempts
+ // to record while sensor privacy is enabled will receive buffers with zeros. As soon as
+ // sensor privacy is disabled, active apps will receive the expected data when recording.
+ class SensorPrivacyPolicy : public hardware::BnSensorPrivacyListener {
+ public:
+ explicit SensorPrivacyPolicy(wp<AudioPolicyService> service)
+ : mService(service) {}
+
+ void registerSelf();
+ void unregisterSelf();
+
+ bool isSensorPrivacyEnabled();
+
+ binder::Status onSensorPrivacyChanged(bool enabled);
+
+ private:
+ wp<AudioPolicyService> mService;
+ std::atomic_bool mSensorPrivacyEnabled;
+ };
+
// Thread used to send audio config commands to audio flinger
// For audio config commands, it is necessary because audio flinger requires that the calling
// process (user) has permission to modify audio settings.
@@ -385,13 +423,17 @@
void updateAudioPatchListCommand();
status_t setAudioPortConfigCommand(const struct audio_port_config *config,
int delayMs);
- void dynamicPolicyMixStateUpdateCommand(const String8& regId, int32_t state);
+ void dynamicPolicyMixStateUpdateCommand(const String8& regId,
+ int32_t state);
void recordingConfigurationUpdateCommand(
- int event,
- const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle);
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source);
void insertCommand_l(AudioCommand *command, int delayMs = 0);
private:
class AudioCommandData;
@@ -476,8 +518,11 @@
int mEvent;
record_client_info_t mClientInfo;
struct audio_config_base mClientConfig;
+ std::vector<effect_descriptor_t> mClientEffects;
struct audio_config_base mDeviceConfig;
+ std::vector<effect_descriptor_t> mEffects;
audio_patch_handle_t mPatchHandle;
+ audio_source_t mSource;
};
Mutex mLock;
@@ -581,9 +626,13 @@
virtual void onAudioPatchListUpdate();
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
virtual void onRecordingConfigurationUpdate(int event,
- const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source);
virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
@@ -601,12 +650,17 @@
void onAudioPortListUpdate();
void onAudioPatchListUpdate();
- void onDynamicPolicyMixStateUpdate(const String8& regId, int32_t state);
+ void onDynamicPolicyMixStateUpdate(const String8& regId,
+ int32_t state);
void onRecordingConfigurationUpdate(
- int event, const record_client_info_t *clientInfo,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle);
+ int event,
+ const record_client_info_t *clientInfo,
+ const audio_config_base_t *clientConfig,
+ std::vector<effect_descriptor_t> clientEffects,
+ const audio_config_base_t *deviceConfig,
+ std::vector<effect_descriptor_t> effects,
+ audio_patch_handle_t patchHandle,
+ audio_source_t source);
void setAudioPortCallbacksEnabled(bool enabled);
uid_t uid() {
@@ -718,6 +772,8 @@
audio_mode_t mPhoneState;
sp<UidPolicy> mUidPolicy;
+ sp<SensorPrivacyPolicy> mSensorPrivacyPolicy;
+
DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> > mAudioRecordClients;
DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> > mAudioPlaybackClients;
};
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 2ff7675..6ae354b 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -75,11 +75,14 @@
void onAudioPatchListUpdate() override { }
audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t /*use*/) override { return 0; }
void onDynamicPolicyMixStateUpdate(String8 /*regId*/, int32_t /*state*/) override { }
- void onRecordingConfigurationUpdate(int /*event*/,
- const record_client_info_t* /*clientInfo*/,
- const struct audio_config_base* /*clientConfig*/,
- const struct audio_config_base* /*deviceConfig*/,
- audio_patch_handle_t /*patchHandle*/) override { }
+ void onRecordingConfigurationUpdate(int event __unused,
+ const record_client_info_t *clientInfo __unused,
+ const audio_config_base_t *clientConfig __unused,
+ std::vector<effect_descriptor_t> clientEffects __unused,
+ const audio_config_base_t *deviceConfig __unused,
+ std::vector<effect_descriptor_t> effects __unused,
+ audio_patch_handle_t patchHandle __unused,
+ audio_source_t source __unused) override { }
};
} // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index c1a4c11..46fbc3e 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -256,7 +256,7 @@
Vector<int32_t> outputStreamIds;
std::vector<std::string> requestedPhysicalIds;
if (request.mSurfaceList.size() > 0) {
- for (sp<Surface> surface : request.mSurfaceList) {
+ for (const sp<Surface>& surface : request.mSurfaceList) {
if (surface == 0) continue;
int32_t streamId;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 856af13..12fbf82 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -175,7 +175,7 @@
session->interfaceChain([](
::android::hardware::hidl_vec<::android::hardware::hidl_string> interfaceChain) {
ALOGV("Session interface chain:");
- for (auto iface : interfaceChain) {
+ for (const auto& iface : interfaceChain) {
ALOGV(" %s", iface.c_str());
}
});
diff --git a/services/camera/libcameraservice/hidl/AidlCameraDeviceCallbacks.cpp b/services/camera/libcameraservice/hidl/AidlCameraDeviceCallbacks.cpp
index e5e5024..f063506 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraDeviceCallbacks.cpp
+++ b/services/camera/libcameraservice/hidl/AidlCameraDeviceCallbacks.cpp
@@ -139,18 +139,18 @@
}
CameraMetadataNative &result = resultWrapper->mResult;
auto resultExtras = resultWrapper->mResultExtras;
- auto &physicalCaptureResultInfos = resultWrapper->mPhysicalCaptureResultInfos;
HCaptureResultExtras hResultExtras =
hardware::cameraservice::utils::conversion::convertToHidl(resultExtras);
- hidl_vec<HPhysicalCaptureResultInfo> hPhysicalCaptureResultInfos =
- hardware::cameraservice::utils::conversion::convertToHidl(
- physicalCaptureResultInfos, converter->mCaptureResultMetadataQueue);
// Convert Metadata into HCameraMetadata;
FmqSizeOrMetadata hResult;
const camera_metadata_t *rawMetadata = result.getAndLock();
converter->convertResultMetadataToHidl(rawMetadata, &hResult);
result.unlock(rawMetadata);
+ auto &physicalCaptureResultInfos = resultWrapper->mPhysicalCaptureResultInfos;
+ hidl_vec<HPhysicalCaptureResultInfo> hPhysicalCaptureResultInfos =
+ hardware::cameraservice::utils::conversion::convertToHidl(
+ physicalCaptureResultInfos, converter->mCaptureResultMetadataQueue);
auto ret = converter->mBase->onResultReceived(hResult, hResultExtras,
hPhysicalCaptureResultInfos);
if (!ret.isOk()) {
diff --git a/services/camera/libcameraservice/hidl/Convert.cpp b/services/camera/libcameraservice/hidl/Convert.cpp
index 582ce34..a87812b 100644
--- a/services/camera/libcameraservice/hidl/Convert.cpp
+++ b/services/camera/libcameraservice/hidl/Convert.cpp
@@ -89,8 +89,9 @@
for (auto &handle : windowHandles) {
iGBPs.push_back(new H2BGraphicBufferProducer(AImageReader_getHGBPFromHandle(handle)));
}
+ String16 physicalCameraId16(hOutputConfiguration.physicalCameraId.c_str());
hardware::camera2::params::OutputConfiguration outputConfiguration(
- iGBPs, convertFromHidl(hOutputConfiguration.rotation),
+ iGBPs, convertFromHidl(hOutputConfiguration.rotation), physicalCameraId16,
hOutputConfiguration.windowGroupId, OutputConfiguration::SURFACE_TYPE_UNKNOWN, 0, 0,
(windowHandles.size() > 1));
return outputConfiguration;
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 789548d..3b6dc80 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -108,6 +108,7 @@
LOCAL_MODULE := mediaswcodec
LOCAL_INIT_RC := mediaswcodec.rc
LOCAL_32_BIT_ONLY := true
+LOCAL_SANITIZE := scudo
sanitizer_runtime_libraries :=
llndk_libraries :=
diff --git a/services/mediacodec/main_swcodecservice.cpp b/services/mediacodec/main_swcodecservice.cpp
index 386abb2..79fea25 100644
--- a/services/mediacodec/main_swcodecservice.cpp
+++ b/services/mediacodec/main_swcodecservice.cpp
@@ -37,6 +37,12 @@
static const char kVendorSeccompPolicyPath[] =
"/vendor/etc/seccomp_policy/mediacodec.policy";
+// Disable Scudo's deallocation-type-mismatch check, as it is being triggered
+// by some third-party code.
+extern "C" const char *__scudo_default_options() {
+ return "DeallocationTypeMismatch=false";
+}
+
int main(int argc __unused, char** /*argv*/)
{
LOG(INFO) << "media swcodec service starting";
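Some context for the __scudo_default_options hook added above: a binary built with LOCAL_SANITIZE := scudo links the Scudo hardened allocator, which queries this hook at startup for option overrides. With DeallocationTypeMismatch left enabled, Scudo reports (and typically aborts on) memory released through the wrong family of functions, which is the class of error the third-party code mentioned in the comment is hitting. A minimal sketch of the kind of mismatch involved, assuming a Scudo-instrumented build:

    #include <cstdlib>

    // Hypothetical illustration only. With DeallocationTypeMismatch=true,
    // Scudo flags the free() below because the buffer came from operator new[].
    // Returning "DeallocationTypeMismatch=false" from __scudo_default_options,
    // as the service above now does, suppresses this class of report
    // process-wide rather than fixing each offending call site.
    int main() {
        char* buffer = new char[64];
        std::free(buffer);   // mismatched deallocation: new[] paired with free()
        return 0;
    }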
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 7c9c727..e31eadc 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -38,13 +38,6 @@
LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaextractor.policy
LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaextractor.policy
-# extractor libraries
-LOCAL_REQUIRED_MODULES += \
- libmkvextractor \
- libmp4extractor \
- libmpeg2extractor \
- liboggextractor \
-
LOCAL_SRC_FILES := main_extractorservice.cpp
LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
liblog libbase libicuuc libavservices_minijail
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
index 6d9ed6f..35ac458 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
@@ -21,6 +21,7 @@
getuid: 1
setpriority: 1
sigaltstack: 1
+fstat: 1
fstatfs: 1
newfstatat: 1
restart_syscall: 1
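The new fstat entry follows the existing one-syscall-per-line policy format that minijail consumes for this process: syscalls not listed are rejected by the seccomp filter, which typically kills the process rather than returning an error. Something as small as the sketch below, assuming an extractor that stats an already-open source fd, is enough to reach the fstat syscall on x86_64 and trip the filter without the added line:

    #include <sys/stat.h>

    // Hypothetical helper: probe the size of an open media source fd.
    // On x86_64, fstat(2) is its own syscall (distinct from fstatfs and
    // newfstatat, which were already whitelisted), so it needs its own
    // policy entry.
    off_t sourceSize(int fd) {
        struct stat st = {};
        return (fstat(fd, &st) == 0) ? st.st_size : -1;
    }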
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index ca96f62..bee5d25 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -9,10 +9,8 @@
shared_libs: [
"libaudioutils",
"libbinder",
- "libcutils",
"liblog",
"libmediautils",
- "libnbaio",
"libnblog",
"libutils",
],
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index cca1895..a1fc0ea 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -108,7 +108,7 @@
const AAudioStreamConfiguration &configuration) {
sp<AAudioServiceEndpoint> endpoint;
mExclusiveSearchCount++;
- for (const auto ep : mExclusiveStreams) {
+ for (const auto& ep : mExclusiveStreams) {
if (ep->matches(configuration)) {
mExclusiveFoundCount++;
endpoint = ep;
@@ -126,7 +126,7 @@
const AAudioStreamConfiguration &configuration) {
sp<AAudioServiceEndpointShared> endpoint;
mSharedSearchCount++;
- for (const auto ep : mSharedStreams) {
+ for (const auto& ep : mSharedStreams) {
if (ep->matches(configuration)) {
mSharedFoundCount++;
endpoint = ep;
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 539735a..4dfb62a 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -65,7 +65,7 @@
result << " Connected: " << mConnected.load() << "\n";
result << " Registered Streams:" << "\n";
result << AAudioServiceStreamShared::dumpHeader() << "\n";
- for (const auto stream : mRegisteredStreams) {
+ for (const auto& stream : mRegisteredStreams) {
result << stream->dump() << "\n";
}
@@ -78,7 +78,7 @@
// @return true if stream found
bool AAudioServiceEndpoint::isStreamRegistered(audio_port_handle_t portHandle) {
std::lock_guard<std::mutex> lock(mLockStreams);
- for (const auto stream : mRegisteredStreams) {
+ for (const auto& stream : mRegisteredStreams) {
if (stream->getPortHandle() == portHandle) {
return true;
}
@@ -89,7 +89,7 @@
void AAudioServiceEndpoint::disconnectRegisteredStreams() {
std::lock_guard<std::mutex> lock(mLockStreams);
mConnected.store(false);
- for (const auto stream : mRegisteredStreams) {
+ for (const auto& stream : mRegisteredStreams) {
ALOGD("disconnectRegisteredStreams() stop and disconnect %p", stream.get());
stream->stop();
stream->disconnect();
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index 7ae7f1b..37d105b 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -81,9 +81,10 @@
{ // brackets are for lock_guard
std::lock_guard <std::mutex> lock(mLockStreams);
- for (const auto clientStream : mRegisteredStreams) {
- if (clientStream->isRunning()) {
+ for (const auto& clientStream : mRegisteredStreams) {
+ if (clientStream->isRunning() && !clientStream->isSuspended()) {
int64_t clientFramesWritten = 0;
+
sp<AAudioServiceStreamShared> streamShared =
static_cast<AAudioServiceStreamShared *>(clientStream.get());
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index e4dbee1..6c28083 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -371,7 +371,7 @@
float volume = values[0];
ALOGD("%s(%p) volume[0] = %f", __func__, this, volume);
std::lock_guard<std::mutex> lock(mLockStreams);
- for(const auto stream : mRegisteredStreams) {
+ for(const auto& stream : mRegisteredStreams) {
stream->onVolumeChanged(volume);
}
};
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 923a1a4..1e1c552 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -80,10 +80,14 @@
int64_t mmapFramesWritten = getStreamInternal()->getFramesWritten();
std::lock_guard <std::mutex> lock(mLockStreams);
- for (const auto clientStream : mRegisteredStreams) {
+ for (const auto& clientStream : mRegisteredStreams) {
int64_t clientFramesRead = 0;
bool allowUnderflow = true;
+ if (clientStream->isSuspended()) {
+ continue; // dead stream
+ }
+
aaudio_stream_state_t state = clientStream->getState();
if (state == AAUDIO_STREAM_STATE_STOPPING) {
allowUnderflow = false; // just read what is already in the FIFO
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 354b36a..defbb7b 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -179,6 +179,7 @@
}
setFlowing(false);
+ setSuspended(false);
// Start with fresh presentation timestamps.
mAtomicTimestamp.clear();
@@ -345,7 +346,9 @@
}
int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
if (count != 1) {
- ALOGE("%s(): Queue full. Did client die? %s", __func__, getTypeText());
+ ALOGW("%s(): Queue full. Did client stop? Suspending stream. what = %u, %s",
+ __func__, command->what, getTypeText());
+ setSuspended(true);
return AAUDIO_ERROR_WOULD_BLOCK;
} else {
return AAUDIO_OK;
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index a1815d0..7904b25 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -204,6 +204,20 @@
}
/**
+ * Set true when the stream should no longer be processed.
+ * This may be caused by a message queue overflow.
+ * Set false when the stream is started.
+ * @param suspended true if the stream should no longer be processed
+ */
+ void setSuspended(bool suspended) {
+ mSuspended = suspended;
+ }
+
+ bool isSuspended() const {
+ return mSuspended;
+ }
+
+ /**
* Atomically increment the number of active references to the stream by AAudioService.
*
* This is called under a global lock in AAudioStreamTracker.
@@ -304,7 +318,12 @@
// This is modified under a global lock in AAudioStreamTracker.
int32_t mCallingCount = 0;
+ // This indicates that a stream that is being referenced by a binder call needs to be closed.
std::atomic<bool> mCloseNeeded{false};
+
+ // This indicates that a running stream should not be processed because of an error,
+ // for example a full message queue. Note that this atomic is unrelated to mCloseNeeded.
+ std::atomic<bool> mSuspended{false};
};
} /* namespace aaudio */
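Taken together, the mSuspended plumbing in the hunks above implements a small back-pressure pattern: when a stream's up-message FIFO overflows (the client has probably stopped draining it), the service marks the stream suspended and skips it in the endpoint mixing/distribution loops instead of logging an error on every period; start() clears the flag so a restarted stream is processed again. A condensed, self-contained sketch of that pattern (the names below are illustrative, not the real AAudio classes):

    #include <atomic>
    #include <memory>
    #include <vector>

    // Illustrative stand-in for one service-side stream.
    class StreamModel {
    public:
        void start()             { mSuspended.store(false); }  // restart clears the flag
        bool isSuspended() const { return mSuspended.load(); }

        // Pretend this writes one command into a bounded FIFO and reports
        // whether there was room for it.
        bool tryPostMessage()    { return mQueueHasRoom; }

        void sendServiceEvent() {
            if (!tryPostMessage()) {
                // Client is not reading its queue: stop processing this stream
                // instead of spamming the log from the service thread.
                mSuspended.store(true);
            }
        }

        bool mQueueHasRoom = true;

    private:
        std::atomic<bool> mSuspended{false};
    };

    // Endpoint loop: suspended ("dead") streams are skipped entirely.
    void distributeToAll(const std::vector<std::shared_ptr<StreamModel>>& streams) {
        for (const auto& stream : streams) {
            if (stream->isSuspended()) {
                continue;
            }
            stream->sendServiceEvent();
            // ... read/write audio data for this stream ...
        }
    }

As the header comment notes, this flag is kept separate from mCloseNeeded: a suspended stream is still tracked and is closed through the normal path.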