Merge "MediaUtils: Fix device unit tests"
diff --git a/camera/cameraserver/cameraserver.rc b/camera/cameraserver/cameraserver.rc
index 8f51458..6a311f2 100644
--- a/camera/cameraserver/cameraserver.rc
+++ b/camera/cameraserver/cameraserver.rc
@@ -5,3 +5,4 @@
ioprio rt 4
task_profiles CameraServiceCapacity MaxPerformance
rlimit rtprio 10 10
+ onrestart class_restart cameraWatchdog
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index dc9f848..5ecb130 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -2041,6 +2041,9 @@
sp<MediaCodecBuffer> outBuffer;
std::shared_ptr<C2Buffer> c2Buffer;
+ constexpr int kMaxReallocTry = 5;
+ int reallocTryNum = 0;
+
while (true) {
Mutexed<Output>::Locked output(mOutput);
if (!output->buffers) {
@@ -2048,6 +2051,9 @@
}
action = output->buffers->popFromStashAndRegister(
&c2Buffer, &index, &outBuffer);
+ if (action != OutputBuffers::REALLOCATE) {
+ reallocTryNum = 0;
+ }
switch (action) {
case OutputBuffers::SKIP:
return;
@@ -2058,6 +2064,13 @@
mCallback->onOutputBufferAvailable(index, outBuffer);
break;
case OutputBuffers::REALLOCATE:
+ if (++reallocTryNum > kMaxReallocTry) {
+ output.unlock();
+ ALOGE("[%s] sendOutputBuffers: tried %d realloc and failed",
+ mName, kMaxReallocTry);
+ mCCodecCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
if (!output->buffers->isArrayMode()) {
output->buffers =
output->buffers->toArrayMode(output->numSlots);
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index de8c298..1280577 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -2317,7 +2317,7 @@
status_t AudioSystem::getDirectProfilesForAttributes(const audio_attributes_t* attr,
std::vector<audio_profile>* audioProfiles) {
- if (attr == nullptr) {
+ if (attr == nullptr || audioProfiles == nullptr) {
return BAD_VALUE;
}
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
index 888d592..6e4cae7 100644
--- a/media/libaudioclient/TEST_MAPPING
+++ b/media/libaudioclient/TEST_MAPPING
@@ -29,5 +29,10 @@
{
"name": "trackplayerbase_tests"
}
+ ],
+ "postsubmit": [
+ {
+ "name": "audiosystem_tests"
+ }
]
}
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 6535b5b..e861932 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -118,7 +118,7 @@
"libshmemcompat",
"libstagefright_foundation",
"libutils",
- "libvibrator",
+ "libxml2",
"mediametricsservice-aidl-cpp",
"packagemanager_aidl-cpp",
"shared-file-region-aidl-cpp",
@@ -178,9 +178,6 @@
"audiorouting_tests.cpp",
"audio_test_utils.cpp",
],
- shared_libs: [
- "libxml2",
- ],
}
cc_test {
@@ -197,3 +194,12 @@
defaults: ["libaudioclient_gtests_defaults"],
srcs: ["trackplayerbase_tests.cpp"],
}
+
+cc_test {
+ name: "audiosystem_tests",
+ defaults: ["libaudioclient_gtests_defaults"],
+ srcs: [
+ "audiosystem_tests.cpp",
+ "audio_test_utils.cpp",
+ ],
+}
diff --git a/media/libaudioclient/tests/audio_test_utils.cpp b/media/libaudioclient/tests/audio_test_utils.cpp
index 018d920..44f0f50 100644
--- a/media/libaudioclient/tests/audio_test_utils.cpp
+++ b/media/libaudioclient/tests/audio_test_utils.cpp
@@ -17,10 +17,26 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTestUtils"
+#include <system/audio_config.h>
#include <utils/Log.h>
#include "audio_test_utils.h"
+template <class T>
+constexpr void (*xmlDeleter)(T* t);
+template <>
+constexpr auto xmlDeleter<xmlDoc> = xmlFreeDoc;
+template <>
+constexpr auto xmlDeleter<xmlChar> = [](xmlChar* s) { xmlFree(s); };
+
+/** @return a unique_ptr with the correct deleter for the libxml2 object. */
+template <class T>
+constexpr auto make_xmlUnique(T* t) {
+ // Wrap deleter in lambda to enable empty base optimization
+ auto deleter = [](T* t) { xmlDeleter<T>(t); };
+ return std::unique_ptr<T, decltype(deleter)>{t, deleter};
+}
+
// Generates a random string.
void CreateRandomFile(int& fd) {
std::string filename = "/data/local/tmp/record-XXXXXX";
@@ -466,6 +482,11 @@
status_t status = OK;
mStopRecording = true;
if (mState != REC_STOPPED) {
+ if (mInputSource != AUDIO_SOURCE_DEFAULT) {
+ bool state = false;
+ status = AudioSystem::isSourceActive(mInputSource, &state);
+ if (status == OK && !state) status = BAD_VALUE;
+ }
mRecord->stopAndJoinCallbacks();
mState = REC_STOPPED;
LOG_FATAL_IF(true != mRecord->stopped());
@@ -793,3 +814,91 @@
result << dumpPortConfig(port.active_config);
return result.str();
}
+
+std::string getXmlAttribute(const xmlNode* cur, const char* attribute) {
+ auto charPtr = make_xmlUnique(xmlGetProp(cur, reinterpret_cast<const xmlChar*>(attribute)));
+ if (charPtr == NULL) {
+ return "";
+ }
+ std::string value(reinterpret_cast<const char*>(charPtr.get()));
+ return value;
+}
+
+status_t parse_audio_policy_configuration_xml(std::vector<std::string>& attachedDevices,
+ std::vector<MixPort>& mixPorts,
+ std::vector<Route>& routes) {
+ std::string path = audio_find_readable_configuration_file("audio_policy_configuration.xml");
+ if (path.length() == 0) return UNKNOWN_ERROR;
+ auto doc = make_xmlUnique(xmlParseFile(path.c_str()));
+ if (doc == nullptr) return UNKNOWN_ERROR;
+ xmlNode* root = xmlDocGetRootElement(doc.get());
+ if (root == nullptr) return UNKNOWN_ERROR;
+ if (xmlXIncludeProcess(doc.get()) < 0) return UNKNOWN_ERROR;
+ mixPorts.clear();
+ if (!xmlStrcmp(root->name, reinterpret_cast<const xmlChar*>("audioPolicyConfiguration"))) {
+ std::string raw{getXmlAttribute(root, "version")};
+ for (auto* child = root->xmlChildrenNode; child != nullptr; child = child->next) {
+ if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>("modules"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr; child = child->next) {
+ if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>("module"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("mixPorts"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("mixPort"))) {
+ MixPort mixPort;
+ xmlNode* root = child;
+ mixPort.name = getXmlAttribute(root, "name");
+ mixPort.role = getXmlAttribute(root, "role");
+ mixPort.flags = getXmlAttribute(root, "flags");
+ if (mixPort.role == "source") mixPorts.push_back(mixPort);
+ }
+ }
+ } else if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(
+ "attachedDevices"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("item"))) {
+ auto xmlValue = make_xmlUnique(xmlNodeListGetString(
+ child->doc, child->xmlChildrenNode, 1));
+ if (xmlValue == nullptr) {
+ raw = "";
+ } else {
+ raw = reinterpret_cast<const char*>(xmlValue.get());
+ }
+ std::string& value = raw;
+ attachedDevices.push_back(std::move(value));
+ }
+ }
+ } else if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("routes"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("route"))) {
+ Route route;
+ xmlNode* root = child;
+ route.name = getXmlAttribute(root, "name");
+ route.sources = getXmlAttribute(root, "sources");
+ route.sink = getXmlAttribute(root, "sink");
+ routes.push_back(route);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return OK;
+}
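For context, the parser above collects only "source" mix ports and fills the attached devices and routes by reference. A hedged usage sketch (illustrative only, not part of the patch) of how a test might consume that output, assuming the declarations from audio_test_utils.h are in scope:

#include <string>
#include <vector>

#include "audio_test_utils.h"  // MixPort, Route, parse_audio_policy_configuration_xml

void exampleUseOfPolicyConfig() {
    std::vector<std::string> attachedDevices;
    std::vector<MixPort> mixPorts;
    std::vector<Route> routes;
    if (parse_audio_policy_configuration_xml(attachedDevices, mixPorts, routes) != OK) return;
    for (const auto& mixPort : mixPorts) {
        // Example policy (hypothetical): skip mix ports whose flags request
        // AUDIO_OUTPUT_FLAG_FAST and exercise the remaining ones.
        if (mixPort.flags.find("AUDIO_OUTPUT_FLAG_FAST") != std::string::npos) continue;
        // ... create a playback session against mixPort.name ...
    }
}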
diff --git a/media/libaudioclient/tests/audio_test_utils.h b/media/libaudioclient/tests/audio_test_utils.h
index 526d5c4..f35b65d 100644
--- a/media/libaudioclient/tests/audio_test_utils.h
+++ b/media/libaudioclient/tests/audio_test_utils.h
@@ -28,6 +28,8 @@
#include <thread>
#include <binder/MemoryDealer.h>
+#include <libxml/parser.h>
+#include <libxml/xinclude.h>
#include <media/AidlConversion.h>
#include <media/AudioRecord.h>
#include <media/AudioTrack.h>
@@ -36,6 +38,21 @@
using namespace android;
+struct MixPort {
+ std::string name;
+ std::string role;
+ std::string flags;
+};
+
+struct Route {
+ std::string name;
+ std::string sources;
+ std::string sink;
+};
+
+status_t parse_audio_policy_configuration_xml(std::vector<std::string>& attachedDevices,
+ std::vector<MixPort>& mixPorts,
+ std::vector<Route>& routes);
void CreateRandomFile(int& fd);
status_t listAudioPorts(std::vector<audio_port_v7>& portsVec);
status_t listAudioPatches(std::vector<struct audio_patch>& patchesVec);
diff --git a/media/libaudioclient/tests/audiorecord_tests.cpp b/media/libaudioclient/tests/audiorecord_tests.cpp
index 754e6cc..8c63a6d 100644
--- a/media/libaudioclient/tests/audiorecord_tests.cpp
+++ b/media/libaudioclient/tests/audiorecord_tests.cpp
@@ -81,8 +81,8 @@
TEST_F(AudioRecordTest, TestAudioCbNotifier) {
EXPECT_EQ(BAD_VALUE, mAC->getAudioRecordHandle()->addAudioDeviceCallback(nullptr));
- sp<OnAudioDeviceUpdateNotifier> cb = new OnAudioDeviceUpdateNotifier();
- sp<OnAudioDeviceUpdateNotifier> cbOld = new OnAudioDeviceUpdateNotifier();
+ sp<OnAudioDeviceUpdateNotifier> cb = sp<OnAudioDeviceUpdateNotifier>::make();
+ sp<OnAudioDeviceUpdateNotifier> cbOld = sp<OnAudioDeviceUpdateNotifier>::make();
EXPECT_EQ(OK, mAC->getAudioRecordHandle()->addAudioDeviceCallback(cbOld));
EXPECT_EQ(INVALID_OPERATION, mAC->getAudioRecordHandle()->addAudioDeviceCallback(cbOld));
EXPECT_EQ(OK, mAC->getAudioRecordHandle()->addAudioDeviceCallback(cb));
diff --git a/media/libaudioclient/tests/audiorouting_tests.cpp b/media/libaudioclient/tests/audiorouting_tests.cpp
index 32ba597..445633b 100644
--- a/media/libaudioclient/tests/audiorouting_tests.cpp
+++ b/media/libaudioclient/tests/audiorouting_tests.cpp
@@ -18,130 +18,12 @@
#include <cutils/properties.h>
#include <gtest/gtest.h>
-#include <libxml/parser.h>
-#include <libxml/xinclude.h>
#include <string.h>
-#include <system/audio_config.h>
#include "audio_test_utils.h"
using namespace android;
-template <class T>
-constexpr void (*xmlDeleter)(T* t);
-template <>
-constexpr auto xmlDeleter<xmlDoc> = xmlFreeDoc;
-template <>
-constexpr auto xmlDeleter<xmlChar> = [](xmlChar* s) { xmlFree(s); };
-
-/** @return a unique_ptr with the correct deleter for the libxml2 object. */
-template <class T>
-constexpr auto make_xmlUnique(T* t) {
- // Wrap deleter in lambda to enable empty base optimization
- auto deleter = [](T* t) { xmlDeleter<T>(t); };
- return std::unique_ptr<T, decltype(deleter)>{t, deleter};
-}
-
-std::string getXmlAttribute(const xmlNode* cur, const char* attribute) {
- auto charPtr = make_xmlUnique(xmlGetProp(cur, reinterpret_cast<const xmlChar*>(attribute)));
- if (charPtr == NULL) {
- return "";
- }
- std::string value(reinterpret_cast<const char*>(charPtr.get()));
- return value;
-}
-
-struct MixPort {
- std::string name;
- std::string role;
- std::string flags;
-};
-
-struct Route {
- std::string name;
- std::string sources;
- std::string sink;
-};
-
-status_t parse_audio_policy_configuration_xml(std::vector<std::string>& attachedDevices,
- std::vector<MixPort>& mixPorts,
- std::vector<Route>& routes) {
- std::string path = audio_find_readable_configuration_file("audio_policy_configuration.xml");
- if (path.length() == 0) return UNKNOWN_ERROR;
- auto doc = make_xmlUnique(xmlParseFile(path.c_str()));
- if (doc == nullptr) return UNKNOWN_ERROR;
- xmlNode* root = xmlDocGetRootElement(doc.get());
- if (root == nullptr) return UNKNOWN_ERROR;
- if (xmlXIncludeProcess(doc.get()) < 0) return UNKNOWN_ERROR;
- mixPorts.clear();
- if (!xmlStrcmp(root->name, reinterpret_cast<const xmlChar*>("audioPolicyConfiguration"))) {
- std::string raw{getXmlAttribute(root, "version")};
- for (auto* child = root->xmlChildrenNode; child != nullptr; child = child->next) {
- if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>("modules"))) {
- xmlNode* root = child;
- for (auto* child = root->xmlChildrenNode; child != nullptr; child = child->next) {
- if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>("module"))) {
- xmlNode* root = child;
- for (auto* child = root->xmlChildrenNode; child != nullptr;
- child = child->next) {
- if (!xmlStrcmp(child->name,
- reinterpret_cast<const xmlChar*>("mixPorts"))) {
- xmlNode* root = child;
- for (auto* child = root->xmlChildrenNode; child != nullptr;
- child = child->next) {
- if (!xmlStrcmp(child->name,
- reinterpret_cast<const xmlChar*>("mixPort"))) {
- MixPort mixPort;
- xmlNode* root = child;
- mixPort.name = getXmlAttribute(root, "name");
- mixPort.role = getXmlAttribute(root, "role");
- mixPort.flags = getXmlAttribute(root, "flags");
- if (mixPort.role == "source") mixPorts.push_back(mixPort);
- }
- }
- } else if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(
- "attachedDevices"))) {
- xmlNode* root = child;
- for (auto* child = root->xmlChildrenNode; child != nullptr;
- child = child->next) {
- if (!xmlStrcmp(child->name,
- reinterpret_cast<const xmlChar*>("item"))) {
- auto xmlValue = make_xmlUnique(xmlNodeListGetString(
- child->doc, child->xmlChildrenNode, 1));
- if (xmlValue == nullptr) {
- raw = "";
- } else {
- raw = reinterpret_cast<const char*>(xmlValue.get());
- }
- std::string& value = raw;
- attachedDevices.push_back(std::move(value));
- }
- }
- } else if (!xmlStrcmp(child->name,
- reinterpret_cast<const xmlChar*>("routes"))) {
- xmlNode* root = child;
- for (auto* child = root->xmlChildrenNode; child != nullptr;
- child = child->next) {
- if (!xmlStrcmp(child->name,
- reinterpret_cast<const xmlChar*>("route"))) {
- Route route;
- xmlNode* root = child;
- route.name = getXmlAttribute(root, "name");
- route.sources = getXmlAttribute(root, "sources");
- route.sink = getXmlAttribute(root, "sink");
- routes.push_back(route);
- }
- }
- }
- }
- }
- }
- }
- }
- }
- return OK;
-}
-
// UNIT TEST
TEST(AudioTrackTest, TestPerformanceMode) {
std::vector<std::string> attachedDevices;
@@ -185,7 +67,7 @@
ASSERT_EQ(OK, ap->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
<< "Unable to open Resource";
EXPECT_EQ(OK, ap->create()) << "track creation failed";
- sp<OnAudioDeviceUpdateNotifier> cb = new OnAudioDeviceUpdateNotifier();
+ sp<OnAudioDeviceUpdateNotifier> cb = sp<OnAudioDeviceUpdateNotifier>::make();
EXPECT_EQ(OK, ap->getAudioTrackHandle()->addAudioDeviceCallback(cb));
EXPECT_EQ(OK, ap->start()) << "audio track start failed";
EXPECT_EQ(OK, ap->onProcess());
diff --git a/media/libaudioclient/tests/audiosystem_tests.cpp b/media/libaudioclient/tests/audiosystem_tests.cpp
new file mode 100644
index 0000000..aed847c
--- /dev/null
+++ b/media/libaudioclient/tests/audiosystem_tests.cpp
@@ -0,0 +1,573 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioSystemTest"
+
+#include <string.h>
+
+#include <gtest/gtest.h>
+#include <media/IAudioFlinger.h>
+#include <utils/Log.h>
+
+#include "audio_test_utils.h"
+
+using namespace android;
+
+void anyPatchContainsInputDevice(audio_port_handle_t deviceId, bool& res) {
+ std::vector<struct audio_patch> patches;
+ status_t status = listAudioPatches(patches);
+ ASSERT_EQ(OK, status);
+ res = false;
+ for (const auto& patch : patches) {
+ if (patchContainsInputDevice(deviceId, patch)) {
+ res = true;
+ return;
+ }
+ }
+}
+
+class AudioSystemTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ mAF = AudioSystem::get_audio_flinger();
+ ASSERT_NE(mAF, nullptr) << "Permission denied";
+ }
+
+ void TearDown() override {
+ if (mPlayback) {
+ mPlayback->stop();
+ mPlayback->getAudioTrackHandle()->removeAudioDeviceCallback(mCbPlayback);
+ mPlayback.clear();
+ }
+ if (mCapture) {
+ mCapture->stop();
+ mCapture->getAudioRecordHandle()->removeAudioDeviceCallback(mCbRecord);
+ mCapture.clear();
+ }
+ }
+
+ void createPlaybackSession(void);
+ void createRecordSession(void);
+
+ sp<IAudioFlinger> mAF;
+ sp<AudioPlayback> mPlayback;
+ sp<OnAudioDeviceUpdateNotifier> mCbPlayback;
+ sp<AudioCapture> mCapture;
+ sp<OnAudioDeviceUpdateNotifier> mCbRecord;
+};
+
+void AudioSystemTest::createPlaybackSession(void) {
+ audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ attributes.usage = AUDIO_USAGE_MEDIA;
+ attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+ mPlayback = sp<AudioPlayback>::make(48000, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_FAST, AUDIO_SESSION_NONE,
+ AudioTrack::TRANSFER_SHARED, &attributes);
+ ASSERT_NE(nullptr, mPlayback);
+ ASSERT_EQ(NO_ERROR, mPlayback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
+ EXPECT_EQ(NO_ERROR, mPlayback->create());
+ mCbPlayback = sp<OnAudioDeviceUpdateNotifier>::make();
+ EXPECT_EQ(OK, mPlayback->getAudioTrackHandle()->addAudioDeviceCallback(mCbPlayback));
+ EXPECT_EQ(NO_ERROR, mPlayback->start());
+ EXPECT_EQ(OK, mPlayback->onProcess());
+ EXPECT_EQ(OK, mCbPlayback->waitForAudioDeviceCb());
+}
+
+void AudioSystemTest::createRecordSession(void) {
+ mCapture = new AudioCapture(AUDIO_SOURCE_DEFAULT, 44100, AUDIO_FORMAT_PCM_8_24_BIT,
+ AUDIO_CHANNEL_IN_MONO, AUDIO_INPUT_FLAG_FAST);
+ ASSERT_NE(nullptr, mCapture);
+ ASSERT_EQ(OK, mCapture->create()) << "record creation failed";
+ mCbRecord = sp<OnAudioDeviceUpdateNotifier>::make();
+ EXPECT_EQ(OK, mCapture->getAudioRecordHandle()->addAudioDeviceCallback(mCbRecord));
+ EXPECT_EQ(OK, mCapture->start()) << "record start failed";
+ EXPECT_EQ(OK, mCbRecord->waitForAudioDeviceCb());
+}
+
+// UNIT TESTS
+TEST_F(AudioSystemTest, CheckServerSideValues) {
+ ASSERT_NO_FATAL_FAILURE(createPlaybackSession());
+ EXPECT_GT(mAF->sampleRate(mCbPlayback->mAudioIo), 0);
+ EXPECT_NE(mAF->format(mCbPlayback->mAudioIo), AUDIO_FORMAT_INVALID);
+ EXPECT_GT(mAF->frameCount(mCbPlayback->mAudioIo), 0);
+ size_t frameCountHal, frameCountHalCache;
+ frameCountHal = mAF->frameCountHAL(mCbPlayback->mAudioIo);
+ EXPECT_GT(frameCountHal, 0);
+ EXPECT_EQ(OK, AudioSystem::getFrameCountHAL(mCbPlayback->mAudioIo, &frameCountHalCache));
+ EXPECT_EQ(frameCountHal, frameCountHalCache);
+ EXPECT_GT(mAF->latency(mCbPlayback->mAudioIo), 0);
+ // client side latency is at least server side latency
+ EXPECT_LE(mAF->latency(mCbPlayback->mAudioIo), mPlayback->getAudioTrackHandle()->latency());
+
+ ASSERT_NO_FATAL_FAILURE(createRecordSession());
+ EXPECT_GT(mAF->sampleRate(mCbRecord->mAudioIo), 0);
+ // EXPECT_NE(mAF->format(mCbRecord->mAudioIo), AUDIO_FORMAT_INVALID);
+ EXPECT_GT(mAF->frameCount(mCbRecord->mAudioIo), 0);
+ EXPECT_GT(mAF->frameCountHAL(mCbRecord->mAudioIo), 0);
+ frameCountHal = mAF->frameCountHAL(mCbRecord->mAudioIo);
+ EXPECT_GT(frameCountHal, 0);
+ EXPECT_EQ(OK, AudioSystem::getFrameCountHAL(mCbRecord->mAudioIo, &frameCountHalCache));
+ EXPECT_EQ(frameCountHal, frameCountHalCache);
+ // EXPECT_GT(mAF->latency(mCbRecord->mAudioIo), 0);
+ // client side latency is at least server side latency
+ // EXPECT_LE(mAF->latency(mCbRecord->mAudioIo), mCapture->getAudioRecordHandle()->latency());
+
+ EXPECT_GT(AudioSystem::getPrimaryOutputSamplingRate(), 0); // first fast mixer sample rate
+ EXPECT_GT(AudioSystem::getPrimaryOutputFrameCount(), 0); // fast mixer frame count
+}
+
+TEST_F(AudioSystemTest, GetSetMasterVolume) {
+ ASSERT_NO_FATAL_FAILURE(createPlaybackSession());
+ float origVol, tstVol;
+ EXPECT_EQ(NO_ERROR, AudioSystem::getMasterVolume(&origVol));
+ float newVol;
+ if (origVol + 0.2f > 1.0f) {
+ newVol = origVol - 0.2f;
+ } else {
+ newVol = origVol + 0.2f;
+ }
+ EXPECT_EQ(NO_ERROR, AudioSystem::setMasterVolume(newVol));
+ EXPECT_EQ(NO_ERROR, AudioSystem::getMasterVolume(&tstVol));
+ EXPECT_EQ(newVol, tstVol);
+ EXPECT_EQ(NO_ERROR, AudioSystem::setMasterVolume(origVol));
+ EXPECT_EQ(NO_ERROR, AudioSystem::getMasterVolume(&tstVol));
+ EXPECT_EQ(origVol, tstVol);
+}
+
+TEST_F(AudioSystemTest, GetSetMasterMute) {
+ ASSERT_NO_FATAL_FAILURE(createPlaybackSession());
+ bool origMuteState, tstMuteState;
+ EXPECT_EQ(NO_ERROR, AudioSystem::getMasterMute(&origMuteState));
+ EXPECT_EQ(NO_ERROR, AudioSystem::setMasterMute(!origMuteState));
+ EXPECT_EQ(NO_ERROR, AudioSystem::getMasterMute(&tstMuteState));
+ EXPECT_EQ(!origMuteState, tstMuteState);
+ EXPECT_EQ(NO_ERROR, AudioSystem::setMasterMute(origMuteState));
+ EXPECT_EQ(NO_ERROR, AudioSystem::getMasterMute(&tstMuteState));
+ EXPECT_EQ(origMuteState, tstMuteState);
+}
+
+TEST_F(AudioSystemTest, GetSetMicMute) {
+ ASSERT_NO_FATAL_FAILURE(createPlaybackSession());
+ bool origMuteState, tstMuteState;
+ EXPECT_EQ(NO_ERROR, AudioSystem::isMicrophoneMuted(&origMuteState));
+ EXPECT_EQ(NO_ERROR, AudioSystem::muteMicrophone(!origMuteState));
+ EXPECT_EQ(NO_ERROR, AudioSystem::isMicrophoneMuted(&tstMuteState));
+ EXPECT_EQ(!origMuteState, tstMuteState);
+ EXPECT_EQ(NO_ERROR, AudioSystem::muteMicrophone(origMuteState));
+ EXPECT_EQ(NO_ERROR, AudioSystem::isMicrophoneMuted(&tstMuteState));
+ EXPECT_EQ(origMuteState, tstMuteState);
+}
+
+TEST_F(AudioSystemTest, GetSetMasterBalance) {
+ ASSERT_NO_FATAL_FAILURE(createPlaybackSession());
+ float origBalance, tstBalance;
+ EXPECT_EQ(OK, AudioSystem::getMasterBalance(&origBalance));
+ float newBalance;
+ if (origBalance + 0.2f > 1.0f) {
+ newBalance = origBalance - 0.2f;
+ } else {
+ newBalance = origBalance + 0.2f;
+ }
+ EXPECT_EQ(OK, AudioSystem::setMasterBalance(newBalance));
+ EXPECT_EQ(OK, AudioSystem::getMasterBalance(&tstBalance));
+ EXPECT_EQ(newBalance, tstBalance);
+ EXPECT_EQ(OK, AudioSystem::setMasterBalance(origBalance));
+ EXPECT_EQ(OK, AudioSystem::getMasterBalance(&tstBalance));
+ EXPECT_EQ(origBalance, tstBalance);
+}
+
+TEST_F(AudioSystemTest, GetStreamVolume) {
+ ASSERT_NO_FATAL_FAILURE(createPlaybackSession());
+ float origStreamVol;
+ EXPECT_EQ(NO_ERROR, AudioSystem::getStreamVolume(AUDIO_STREAM_MUSIC, &origStreamVol,
+ mCbPlayback->mAudioIo));
+}
+
+TEST_F(AudioSystemTest, GetStreamMute) {
+ ASSERT_NO_FATAL_FAILURE(createPlaybackSession());
+ bool origMuteState;
+ EXPECT_EQ(NO_ERROR, AudioSystem::getStreamMute(AUDIO_STREAM_MUSIC, &origMuteState));
+}
+
+TEST_F(AudioSystemTest, StartAndStopAudioSource) {
+ std::vector<struct audio_port_v7> ports;
+ audio_port_config sourcePortConfig;
+ audio_attributes_t attributes = AudioSystem::streamTypeToAttributes(AUDIO_STREAM_MUSIC);
+ audio_port_handle_t sourcePortHandle = AUDIO_PORT_HANDLE_NONE;
+
+ status_t status = listAudioPorts(ports);
+ ASSERT_EQ(OK, status);
+ if (ports.empty()) {
+ GTEST_SKIP() << "No ports returned by the audio system";
+ }
+
+ for (const auto& port : ports) {
+ if (port.role != AUDIO_PORT_ROLE_SOURCE || port.type != AUDIO_PORT_TYPE_DEVICE) continue;
+ sourcePortConfig = port.active_config;
+
+ bool patchFound;
+
+ // start audio source.
+ status_t ret =
+ AudioSystem::startAudioSource(&sourcePortConfig, &attributes, &sourcePortHandle);
+ EXPECT_EQ(OK, ret) << "AudioSystem::startAudioSource for source " << port.ext.device.address
+ << " failed";
+
+ // verify that patch is established by the source port.
+ ASSERT_NO_FATAL_FAILURE(anyPatchContainsInputDevice(port.id, patchFound));
+ EXPECT_EQ(true, patchFound);
+ EXPECT_NE(sourcePortHandle, AUDIO_PORT_HANDLE_NONE);
+
+ if (sourcePortHandle != AUDIO_PORT_HANDLE_NONE) {
+ ret = AudioSystem::stopAudioSource(sourcePortHandle);
+ EXPECT_EQ(OK, ret) << "AudioSystem::stopAudioSource for handle failed";
+ }
+
+ // verify that no source port patch exists.
+ ASSERT_NO_FATAL_FAILURE(anyPatchContainsInputDevice(port.id, patchFound));
+ EXPECT_EQ(false, patchFound);
+ }
+}
+
+TEST_F(AudioSystemTest, CreateAndReleaseAudioPatch) {
+ status_t status;
+ struct audio_patch audioPatch;
+ std::vector<struct audio_port_v7> ports;
+ audio_patch_handle_t audioPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+
+ bool patchFound = false;
+ audio_port_v7 sourcePort{};
+ audio_port_v7 sinkPort{};
+
+ audioPatch.id = 0;
+ audioPatch.num_sources = 1;
+ audioPatch.num_sinks = 1;
+
+ status = listAudioPorts(ports);
+ ASSERT_EQ(OK, status);
+ if (ports.empty()) {
+ GTEST_SKIP() << "No output devices returned by the audio system";
+ }
+
+ for (const auto& port : ports) {
+ if (port.role == AUDIO_PORT_ROLE_SOURCE && port.type == AUDIO_PORT_TYPE_DEVICE) {
+ sourcePort = port;
+ }
+ if (port.role == AUDIO_PORT_ROLE_SINK && port.type == AUDIO_PORT_TYPE_DEVICE &&
+ port.ext.device.type == AUDIO_DEVICE_OUT_SPEAKER) {
+ sinkPort = port;
+ }
+ }
+
+ audioPatch.sources[0] = sourcePort.active_config;
+ audioPatch.sinks[0] = sinkPort.active_config;
+
+ status = AudioSystem::createAudioPatch(&audioPatch, &audioPatchHandle);
+ EXPECT_EQ(OK, status) << "AudioSystem::createAudiopatch failed between source "
+ << sourcePort.ext.device.address << " and sink "
+ << sinkPort.ext.device.address;
+
+ // verify that patch is established between source and the sink.
+ ASSERT_NO_FATAL_FAILURE(anyPatchContainsInputDevice(sourcePort.id, patchFound));
+ EXPECT_EQ(true, patchFound);
+
+ EXPECT_NE(AUDIO_PORT_HANDLE_NONE, audioPatchHandle);
+ status = AudioSystem::releaseAudioPatch(audioPatchHandle);
+ EXPECT_EQ(OK, status) << "AudioSystem::releaseAudioPatch failed between source "
+ << sourcePort.ext.device.address << " and sink "
+ << sinkPort.ext.device.address;
+
+ // verify that no patch is established between source and the sink after releaseAudioPatch.
+ ASSERT_NO_FATAL_FAILURE(anyPatchContainsInputDevice(sourcePort.id, patchFound));
+ EXPECT_EQ(false, patchFound);
+}
+
+TEST_F(AudioSystemTest, GetAudioPort) {
+ std::vector<struct audio_port_v7> ports;
+ status_t status = listAudioPorts(ports);
+ ASSERT_EQ(OK, status);
+ for (const auto& port : ports) {
+ audio_port_v7 portTest{.id = port.id};
+ EXPECT_EQ(OK, AudioSystem::getAudioPort(&portTest));
+ EXPECT_TRUE(audio_ports_v7_are_equal(&portTest, &port));
+ }
+}
+
+TEST_F(AudioSystemTest, TestPhoneState) {
+ uid_t uid = getuid();
+ EXPECT_EQ(OK, AudioSystem::setPhoneState(AUDIO_MODE_RINGTONE, uid));
+ audio_mode_t state = AudioSystem::getPhoneState();
+ EXPECT_EQ(AUDIO_MODE_RINGTONE, state);
+ EXPECT_EQ(OK, AudioSystem::setPhoneState(AUDIO_MODE_IN_COMMUNICATION, uid));
+ state = AudioSystem::getPhoneState();
+ EXPECT_EQ(AUDIO_MODE_IN_COMMUNICATION, state);
+ EXPECT_EQ(OK, AudioSystem::setPhoneState(AUDIO_MODE_NORMAL, uid));
+ state = AudioSystem::getPhoneState();
+ EXPECT_EQ(AUDIO_MODE_NORMAL, state);
+}
+
+TEST_F(AudioSystemTest, GetDirectProfilesForAttributes) {
+ std::vector<audio_profile> audioProfiles;
+ audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ attributes.usage = AUDIO_USAGE_MEDIA;
+ attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+ EXPECT_EQ(BAD_VALUE, AudioSystem::getDirectProfilesForAttributes(nullptr, nullptr));
+ EXPECT_EQ(BAD_VALUE, AudioSystem::getDirectProfilesForAttributes(nullptr, &audioProfiles));
+ EXPECT_EQ(BAD_VALUE, AudioSystem::getDirectProfilesForAttributes(&attributes, nullptr));
+ EXPECT_EQ(NO_ERROR, AudioSystem::getDirectProfilesForAttributes(&attributes, &audioProfiles));
+}
+
+bool isPublicStrategy(const AudioProductStrategy& strategy) {
+ bool result = true;
+ for (auto& attribute : strategy.getAudioAttributes()) {
+ if (attribute.getAttributes() == AUDIO_ATTRIBUTES_INITIALIZER &&
+ (uint32_t(attribute.getStreamType()) >= AUDIO_STREAM_PUBLIC_CNT)) {
+ result = false;
+ break;
+ }
+ }
+ return result;
+}
+
+TEST_F(AudioSystemTest, DevicesForRoleAndStrategy) {
+ std::vector<struct audio_port_v7> ports;
+ status_t status = listAudioPorts(ports);
+ ASSERT_EQ(OK, status);
+
+ std::vector<struct audio_port_v7> devicePorts;
+ for (const auto& port : ports) {
+ if (port.type == AUDIO_PORT_TYPE_DEVICE && audio_is_output_device(port.ext.device.type)) {
+ devicePorts.push_back(port);
+ }
+ }
+ if (devicePorts.empty()) {
+ GTEST_SKIP() << "No output devices returned by the audio system";
+ }
+
+ AudioProductStrategyVector strategies;
+ EXPECT_EQ(OK, AudioSystem::listAudioProductStrategies(strategies));
+ if (strategies.empty()) {
+ GTEST_SKIP() << "No strategies returned by the audio system";
+ }
+
+ audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ attributes.usage = AUDIO_USAGE_MEDIA;
+
+ bool hasStrategyForMedia = false;
+ AudioProductStrategy mediaStrategy;
+ for (const auto& strategy : strategies) {
+ if (!isPublicStrategy(strategy)) continue;
+
+ for (const auto& att : strategy.getAudioAttributes()) {
+ if (strategy.attributesMatches(att.getAttributes(), attributes)) {
+ hasStrategyForMedia = true;
+ mediaStrategy = strategy;
+ break;
+ }
+ }
+ }
+
+ if (!hasStrategyForMedia) {
+ GTEST_SKIP() << "No strategies returned for music media";
+ }
+
+ AudioDeviceTypeAddrVector devices;
+ EXPECT_EQ(BAD_VALUE, AudioSystem::getDevicesForRoleAndStrategy(PRODUCT_STRATEGY_NONE,
+ DEVICE_ROLE_PREFERRED, devices));
+ EXPECT_EQ(BAD_VALUE, AudioSystem::getDevicesForRoleAndStrategy(mediaStrategy.getId(),
+ DEVICE_ROLE_NONE, devices));
+ status = AudioSystem::getDevicesForRoleAndStrategy(mediaStrategy.getId(), DEVICE_ROLE_PREFERRED,
+ devices);
+ if (status == NAME_NOT_FOUND) {
+ AudioDeviceTypeAddrVector outputDevices;
+ for (const auto& port : devicePorts) {
+ if (port.ext.device.type == AUDIO_DEVICE_OUT_SPEAKER) {
+ const AudioDeviceTypeAddr outputDevice(port.ext.device.type,
+ port.ext.device.address);
+ outputDevices.push_back(outputDevice);
+ }
+ }
+ EXPECT_EQ(OK, AudioSystem::setDevicesRoleForStrategy(mediaStrategy.getId(),
+ DEVICE_ROLE_PREFERRED, outputDevices));
+ EXPECT_EQ(OK, AudioSystem::getDevicesForRoleAndStrategy(mediaStrategy.getId(),
+ DEVICE_ROLE_PREFERRED, devices));
+ EXPECT_EQ(devices, outputDevices);
+ EXPECT_EQ(OK, AudioSystem::removeDevicesRoleForStrategy(mediaStrategy.getId(),
+ DEVICE_ROLE_PREFERRED));
+ EXPECT_EQ(NAME_NOT_FOUND, AudioSystem::getDevicesForRoleAndStrategy(
+ mediaStrategy.getId(), DEVICE_ROLE_PREFERRED, devices));
+ }
+}
+
+TEST_F(AudioSystemTest, VolumeIndexForAttributes) {
+ AudioVolumeGroupVector groups;
+ EXPECT_EQ(OK, AudioSystem::listAudioVolumeGroups(groups));
+ for (const auto& group : groups) {
+ if (group.getAudioAttributes().empty()) continue;
+ const audio_attributes_t attr = group.getAudioAttributes()[0];
+ if (attr == AUDIO_ATTRIBUTES_INITIALIZER) continue;
+ audio_stream_type_t streamType = AudioSystem::attributesToStreamType(attr);
+ if (streamType >= AUDIO_STREAM_PUBLIC_CNT) continue;
+
+ volume_group_t vg;
+ EXPECT_EQ(OK, AudioSystem::getVolumeGroupFromAudioAttributes(attr, vg));
+ EXPECT_EQ(group.getId(), vg);
+
+ int index;
+ EXPECT_EQ(OK,
+ AudioSystem::getVolumeIndexForAttributes(attr, index, AUDIO_DEVICE_OUT_SPEAKER));
+
+ int indexTest;
+ EXPECT_EQ(OK, AudioSystem::getStreamVolumeIndex(streamType, &indexTest,
+ AUDIO_DEVICE_OUT_SPEAKER));
+ EXPECT_EQ(index, indexTest);
+ }
+}
+
+TEST_F(AudioSystemTest, DevicesRoleForCapturePreset) {
+ std::vector<struct audio_port_v7> ports;
+ status_t status = listAudioPorts(ports);
+ ASSERT_EQ(OK, status);
+
+ if (ports.empty()) {
+ GTEST_SKIP() << "No ports returned by the audio system";
+ }
+
+ audio_devices_t inDeviceA = AUDIO_DEVICE_IN_BUILTIN_MIC;
+ audio_devices_t inDeviceB = AUDIO_DEVICE_IN_BUILTIN_MIC;
+ for (const auto& port : ports) {
+ if (port.role != AUDIO_PORT_ROLE_SOURCE || port.type != AUDIO_PORT_TYPE_DEVICE) continue;
+ if (port.ext.device.type == inDeviceA) continue;
+ inDeviceB = port.ext.device.type;
+ break;
+ }
+ const audio_source_t audioSource = AUDIO_SOURCE_MIC;
+ const device_role_t role = DEVICE_ROLE_PREFERRED;
+ const AudioDeviceTypeAddr inputDevice(inDeviceA, "");
+ const AudioDeviceTypeAddrVector inputDevices = {inputDevice};
+ const AudioDeviceTypeAddr outputDevice(AUDIO_DEVICE_OUT_SPEAKER, "");
+ const AudioDeviceTypeAddrVector outputDevices = {outputDevice};
+
+ // Test invalid device when setting
+ EXPECT_EQ(BAD_VALUE,
+ AudioSystem::setDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+ EXPECT_EQ(BAD_VALUE,
+ AudioSystem::addDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+ EXPECT_EQ(BAD_VALUE,
+ AudioSystem::removeDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+
+ // Test invalid role
+ AudioDeviceTypeAddrVector devices;
+ EXPECT_EQ(BAD_VALUE, AudioSystem::getDevicesForRoleAndCapturePreset(audioSource,
+ DEVICE_ROLE_NONE, devices));
+ EXPECT_EQ(BAD_VALUE, AudioSystem::setDevicesRoleForCapturePreset(audioSource, DEVICE_ROLE_NONE,
+ inputDevices));
+ EXPECT_EQ(BAD_VALUE, AudioSystem::addDevicesRoleForCapturePreset(audioSource, DEVICE_ROLE_NONE,
+ inputDevices));
+ EXPECT_EQ(BAD_VALUE, AudioSystem::removeDevicesRoleForCapturePreset(
+ audioSource, DEVICE_ROLE_NONE, inputDevices));
+ EXPECT_EQ(BAD_VALUE,
+ AudioSystem::clearDevicesRoleForCapturePreset(audioSource, DEVICE_ROLE_NONE));
+
+ // Without setting, call get/remove/clear must fail
+ EXPECT_EQ(NAME_NOT_FOUND,
+ AudioSystem::getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_TRUE(devices.empty());
+ EXPECT_EQ(NAME_NOT_FOUND,
+ AudioSystem::removeDevicesRoleForCapturePreset(audioSource, role, devices));
+ EXPECT_EQ(NAME_NOT_FOUND, AudioSystem::clearDevicesRoleForCapturePreset(audioSource, role));
+
+ // Test set/get devices role
+ EXPECT_EQ(NO_ERROR,
+ AudioSystem::setDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ ASSERT_EQ(NO_ERROR, AudioSystem::getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_EQ(devices, inputDevices);
+
+ // Test setting will change the previously set devices
+ const AudioDeviceTypeAddr inputDevice2 = AudioDeviceTypeAddr(inDeviceB, "");
+ AudioDeviceTypeAddrVector inputDevices2 = {inputDevice2};
+ EXPECT_EQ(NO_ERROR,
+ AudioSystem::setDevicesRoleForCapturePreset(audioSource, role, inputDevices2));
+ devices.clear();
+ EXPECT_EQ(NO_ERROR, AudioSystem::getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_EQ(devices, inputDevices2);
+
+ // Test add devices
+ EXPECT_EQ(NO_ERROR,
+ AudioSystem::addDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ devices.clear();
+ EXPECT_EQ(NO_ERROR, AudioSystem::getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_EQ(2, devices.size());
+ EXPECT_TRUE(std::find(devices.begin(), devices.end(), inputDevice) != devices.end());
+ EXPECT_TRUE(std::find(devices.begin(), devices.end(), inputDevice2) != devices.end());
+
+ // Test remove devices
+ EXPECT_EQ(NO_ERROR,
+ AudioSystem::removeDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ devices.clear();
+ EXPECT_EQ(NO_ERROR, AudioSystem::getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_EQ(devices, inputDevices2);
+
+ // Test remove devices that are not set as the device role
+ EXPECT_EQ(BAD_VALUE,
+ AudioSystem::removeDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+
+ // Test clear devices
+ EXPECT_EQ(NO_ERROR, AudioSystem::clearDevicesRoleForCapturePreset(audioSource, role));
+ devices.clear();
+ EXPECT_EQ(NAME_NOT_FOUND,
+ AudioSystem::getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+
+ AudioDeviceTypeAddrVector inputDevices3 = {inputDevice, inputDevice2};
+ EXPECT_EQ(NO_ERROR,
+ AudioSystem::setDevicesRoleForCapturePreset(audioSource, role, inputDevices3));
+ devices.clear();
+ EXPECT_EQ(NO_ERROR, AudioSystem::getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_EQ(2, devices.size());
+ EXPECT_TRUE(std::find(devices.begin(), devices.end(), inputDevice) != devices.end());
+ EXPECT_TRUE(std::find(devices.begin(), devices.end(), inputDevice2) != devices.end());
+ EXPECT_EQ(NO_ERROR, AudioSystem::clearDevicesRoleForCapturePreset(audioSource, role));
+}
+
+TEST_F(AudioSystemTest, UidDeviceAffinities) {
+ uid_t uid = getuid();
+
+ // Test invalid device for example audio_is_input_device
+ AudioDeviceTypeAddr inputDevice(AUDIO_DEVICE_IN_BUILTIN_MIC, "");
+ AudioDeviceTypeAddrVector inputDevices = {inputDevice};
+ EXPECT_EQ(BAD_VALUE, AudioSystem::setUidDeviceAffinities(uid, inputDevices));
+
+ // Test valid device for example audio_is_output_device
+ AudioDeviceTypeAddr outputDevice(AUDIO_DEVICE_OUT_SPEAKER, "");
+ AudioDeviceTypeAddrVector outputDevices = {outputDevice};
+ EXPECT_EQ(NO_ERROR, AudioSystem::setUidDeviceAffinities(uid, outputDevices));
+ EXPECT_EQ(NO_ERROR, AudioSystem::removeUidDeviceAffinities(uid));
+}
+
+TEST_F(AudioSystemTest, UserIdDeviceAffinities) {
+ int userId = 200;
+
+ // Test invalid device for example audio_is_input_device
+ AudioDeviceTypeAddr inputDevice(AUDIO_DEVICE_IN_BUILTIN_MIC, "");
+ AudioDeviceTypeAddrVector inputDevices = {inputDevice};
+ EXPECT_EQ(BAD_VALUE, AudioSystem::setUserIdDeviceAffinities(userId, inputDevices));
+
+ // Test valid device for example audio_is_output_device
+ AudioDeviceTypeAddr outputDevice(AUDIO_DEVICE_OUT_SPEAKER, "");
+ AudioDeviceTypeAddrVector outputDevices = {outputDevice};
+ EXPECT_EQ(NO_ERROR, AudioSystem::setUserIdDeviceAffinities(userId, outputDevices));
+ EXPECT_EQ(NO_ERROR, AudioSystem::removeUserIdDeviceAffinities(userId));
+}
diff --git a/media/libaudioclient/tests/audiotrack_tests.cpp b/media/libaudioclient/tests/audiotrack_tests.cpp
index 1b42a49..8daba0a 100644
--- a/media/libaudioclient/tests/audiotrack_tests.cpp
+++ b/media/libaudioclient/tests/audiotrack_tests.cpp
@@ -128,8 +128,8 @@
<< "Unable to open Resource";
EXPECT_EQ(OK, ap->create()) << "track creation failed";
EXPECT_EQ(BAD_VALUE, ap->getAudioTrackHandle()->addAudioDeviceCallback(nullptr));
- sp<OnAudioDeviceUpdateNotifier> cb = new OnAudioDeviceUpdateNotifier();
- sp<OnAudioDeviceUpdateNotifier> cbOld = new OnAudioDeviceUpdateNotifier();
+ sp<OnAudioDeviceUpdateNotifier> cb = sp<OnAudioDeviceUpdateNotifier>::make();
+ sp<OnAudioDeviceUpdateNotifier> cbOld = sp<OnAudioDeviceUpdateNotifier>::make();
EXPECT_EQ(OK, ap->getAudioTrackHandle()->addAudioDeviceCallback(cbOld));
EXPECT_EQ(INVALID_OPERATION, ap->getAudioTrackHandle()->addAudioDeviceCallback(cbOld));
EXPECT_EQ(OK, ap->getAudioTrackHandle()->addAudioDeviceCallback(cb));
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 63d3180..9ff2177 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -155,6 +155,7 @@
void bufferChunk(int64_t timestampUs);
bool isAvc() const { return mIsAvc; }
bool isHevc() const { return mIsHevc; }
+ bool isAv1() const { return mIsAv1; }
bool isHeic() const { return mIsHeic; }
bool isAudio() const { return mIsAudio; }
bool isMPEG4() const { return mIsMPEG4; }
@@ -319,6 +320,7 @@
volatile bool mStarted;
bool mIsAvc;
bool mIsHevc;
+ bool mIsAv1;
bool mIsDovi;
bool mIsAudio;
bool mIsVideo;
@@ -467,6 +469,7 @@
void writePaspBox();
void writeAvccBox();
void writeHvccBox();
+ void writeAv1cBox();
void writeDoviConfigBox();
void writeUrlBox();
void writeDrefBox();
@@ -660,6 +663,8 @@
return "avc1";
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
return "hvc1";
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AV1, mime)) {
+ return "av01";
}
} else if (!strncasecmp(mime, "application/", 12)) {
return "mett";
@@ -1541,6 +1546,15 @@
writeFourcc("isom");
writeFourcc("mp42");
}
+ // If an AV1 video track is present, write "av01" as one of the
+ // compatible brands.
+ for (List<Track *>::iterator it = mTracks.begin(); it != mTracks.end();
+ ++it) {
+ if ((*it)->isAv1()) {
+ writeFourcc("av01");
+ break;
+ }
+ }
}
endBox();
@@ -2205,6 +2219,7 @@
mMeta->findCString(kKeyMIMEType, &mime);
mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+ mIsAv1 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1);
mIsDovi = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
mIsAudio = !strncasecmp(mime, "audio/", 6);
mIsVideo = !strncasecmp(mime, "video/", 6);
@@ -2639,6 +2654,8 @@
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
mMeta->findData(kKeyHVCC, &type, &data, &size);
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+ mMeta->findData(kKeyAV1C, &type, &data, &size);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
getDolbyVisionProfile();
if (!mMeta->findData(kKeyAVCC, &type, &data, &size) &&
@@ -4262,6 +4279,7 @@
!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_AV1, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime) ||
!strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime)) {
if (!mCodecSpecificData ||
@@ -4433,6 +4451,8 @@
writeAvccBox();
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
writeHvccBox();
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AV1, mime)) {
+ writeAv1cBox();
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime)) {
if (mDoviProfile <= DolbyVisionProfileDvheSt) {
writeHvccBox();
@@ -5000,6 +5020,15 @@
mOwner->endBox(); // hvcC
}
+void MPEG4Writer::Track::writeAv1cBox() {
+ CHECK(mCodecSpecificData);
+ CHECK_GE(mCodecSpecificDataSize, 4u);
+
+ mOwner->beginBox("av1C");
+ mOwner->write(mCodecSpecificData, mCodecSpecificDataSize);
+ mOwner->endBox(); // av1C
+}
+
void MPEG4Writer::Track::writeDoviConfigBox() {
CHECK_NE(mDoviProfile, 0u);
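The new writeAv1cBox() above emits the track's codec-specific data (kKeyAV1C) verbatim as the payload of an 'av1C' box. As an illustrative standalone sketch (not the MPEG4Writer API), an 'av1C' box is an ordinary ISO-BMFF box: a 32-bit big-endian size covering the 8-byte header, the four-character type, then the AV1CodecConfigurationRecord bytes:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

std::vector<uint8_t> makeAv1cBox(const uint8_t* csd, size_t csdSize) {
    std::vector<uint8_t> box(8 + csdSize);
    const uint32_t size = static_cast<uint32_t>(box.size());
    box[0] = static_cast<uint8_t>(size >> 24);     // big-endian box size
    box[1] = static_cast<uint8_t>(size >> 16);
    box[2] = static_cast<uint8_t>(size >> 8);
    box[3] = static_cast<uint8_t>(size);
    std::memcpy(&box[4], "av1C", 4);               // box type
    std::memcpy(&box[8], csd, csdSize);            // AV1CodecConfigurationRecord
    return box;
}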
diff --git a/media/libstagefright/timedtext/TextDescriptions.cpp b/media/libstagefright/timedtext/TextDescriptions.cpp
index 2c2d11d..3fec9ed 100644
--- a/media/libstagefright/timedtext/TextDescriptions.cpp
+++ b/media/libstagefright/timedtext/TextDescriptions.cpp
@@ -466,6 +466,10 @@
if (subChunkType == FOURCC('f', 't', 'a', 'b'))
{
+ if(subChunkSize < 8) {
+ return OK;
+ }
+
tmpData += 8;
size_t subChunkRemaining = subChunkSize - 8;
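The added size check above matters because subChunkRemaining is unsigned: without it, a malformed 'ftab' sub-chunk smaller than 8 bytes would make subChunkSize - 8 wrap around and the subsequent parsing would read far past the chunk. A tiny illustration of the wraparound (not the TextDescriptions code):

#include <cstddef>
#include <cstdio>

int main() {
    std::size_t subChunkSize = 4;                      // malformed sub-chunk, < 8 bytes
    std::size_t subChunkRemaining = subChunkSize - 8;  // wraps around to SIZE_MAX - 3
    std::printf("%zu\n", subChunkRemaining);           // prints a huge value, not -4
    return 0;
}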
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 6adf6ef..ac89655 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3967,19 +3967,24 @@
void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
- // mono blend occurs for mixer threads only (not direct or offloaded)
- // and is handled here if we're going directly to the sink.
- if (requireMonoBlend() && !mEffectBufferValid) {
- mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount, mNormalFrameCount,
- true /*limit*/);
- }
+ // Apply mono blending and balancing if the effect buffer is not valid. Otherwise,
+ // do these processes after effects are applied.
+ if (!mEffectBufferValid) {
+ // mono blend occurs for mixer threads only (not direct or offloaded)
+ // and is handled here if we're going directly to the sink.
+ if (requireMonoBlend()) {
+ mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount,
+ mNormalFrameCount, true /*limit*/);
+ }
- if (!hasFastMixer()) {
- // Balance must take effect after mono conversion.
- // We do it here if there is no FastMixer.
- // mBalance detects zero balance within the class for speed (not needed here).
- mBalance.setBalance(mMasterBalance.load());
- mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
+ if (!hasFastMixer()) {
+ // Balance must take effect after mono conversion.
+ // We do it here if there is no FastMixer.
+ // mBalance detects zero balance within the class for speed
+ // (not needed here).
+ mBalance.setBalance(mMasterBalance.load());
+ mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
+ }
}
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
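The restructuring above keeps mono blend and balance on the direct-to-sink path only; when an effect buffer is in use, both steps are deferred until after the effect chain runs. A rough free-standing sketch of that control flow, with simplified stand-ins for mono_blend() and Balance (assumes stereo, interleaved float samples):

#include <cstddef>

void postProcessMixerBuffer(float* mixerBuffer, size_t frameCount,
                            bool effectBufferValid, bool monoBlendRequired,
                            bool hasFastMixer, float balance) {
    if (effectBufferValid) {
        return;  // blend/balance are applied later, after the effect chain
    }
    if (monoBlendRequired) {
        // average left/right into both channels
        for (size_t i = 0; i < frameCount; i++) {
            float m = 0.5f * (mixerBuffer[2 * i] + mixerBuffer[2 * i + 1]);
            mixerBuffer[2 * i] = mixerBuffer[2 * i + 1] = m;
        }
    }
    if (!hasFastMixer) {
        // simple linear balance after mono conversion: -1 = full left, +1 = full right
        const float leftGain = balance > 0 ? 1.0f - balance : 1.0f;
        const float rightGain = balance < 0 ? 1.0f + balance : 1.0f;
        for (size_t i = 0; i < frameCount; i++) {
            mixerBuffer[2 * i] *= leftGain;
            mixerBuffer[2 * i + 1] *= rightGain;
        }
    }
}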
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index ebfa1d6..0845a64 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2292,6 +2292,9 @@
mOperatingMode = operatingMode;
}
+ // Reset min expected duration when session is reconfigured.
+ mMinExpectedDuration = 0;
+
// In case called from configureStreams, abort queued input buffers not belonging to
// any pending requests.
if (mInputStream != NULL && notifyRequestThread) {
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 8e4ff13..69163a5 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -18,6 +18,7 @@
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
+#include <algorithm>
#include <ctime>
#include <fstream>
@@ -1402,14 +1403,30 @@
const VsyncEventData& vsyncEventData = parcelableVsyncEventData.vsync;
nsecs_t currentTime = systemTime();
- // Reset capture to present time offset if more than 1 second
- // between frames.
- if (t - mLastCaptureTime > kSpacingResetIntervalNs) {
+ // Reset capture to present time offset if:
+ // - More than kSpacingResetIntervalNs between frames.
+ // - The frame duration deviates from multiples of vsync frame intervals.
+ nsecs_t captureInterval = t - mLastCaptureTime;
+ float captureToVsyncIntervalRatio = 1.0f * captureInterval / vsyncEventData.frameInterval;
+ float ratioDeviation = std::fabs(
+ captureToVsyncIntervalRatio - std::roundf(captureToVsyncIntervalRatio));
+ if (captureInterval > kSpacingResetIntervalNs ||
+ ratioDeviation >= kMaxIntervalRatioDeviation) {
+ nsecs_t minPresentT = mLastPresentTime + vsyncEventData.frameInterval / 2;
for (size_t i = 0; i < VsyncEventData::kFrameTimelinesLength; i++) {
- if (vsyncEventData.frameTimelines[i].deadlineTimestamp >= currentTime) {
- mCaptureToPresentOffset =
- vsyncEventData.frameTimelines[i].expectedPresentationTime - t;
- break;
+ const auto& timeline = vsyncEventData.frameTimelines[i];
+ if (timeline.deadlineTimestamp >= currentTime &&
+ timeline.expectedPresentationTime > minPresentT) {
+ nsecs_t presentT = vsyncEventData.frameTimelines[i].expectedPresentationTime;
+ mCaptureToPresentOffset = presentT - t;
+ mLastCaptureTime = t;
+ mLastPresentTime = presentT;
+
+ // Move the expected presentation time back by 1/3 of frame interval to
+ // mitigate the time drift. Due to time drift, if we directly use the
+ // expected presentation time, two expected presentation times often
+ // fall into the same VSYNC interval.
+ return presentT - vsyncEventData.frameInterval/3;
}
}
}
@@ -1425,16 +1442,27 @@
int minVsyncs = (mMinExpectedDuration - vsyncEventData.frameInterval / 2) /
vsyncEventData.frameInterval;
if (minVsyncs < 0) minVsyncs = 0;
- nsecs_t minInterval = minVsyncs * vsyncEventData.frameInterval + kTimelineThresholdNs;
- // Find best timestamp in the vsync timeline:
+ nsecs_t minInterval = minVsyncs * vsyncEventData.frameInterval;
+ // Find best timestamp in the vsync timelines:
+ // - Only use at most 3 timelines to avoid long latency
// - closest to the ideal present time,
// - deadline timestamp is greater than the current time, and
// - the candidate present time is at least minInterval in the future
// compared to last present time.
- for (const auto& vsyncTime : vsyncEventData.frameTimelines) {
+ int maxTimelines = std::min(kMaxTimelines, (int)VsyncEventData::kFrameTimelinesLength);
+ float biasForShortDelay = 1.0f;
+ for (int i = 0; i < maxTimelines; i ++) {
+ const auto& vsyncTime = vsyncEventData.frameTimelines[i];
+ if (minVsyncs > 0) {
+ // Bias towards using smaller timeline index:
+ // i = 0: bias = 1
+ // i = maxTimelines-1: bias = -1
+ biasForShortDelay = 1.0 - 2.0 * i / (maxTimelines - 1);
+ }
if (std::abs(vsyncTime.expectedPresentationTime - idealPresentT) < minDiff &&
vsyncTime.deadlineTimestamp >= currentTime &&
- vsyncTime.expectedPresentationTime > mLastPresentTime + minInterval) {
+ vsyncTime.expectedPresentationTime >
+ mLastPresentTime + minInterval + biasForShortDelay * kTimelineThresholdNs) {
expectedPresentT = vsyncTime.expectedPresentationTime;
minDiff = std::abs(vsyncTime.expectedPresentationTime - idealPresentT);
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 4ab052b..3587af4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -426,8 +426,10 @@
nsecs_t mLastPresentTime = 0;
nsecs_t mCaptureToPresentOffset = 0;
static constexpr size_t kDisplaySyncExtraBuffer = 2;
- static constexpr nsecs_t kSpacingResetIntervalNs = 1000000000LL; // 1 second
+ static constexpr nsecs_t kSpacingResetIntervalNs = 50000000LL; // 50 milliseconds
static constexpr nsecs_t kTimelineThresholdNs = 1000000LL; // 1 millisecond
+ static constexpr float kMaxIntervalRatioDeviation = 0.05f;
+ static constexpr int kMaxTimelines = 3;
nsecs_t syncTimestampToDisplayLocked(nsecs_t t);
// Re-space frames by delaying queueBuffer so that frame delivery has