Merge "Audio Policy: Do not stop getOutputForAttr on invalid Mix"
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index d50b353..2daeeac 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -48,7 +48,7 @@
# TODO: replace the following when apex has a way to auto-generate this list
# namespace.default.link.platform.shared_libs = %LLNDK_LIBRARIES%
# namespace.default.link.platform.shared_libs += %SANITIZER_RUNTIME_LIBRARIES%
-namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so
+namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libcgrouprc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so
# FIXME: b/129552044
namespace.default.link.platform.shared_libs += libz.so
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index acf6999..99b613e 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5695,6 +5695,8 @@
* <p>In both cases, all images generated for a particular capture request still carry the same
* timestamps, so that they can be used to look up the matching frame number and
* onCaptureStarted callback.</p>
+ * <p>This tag is only applicable if the logical camera device supports concurrent physical
+ * streams from different physical cameras.</p>
*/
ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE = // byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)
ACAMERA_LOGICAL_MULTI_CAMERA_START + 1,
@@ -7581,14 +7583,23 @@
ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING = 10,
/**
- * <p>The camera device is a logical camera backed by two or more physical cameras. In
- * API level 28, the physical cameras must also be exposed to the application via
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. Starting from API
- * level 29, some or all physical cameras may not be independently exposed to the
- * application, in which case the physical camera IDs will not be available in
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. But the application
- * can still query the physical cameras' characteristics by calling
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>.</p>
+ * <p>The camera device is a logical camera backed by two or more physical cameras.</p>
+ * <p>In API level 28, the physical cameras must also be exposed to the application via
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>.</p>
+ * <p>Starting from API level 29, some or all physical cameras may not be independently
+ * exposed to the application, in which case the physical camera IDs will not be
+ * available in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. But the
+ * application can still query the physical cameras' characteristics by calling
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>. Additionally,
+ * if a physical camera is hidden from camera ID list, the mandatory stream combinations
+ * for that physical camera must be supported through the logical camera using physical
+ * streams.</p>
+ * <p>Combinations of logical and physical streams, or physical streams from different
+ * physical cameras are not guaranteed. However, if the camera device supports
+ * {@link ACameraDevice_isSessionConfigurationSupported },
+ * application must be able to query whether a stream combination involving physical
+ * streams is supported by calling
+ * {@link ACameraDevice_isSessionConfigurationSupported }.</p>
* <p>Camera application shouldn't assume that there are at most 1 rear camera and 1 front
* camera in the system. For an application that switches between front and back cameras,
* the recommendation is to switch between the first rear camera and the first front
@@ -7613,24 +7624,6 @@
* the same.</li>
* <li>The logical camera must be LIMITED or higher device.</li>
* </ul>
- * <p>Both the logical camera device and its underlying physical devices support the
- * mandatory stream combinations required for their device levels.</p>
- * <p>Additionally, for each guaranteed stream combination, the logical camera supports:</p>
- * <ul>
- * <li>For each guaranteed stream combination, the logical camera supports replacing one
- * logical {@link AIMAGE_FORMAT_YUV_420_888 YUV_420_888}
- * or raw stream with two physical streams of the same size and format, each from a
- * separate physical camera, given that the size and format are supported by both
- * physical cameras.</li>
- * <li>If the logical camera doesn't advertise RAW capability, but the underlying physical
- * cameras do, the logical camera will support guaranteed stream combinations for RAW
- * capability, except that the RAW streams will be physical streams, each from a separate
- * physical camera. This is usually the case when the physical cameras have different
- * sensor sizes.</li>
- * </ul>
- * <p>Using physical streams in place of a logical stream of the same size and format will
- * not slow down the frame rate of the capture, as long as the minimum frame duration
- * of the physical and logical streams are the same.</p>
* <p>A logical camera device's dynamic metadata may contain
* ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID to notify the application of the current
* active physical camera Id. An active physical camera is the physical camera from which
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index bffab22..b18c897 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -300,7 +300,6 @@
OutputConfigurationWrapper outConfigW;
OutputConfiguration &outConfig = outConfigW.mOutputConfiguration;
outConfig.rotation = utils::convertToHidl(output->mRotation);
- outConfig.windowGroupId = -1; // ndk doesn't support inter OutputConfiguration buffer sharing.
outConfig.windowHandles.resize(output->mSharedWindows.size() + 1);
outConfig.windowHandles[0] = output->mWindow;
outConfig.physicalCameraId = output->mPhysicalCameraId;
diff --git a/camera/ndk/ndk_vendor/impl/utils.h b/camera/ndk/ndk_vendor/impl/utils.h
index 2f1006d..a03c7bc 100644
--- a/camera/ndk/ndk_vendor/impl/utils.h
+++ b/camera/ndk/ndk_vendor/impl/utils.h
@@ -99,7 +99,15 @@
return mOutputConfiguration;
}
- OutputConfigurationWrapper() = default;
+ OutputConfigurationWrapper() {
+ mOutputConfiguration.rotation = OutputConfiguration::Rotation::R0;
+ // The ndk currently doesn't support deferred surfaces
+ mOutputConfiguration.isDeferred = false;
+ mOutputConfiguration.width = 0;
+ mOutputConfiguration.height = 0;
+ // ndk doesn't support inter OutputConfiguration buffer sharing.
+ mOutputConfiguration.windowGroupId = -1;
+ };
OutputConfigurationWrapper(OutputConfiguration &outputConfiguration)
: mOutputConfiguration((outputConfiguration)) { }
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 5ff1c59..5a31c58 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -24,6 +24,7 @@
#include <stdlib.h>
#include <string.h>
+#include <log/log.h>
#include <utils/Log.h>
#include "AC4Parser.h"
@@ -81,7 +82,7 @@
const Trex *trex,
off64_t firstMoofOffset,
const sp<ItemTable> &itemTable,
- int32_t elstShiftStartTicks);
+ uint64_t elstShiftStartTicks);
virtual status_t init();
virtual media_status_t start();
@@ -147,7 +148,7 @@
// Start offset from composition time to presentation time.
// Support shift only for video tracks through mElstShiftStartTicks for now.
- int32_t mElstShiftStartTicks;
+ uint64_t mElstShiftStartTicks;
size_t parseNALSize(const uint8_t *data) const;
status_t parseChunk(off64_t *offset);
@@ -397,7 +398,6 @@
while (track) {
Track *next = track->next;
- AMediaFormat_delete(track->meta);
delete track;
track = next;
}
@@ -672,7 +672,6 @@
ALOGV("adding HEIF image track %u", imageIndex);
Track *track = new Track;
- track->next = NULL;
if (mLastTrack != NULL) {
mLastTrack->next = track;
} else {
@@ -682,10 +681,7 @@
track->meta = meta;
AMediaFormat_setInt32(track->meta, AMEDIAFORMAT_KEY_TRACK_ID, imageIndex);
- track->includes_expensive_metadata = false;
- track->skipTrack = false;
track->timescale = 1000000;
- track->elstShiftStartTicks = 0;
}
}
@@ -967,7 +963,6 @@
ALOGV("adding new track");
Track *track = new Track;
- track->next = NULL;
if (mLastTrack) {
mLastTrack->next = track;
} else {
@@ -975,15 +970,9 @@
}
mLastTrack = track;
- track->includes_expensive_metadata = false;
- track->skipTrack = false;
- track->timescale = 0;
track->meta = AMediaFormat_new();
AMediaFormat_setString(track->meta,
AMEDIAFORMAT_KEY_MIME, "application/octet-stream");
- track->has_elst = false;
- track->subsample_encryption = false;
- track->elstShiftStartTicks = 0;
}
off64_t stop_offset = *offset + chunk_size;
@@ -1033,6 +1022,7 @@
mLastTrack->skipTrack = true;
}
+
if (mLastTrack->skipTrack) {
ALOGV("skipping this track...");
Track *cur = mFirstTrack;
@@ -1053,6 +1043,21 @@
return OK;
}
+
+ // place things we built elsewhere into their final locations
+
+ // put aggregated tx3g data into the metadata
+ if (mLastTrack->mTx3gFilled > 0) {
+ ALOGV("Putting %zu bytes of tx3g data into meta data",
+ mLastTrack->mTx3gFilled);
+ AMediaFormat_setBuffer(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA,
+ mLastTrack->mTx3gBuffer, mLastTrack->mTx3gFilled);
+ // drop it now to reduce our footprint
+ free(mLastTrack->mTx3gBuffer);
+ mLastTrack->mTx3gBuffer = NULL;
+ }
+
} else if (chunk_type == FOURCC("moov")) {
mInitCheck = OK;
@@ -2553,41 +2558,55 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- void *data;
- size_t size = 0;
- if (!AMediaFormat_getBuffer(mLastTrack->meta,
- AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA, &data, &size)) {
- size = 0;
- }
-
- if ((chunk_size > SIZE_MAX) || (SIZE_MAX - chunk_size <= size)) {
+ // complain about ridiculous chunks
+ if (chunk_size > kMaxAtomSize) {
return ERROR_MALFORMED;
}
- uint8_t *buffer = new (std::nothrow) uint8_t[size + chunk_size];
- if (buffer == NULL) {
+ // complain about empty atoms
+ if (chunk_data_size <= 0) {
+ ALOGE("b/124330204");
+ android_errorWriteLog(0x534e4554, "124330204");
return ERROR_MALFORMED;
}
- if (size > 0) {
- memcpy(buffer, data, size);
+ // should fill buffer based on "data_offset" and "chunk_data_size"
+ // instead of *offset and chunk_size;
+ // but we've been feeding the extra data to consumers for multiple releases and
+ // if those apps are compensating for it, we'd break them with such a change
+ //
+
+ if (mLastTrack->mTx3gSize - mLastTrack->mTx3gFilled < chunk_size) {
+ size_t growth = kTx3gGrowth;
+ if (growth < chunk_size) {
+ growth = chunk_size;
+ }
+ // although this disallows 2 tx3g atoms of nearly kMaxAtomSize...
+ if ((uint64_t) mLastTrack->mTx3gSize + growth > kMaxAtomSize) {
+ ALOGE("b/124330204 - too much space");
+ android_errorWriteLog(0x534e4554, "124330204");
+ return ERROR_MALFORMED;
+ }
+ uint8_t *updated = (uint8_t *)realloc(mLastTrack->mTx3gBuffer,
+ mLastTrack->mTx3gSize + growth);
+ if (updated == NULL) {
+ return ERROR_MALFORMED;
+ }
+ mLastTrack->mTx3gBuffer = updated;
+ mLastTrack->mTx3gSize += growth;
}
- if ((size_t)(mDataSource->readAt(*offset, buffer + size, chunk_size))
+ if ((size_t)(mDataSource->readAt(*offset,
+ mLastTrack->mTx3gBuffer + mLastTrack->mTx3gFilled,
+ chunk_size))
< chunk_size) {
- delete[] buffer;
- buffer = NULL;
// advance read pointer so we don't end up reading this again
*offset += chunk_size;
return ERROR_IO;
}
- AMediaFormat_setBuffer(mLastTrack->meta,
- AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA, buffer, size + chunk_size);
-
- delete[] buffer;
-
+ mLastTrack->mTx3gFilled += chunk_size;
*offset += chunk_size;
break;
}
@@ -4040,7 +4059,7 @@
if (track->has_elst and !strncasecmp("video/", mime, 6) and track->elst_media_time > 0) {
track->elstShiftStartTicks = track->elst_media_time;
- ALOGV("video track->elstShiftStartTicks :%" PRId64, track->elst_media_time);
+ ALOGV("video track->elstShiftStartTicks :%" PRIu64, track->elstShiftStartTicks);
}
MPEG4Source *source = new MPEG4Source(
@@ -4450,7 +4469,7 @@
const Trex *trex,
off64_t firstMoofOffset,
const sp<ItemTable> &itemTable,
- int32_t elstShiftStartTicks)
+ uint64_t elstShiftStartTicks)
: mFormat(format),
mDataSource(dataSource),
mTimescale(timeScale),
@@ -4576,7 +4595,7 @@
// Start offset should be less or equal to composition time of first sample.
// ISO : sample_composition_time_offset, version 0 (unsigned) for major brands.
mElstShiftStartTicks = std::min(mElstShiftStartTicks,
- (*mCurrentSamples.begin()).compositionOffset);
+ (uint64_t)(*mCurrentSamples.begin()).compositionOffset);
}
return err;
}
@@ -4586,7 +4605,7 @@
err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
// Start offset should be less or equal to composition time of first sample.
// Composition time stamp of first sample cannot be negative.
- mElstShiftStartTicks = std::min(mElstShiftStartTicks, (int32_t)firstSampleCTS);
+ mElstShiftStartTicks = std::min(mElstShiftStartTicks, firstSampleCTS);
}
return err;
@@ -5239,8 +5258,30 @@
sampleCtsOffset = 0;
}
- if (size < (off64_t)sampleCount * bytesPerSample) {
- return -EINVAL;
+ if (bytesPerSample != 0) {
+ if (size < (off64_t)sampleCount * bytesPerSample) {
+ return -EINVAL;
+ }
+ } else {
+ if (sampleDuration == 0) {
+ ALOGW("b/123389881 sampleDuration == 0");
+ android_errorWriteLog(0x534e4554, "124389881 zero");
+ return -EINVAL;
+ }
+
+ // apply some sanity (vs strict legality) checks
+ //
+ // clamp the count of entries in the trun box, to avoid spending forever parsing
+ // this box. Clamping (vs error) lets us play *something*.
+ // 1 million is about 400 msecs on a Pixel3, should be no more than a couple seconds
+ // on the slowest devices.
+ static constexpr uint32_t kMaxTrunSampleCount = 1000000;
+ if (sampleCount > kMaxTrunSampleCount) {
+ ALOGW("b/123389881 clamp sampleCount(%u) @ kMaxTrunSampleCount(%u)",
+ sampleCount, kMaxTrunSampleCount);
+ android_errorWriteLog(0x534e4554, "124389881 count");
+
+ }
}
Sample tmp;
@@ -5496,7 +5537,11 @@
err = mSampleTable->getMetaDataForSample(
mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
if(err == OK) {
- cts -= mElstShiftStartTicks;
+ /* Composition Time Stamp cannot be negative. Some files have a video
+ * sample time (STTS) delta of zero (b/117402420). Hence subtract only
+ * min(cts, mElstShiftStartTicks), so that audio tracks can still be played.
+ */
+ cts -= std::min(cts, mElstShiftStartTicks);
}
} else {
@@ -5780,8 +5825,8 @@
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
seekTimeUs += ((long double)mElstShiftStartTicks * 1000000) / mTimescale;
- ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRId32, seekTimeUs,
- mElstShiftStartTicks);
+ ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRIu64, seekTimeUs,
+ mElstShiftStartTicks);
int numSidxEntries = mSegments.size();
if (numSidxEntries != 0) {
@@ -5837,7 +5882,7 @@
off64_t offset = 0;
size_t size = 0;
- uint32_t cts = 0;
+ uint64_t cts = 0;
bool isSyncSample = false;
bool newBuffer = false;
if (mBuffer == NULL || mCurrentSampleIndex >= mCurrentSamples.size()) {
@@ -5869,7 +5914,11 @@
offset = smpl->offset;
size = smpl->size;
cts = mCurrentTime + smpl->compositionOffset;
- cts -= mElstShiftStartTicks;
+ /* Composition Time Stamp cannot be negative. Some files have a video
+ * sample time (STTS) delta of zero (b/117402420). Hence subtract only
+ * min(cts, mElstShiftStartTicks), so that audio tracks can still be played.
+ */
+ cts -= std::min(cts, mElstShiftStartTicks);
mCurrentTime += smpl->duration;
isSyncSample = (mCurrentSampleIndex == 0);
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index fadfb50..031e793 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -83,12 +83,47 @@
bool includes_expensive_metadata;
bool skipTrack;
bool has_elst;
+ /* signed int, ISO Spec allows media_time = -1 for other use cases.
+ * but we don't support empty edits for now.
+ */
int64_t elst_media_time;
uint64_t elst_segment_duration;
- int32_t elstShiftStartTicks;
+ // unsigned int, shift start offset only when media_time > 0.
+ uint64_t elstShiftStartTicks;
bool subsample_encryption;
+
+ uint8_t *mTx3gBuffer;
+ size_t mTx3gSize, mTx3gFilled;
+
+
+ Track() {
+ next = NULL;
+ meta = NULL;
+ timescale = 0;
+ includes_expensive_metadata = false;
+ skipTrack = false;
+ has_elst = false;
+ elst_media_time = 0;
+ elstShiftStartTicks = 0;
+ subsample_encryption = false;
+ mTx3gBuffer = NULL;
+ mTx3gSize = mTx3gFilled = 0;
+ }
+ ~Track() {
+ if (meta) {
+ AMediaFormat_delete(meta);
+ meta = NULL;
+ }
+ free (mTx3gBuffer);
+ mTx3gBuffer = NULL;
+ }
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(Track);
};
+ static const int kTx3gGrowth = 16 * 1024;
+
Vector<SidxEntry> mSidxEntries;
off64_t mMoofOffset;
bool mMoofFound;
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index cbf5921..a87ede3 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -20,6 +20,7 @@
AAudioStreamBuilder_setUsage; # introduced=28
AAudioStreamBuilder_setContentType; # introduced=28
AAudioStreamBuilder_setInputPreset; # introduced=28
+ AAudioStreamBuilder_setAllowedCapturePolicy; # introduced=29
AAudioStreamBuilder_setSessionId; # introduced=28
AAudioStreamBuilder_openStream;
AAudioStreamBuilder_delete;
@@ -49,6 +50,7 @@
AAudioStream_getUsage; # introduced=28
AAudioStream_getContentType; # introduced=28
AAudioStream_getInputPreset; # introduced=28
+ AAudioStream_getAllowedCapturePolicy; # introduced=29
AAudioStream_getFramesWritten;
AAudioStream_getFramesRead;
AAudioStream_getSessionId; # introduced=28
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index cb243a0..958bb2e 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -196,3 +196,10 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_full_queue",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_full_queue.cpp"],
+ shared_libs: ["libaaudio"],
+}
diff --git a/media/libaaudio/tests/test_full_queue.cpp b/media/libaaudio/tests/test_full_queue.cpp
new file mode 100644
index 0000000..12d4fa3
--- /dev/null
+++ b/media/libaaudio/tests/test_full_queue.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test whether a stream dies if it is written to after a delay.
+// Maybe because the message queue from the AAudio service fills up.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+#include <gtest/gtest.h>
+
+constexpr int64_t kNanosPerSecond = 1000000000;
+constexpr int64_t kTimeoutNanos = kNanosPerSecond / 2;
+constexpr int kNumFrames = 256;
+constexpr int kChannelCount = 2;
+
+static void checkFullQueue(aaudio_performance_mode_t perfMode,
+ int32_t sleepMillis) {
+ std::unique_ptr<float[]> buffer = std::make_unique<float[]>(
+ kNumFrames * kChannelCount);
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ AAudioStreamBuilder_setChannelCount(aaudioBuilder, kChannelCount);
+
+ // Request stream properties.
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+
+ // Create an AAudioStream using the Builder.
+ AAudioStream *aaudioStream = nullptr;
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder,
+ &aaudioStream));
+ AAudioStreamBuilder_delete(aaudioBuilder);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ // Sleep for a while. This might kill the stream.
+ usleep(sleepMillis * 1000); // 1000 microseconds per millisecond
+
+ for (int i = 0; i < 10; i++) {
+ const aaudio_result_t result = AAudioStream_write(aaudioStream,
+ buffer.get(),
+ kNumFrames,
+ kTimeoutNanos);
+ EXPECT_EQ(kNumFrames, result);
+ if (kNumFrames != result) break;
+ }
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_50) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 50 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_200) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 200 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_1000) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 1000 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_50) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 50 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_200) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 200 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_1000) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 1000 /* sleepMillis */);
+}
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 6c8e6a4..c08dddb 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -49,7 +49,7 @@
// Instrument audio signal power logging.
// Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ if (mStream != nullptr /* && mStreamPowerLog.isUserDebugOrEngBuild() */) {
// Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
Return<void> ret = mStream->getAudioProperties(
[&](auto sr, auto m, auto f) {
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 7d5ce05..4818fd8 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -33,7 +33,7 @@
mStream(stream) {
// Instrument audio signal power logging.
// Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ if (mStream != nullptr /* && mStreamPowerLog.isUserDebugOrEngBuild() */) {
mStreamPowerLog.init(mStream->get_sample_rate(mStream),
mStream->get_channels(mStream),
mStream->get_format(mStream));
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 4dece96..50826c5 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -368,13 +368,13 @@
ALOGV("Use shared memory: %zu", length);
transferBuf = buf;
} else {
- ALOGD("Large buffer %zu without IMemory!", length);
+ ALOGV("Large buffer %zu without IMemory!", length);
ret = mGroup->acquire_buffer(
(MediaBufferBase **)&transferBuf, false /* nonBlocking */, length);
if (ret != OK
|| transferBuf == nullptr
|| transferBuf->mMemory == nullptr) {
- ALOGW("Failed to acquire shared memory, size %zu, ret %d",
+ ALOGV("Failed to acquire shared memory, size %zu, ret %d",
length, ret);
if (transferBuf != nullptr) {
transferBuf->release();
diff --git a/media/libmedia/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
index 5ab6e37..381df24 100644
--- a/media/libmedia/include/media/IMediaSource.h
+++ b/media/libmedia/include/media/IMediaSource.h
@@ -124,7 +124,8 @@
return false;
}
- static const size_t kBinderMediaBuffers = 4; // buffers managed by BnMediaSource
+ // align buffer count with video request size in NuMediaExtractor::selectTrack()
+ static const size_t kBinderMediaBuffers = 8; // buffers managed by BnMediaSource
static const size_t kTransferSharedAsSharedThreshold = 4 * 1024; // if >= shared, else inline
static const size_t kTransferInlineAsSharedThreshold = 8 * 1024; // if >= shared, else inline
static const size_t kInlineMaxTransfer = 64 * 1024; // Binder size limited to BINDER_VM_SIZE.
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 71a7370..5061024 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -776,6 +776,17 @@
store->linkToDeath(codecDeathListener, 0);
codecDeathListeners.emplace_back(codecDeathListener);
}
+
+ store = ::android::hardware::media::c2::V1_0::
+ IComponentStore::getService("software");
+ if (store == nullptr) {
+ ALOGD("Codec2 swcodec service is not available");
+ } else {
+ sp<ServiceDeathNotifier> codecDeathListener =
+ new ServiceDeathNotifier(store, p, MEDIACODEC_PROCESS_DEATH);
+ store->linkToDeath(codecDeathListener, 0);
+ codecDeathListeners.emplace_back(codecDeathListener);
+ }
}
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 0156ad2..a2cc13e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -328,6 +328,11 @@
const size_t *userData = (size_t *)mpegUserData->data();
for (size_t i = 0; i < mpegUserData->size() / sizeof(size_t); ++i) {
+ if (accessUnit->size() < userData[i]) {
+ ALOGW("b/129068792, skip invalid offset for user data");
+ android_errorWriteLog(0x534e4554, "129068792");
+ continue;
+ }
trackAdded |= parseMPEGUserDataUnit(
timeUs, accessUnit->data() + userData[i], accessUnit->size() - userData[i]);
}
@@ -337,6 +342,12 @@
// returns true if a new CC track is found
bool NuPlayer::CCDecoder::parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size) {
+ if (size < 9) {
+ ALOGW("b/129068792, MPEG user data size too small %zu", size);
+ android_errorWriteLog(0x534e4554, "129068792");
+ return false;
+ }
+
ABitReader br(data + 4, 5);
uint32_t user_identifier = br.getBits(32);
@@ -389,8 +400,14 @@
mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
br.skipBits(16);
} else if (mDTVCCPacket->size() > 0 && cc_type == 2) {
- memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
- mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+ if (mDTVCCPacket->capacity() - mDTVCCPacket->size() >= 2) {
+ memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
+ mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+ } else {
+ ALOGW("b/129068792, skip CC due to too much data(%zu, %zu)",
+ mDTVCCPacket->capacity(), mDTVCCPacket->size());
+ android_errorWriteLog(0x534e4554, "129068792");
+ }
br.skipBits(16);
} else if (cc_type == 0 || cc_type == 1) {
uint8_t cc_data_1 = br.getBits(8) & 0x7f;
@@ -477,6 +494,11 @@
size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
if (mSelectedTrack == (ssize_t)trackIndex) {
sp<ABuffer> ccPacket = new ABuffer(block_size);
+ if (ccPacket->capacity() == 0) {
+ ALOGW("b/129068792, no memory available, %zu", block_size);
+ android_errorWriteLog(0x534e4554, "129068792");
+ return false;
+ }
memcpy(ccPacket->data(), br.data(), block_size);
mCCMap.add(timeUs, ccPacket);
}
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index a713900..d78d729 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -431,19 +431,17 @@
void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
const size_t bufferSize = buffer->range_length();
const size_t frameSize = mRecord->frameSize();
- const int64_t timestampUs =
- mPrevSampleTimeUs +
- ((1000000LL * (bufferSize / frameSize)) +
- (mSampleRate >> 1)) / mSampleRate;
-
if (mNumFramesReceived == 0) {
buffer->meta_data().setInt64(kKeyAnchorTime, mStartTimeUs);
}
-
+ mNumFramesReceived += bufferSize / frameSize;
+ const int64_t timestampUs =
+ mStartTimeUs +
+ ((1000000LL * mNumFramesReceived) +
+ (mSampleRate >> 1)) / mSampleRate;
buffer->meta_data().setInt64(kKeyTime, mPrevSampleTimeUs);
buffer->meta_data().setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
mPrevSampleTimeUs = timestampUs;
- mNumFramesReceived += bufferSize / frameSize;
mBuffersReceived.push_back(buffer);
mFrameAvailableCondition.signal();
}
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
index 3dc345f..b975b38 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libstagefright/DataURISource.cpp
@@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
#include <media/stagefright/DataURISource.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -39,15 +38,27 @@
AString tmp(&uri[5], commaPos - &uri[5]);
if (tmp.endsWith(";base64")) {
- AString encoded(commaPos + 1);
- // Strip CR and LF...
- for (size_t i = encoded.size(); i > 0;) {
- i--;
- if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
- encoded.erase(i, 1);
+ // strip all CR and LF characters.
+ const char *src = commaPos+1;
+ int len = strlen(src) + 1;
+ char *cleansed = (char *) malloc(len);
+ if (cleansed == NULL) return NULL;
+ char *keeping = cleansed;
+ int left = len;
+ for (int i = 0; i < len ; i++)
+ {
+ const char c = *src++;
+ if (c == '\r' || c == '\n') {
+ continue;
}
+ *keeping++ = c;
+ left--;
}
+ memset(keeping, 0, left);
+
+ AString encoded(cleansed);
+ free(cleansed);
buffer = decodeBase64(encoded);
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 1ec419a..2f13dc9 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -72,6 +72,7 @@
static const int64_t kInitialDelayTimeUs = 700000LL;
static const int64_t kMaxMetadataSize = 0x4000000LL; // 64MB max per-frame metadata size
static const int64_t kMaxCttsOffsetTimeUs = 30 * 60 * 1000000LL; // 30 minutes
+static const size_t kESDSScratchBufferSize = 10; // enough for the 7-bit expandable-size encoding of kMaxAtomSize (64MB, MPEG4Extractor)
static const char kMetaKey_Version[] = "com.android.version";
static const char kMetaKey_Manufacturer[] = "com.android.manufacturer";
@@ -3882,22 +3883,52 @@
mOwner->endBox();
}
+static void generateEsdsSize(size_t dataLength, size_t* sizeGenerated, uint8_t* buffer) {
+ size_t offset = 0, cur = 0;
+ size_t more = 0x00;
+ *sizeGenerated = 0;
+ /* Start with the LSB(7 bits) of dataLength and build the byte sequence up to the MSB.
+ * The continuation flag (most significant bit) will be set on the first N-1 bytes.
+ */
+ do {
+ buffer[cur++] = (dataLength & 0x7f) | more;
+ dataLength >>= 7;
+ more = 0x80;
+ ++(*sizeGenerated);
+ } while (dataLength > 0u);
+ --cur;
+ // Reverse the newly formed byte sequence.
+ while (cur > offset) {
+ uint8_t tmp = buffer[cur];
+ buffer[cur--] = buffer[offset];
+ buffer[offset++] = tmp;
+ }
+}
+
void MPEG4Writer::Track::writeMp4aEsdsBox() {
- mOwner->beginBox("esds");
CHECK(mCodecSpecificData);
CHECK_GT(mCodecSpecificDataSize, 0u);
- // Make sure all sizes encode to a single byte.
- CHECK_LT(mCodecSpecificDataSize + 23, 128u);
+ uint8_t sizeESDBuffer[kESDSScratchBufferSize];
+ uint8_t sizeDCDBuffer[kESDSScratchBufferSize];
+ uint8_t sizeDSIBuffer[kESDSScratchBufferSize];
+ size_t sizeESD = 0;
+ size_t sizeDCD = 0;
+ size_t sizeDSI = 0;
+ generateEsdsSize(mCodecSpecificDataSize, &sizeDSI, sizeDSIBuffer);
+ generateEsdsSize(mCodecSpecificDataSize + sizeDSI + 14, &sizeDCD, sizeDCDBuffer);
+ generateEsdsSize(mCodecSpecificDataSize + sizeDSI + sizeDCD + 21, &sizeESD, sizeESDBuffer);
+
+ mOwner->beginBox("esds");
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt8(0x03); // ES_DescrTag
- mOwner->writeInt8(23 + mCodecSpecificDataSize);
+ mOwner->write(sizeESDBuffer, sizeESD);
mOwner->writeInt16(0x0000);// ES_ID
mOwner->writeInt8(0x00);
mOwner->writeInt8(0x04); // DecoderConfigDescrTag
- mOwner->writeInt8(15 + mCodecSpecificDataSize);
+ mOwner->write(sizeDCDBuffer, sizeDCD);
mOwner->writeInt8(0x40); // objectTypeIndication ISO/IEC 14492-2
mOwner->writeInt8(0x15); // streamType AudioStream
@@ -3912,7 +3943,7 @@
mOwner->writeInt32(avgBitrate);
mOwner->writeInt8(0x05); // DecoderSpecificInfoTag
- mOwner->writeInt8(mCodecSpecificDataSize);
+ mOwner->write(sizeDSIBuffer, sizeDSI);
mOwner->write(mCodecSpecificData, mCodecSpecificDataSize);
static const uint8_t kData2[] = {
@@ -3929,20 +3960,27 @@
CHECK(mCodecSpecificData);
CHECK_GT(mCodecSpecificDataSize, 0u);
- // Make sure all sizes encode to a single byte.
- CHECK_LT(23 + mCodecSpecificDataSize, 128u);
+ uint8_t sizeESDBuffer[kESDSScratchBufferSize];
+ uint8_t sizeDCDBuffer[kESDSScratchBufferSize];
+ uint8_t sizeDSIBuffer[kESDSScratchBufferSize];
+ size_t sizeESD = 0;
+ size_t sizeDCD = 0;
+ size_t sizeDSI = 0;
+ generateEsdsSize(mCodecSpecificDataSize, &sizeDSI, sizeDSIBuffer);
+ generateEsdsSize(mCodecSpecificDataSize + sizeDSI + 14, &sizeDCD, sizeDCDBuffer);
+ generateEsdsSize(mCodecSpecificDataSize + sizeDSI + sizeDCD + 21, &sizeESD, sizeESDBuffer);
mOwner->beginBox("esds");
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt8(0x03); // ES_DescrTag
- mOwner->writeInt8(23 + mCodecSpecificDataSize);
+ mOwner->write(sizeESDBuffer, sizeESD);
mOwner->writeInt16(0x0000); // ES_ID
mOwner->writeInt8(0x1f);
mOwner->writeInt8(0x04); // DecoderConfigDescrTag
- mOwner->writeInt8(15 + mCodecSpecificDataSize);
+ mOwner->write(sizeDCDBuffer, sizeDCD);
mOwner->writeInt8(0x20); // objectTypeIndication ISO/IEC 14492-2
mOwner->writeInt8(0x11); // streamType VisualStream
@@ -3960,7 +3998,7 @@
mOwner->writeInt8(0x05); // DecoderSpecificInfoTag
- mOwner->writeInt8(mCodecSpecificDataSize);
+ mOwner->write(sizeDSIBuffer, sizeDSI);
mOwner->write(mCodecSpecificData, mCodecSpecificDataSize);
static const uint8_t kData2[] = {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index d4e4000..b6b7784 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1052,8 +1052,9 @@
}
// Prevent possible integer overflow in downstream code.
- if ((uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
- ALOGE("buffer size is too big, width=%d, height=%d", mVideoWidth, mVideoHeight);
+ if (mVideoWidth < 0 || mVideoHeight < 0 ||
+ (uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
+ ALOGE("Invalid size(s), width=%d, height=%d", mVideoWidth, mVideoHeight);
return BAD_VALUE;
}
}
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index c3d85ee..50e454c 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -831,7 +831,9 @@
}
void MediaCodecSource::onPause(int64_t pauseStartTimeUs) {
- if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
+ if (mStopping || mOutput.lock()->mEncoderReachedEOS) {
+ // Nothing to do
+ } else if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
sp<AMessage> params = new AMessage;
params->setInt32(PARAMETER_KEY_SUSPEND, true);
params->setInt64(PARAMETER_KEY_SUSPEND_TIME, pauseStartTimeUs);
diff --git a/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp b/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp
index 41a9e98..621fda8 100644
--- a/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/test/amrnbdec_test.cpp
@@ -67,6 +67,7 @@
int bytesRead = fread(header, 1, kFileHeaderSize, fpInput);
if (bytesRead != kFileHeaderSize || memcmp(header, "#!AMR\n", kFileHeaderSize)) {
fprintf(stderr, "Invalid AMR-NB file\n");
+ fclose(fpInput);
return 1;
}
@@ -79,6 +80,7 @@
SNDFILE *handle = sf_open(argv[2], SFM_WRITE, &sfInfo);
if(!handle){
fprintf(stderr, "Could not create %s\n", argv[2]);
+ fclose(fpInput);
return 1;
}
@@ -87,6 +89,8 @@
int err = GSMInitDecode(&amrHandle, (Word8*)"AMRNBDecoder");
if(err != 0){
fprintf(stderr, "Error creating AMR-NB decoder instance\n");
+ fclose(fpInput);
+ sf_close(handle);
return 1;
}
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
index 7c094f3..7282de4 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
@@ -134,14 +134,16 @@
if(handle == 0)
{
printf("open dll error......");
- return -1;
+ ret = -1;
+ goto safe_exit;
}
pfunc = dlsym(handle, "voGetAMRWBEncAPI");
if(pfunc == 0)
{
printf("open function error......");
- return -1;
+ ret = -1;
+ goto safe_exit;
}
pGetAPI = (VOGETAUDIOENCAPI)pfunc;
@@ -150,7 +152,8 @@
if(returnCode)
{
printf("get APIs error......");
- return -1;
+ ret = -1;
+ goto safe_exit;
}
#else
ret = voGetAMRWBEncAPI(&AudioAPI);
@@ -253,7 +256,8 @@
fclose(fdst);
#ifdef LINUX
- dlclose(handle);
+ if (handle)
+ dlclose(handle);
#endif
return ret;
diff --git a/media/libstagefright/data/media_codecs_google_audio.xml b/media/libstagefright/data/media_codecs_google_audio.xml
index 632088a..8899adc 100644
--- a/media/libstagefright/data/media_codecs_google_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_audio.xml
@@ -58,7 +58,7 @@
</MediaCodec>
<MediaCodec name="OMX.google.raw.decoder" type="audio/raw">
<Limit name="channel-count" max="8" />
- <Limit name="sample-rate" ranges="8000-96000" />
+ <Limit name="sample-rate" ranges="8000-192000" />
<Limit name="bitrate" range="1-10000000" />
</MediaCodec>
<MediaCodec name="OMX.google.flac.decoder" type="audio/flac">
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index 47a9715..be2404d 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -67,7 +67,7 @@
<MediaCodec name="c2.android.raw.decoder" type="audio/raw">
<Alias name="OMX.google.raw.decoder" />
<Limit name="channel-count" max="8" />
- <Limit name="sample-rate" ranges="8000-96000" />
+ <Limit name="sample-rate" ranges="8000-192000" />
<Limit name="bitrate" range="1-10000000" />
</MediaCodec>
<MediaCodec name="c2.android.flac.decoder" type="audio/flac">
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index ec7ff57..96a8337 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -398,6 +398,13 @@
if (!moduleDevices.contains(device)) {
continue;
}
+
+ // removal of remote submix devices associated with a dynamic policy is
+ // handled by removeOutputProfile() and removeInputProfile()
+ if (audio_is_remote_submix_device(device->type()) && device->address() != "0") {
+ continue;
+ }
+
device->detach();
// Only remove from dynamic list, not from declared list!!!
if (!hwModule->getDynamicDevices().contains(device)) {
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index b16b5dc..880a3d7 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -343,6 +343,20 @@
return writeUpMessageQueue(&command);
}
+bool AAudioServiceStreamBase::isUpMessageQueueBusy() {
+ std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
+ if (mUpMessageQueue == nullptr) {
+ ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
+ return true;
+ }
+ int32_t framesAvailable = mUpMessageQueue->getFifoBuffer()
+ ->getFullFramesAvailable();
+ int32_t capacity = mUpMessageQueue->getFifoBuffer()
+ ->getBufferCapacityInFrames();
+ // Report busy when the queue is half full or more.
+ return framesAvailable >= (capacity / 2);
+}
+
aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
if (mUpMessageQueue == nullptr) {
@@ -366,6 +380,13 @@
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
AAudioServiceMessage command;
+ // It is not worth filling up the queue with timestamps.
+ // That can cause the stream to get suspended.
+ // So just drop the timestamp if the queue is getting full.
+ if (isUpMessageQueueBusy()) {
+ return AAUDIO_OK;
+ }
+
// Send a timestamp for the clock model.
aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
&command.timestamp.timestamp);
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index ffc768b..097bc64 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -312,6 +312,12 @@
android::wp<AAudioServiceEndpoint> mServiceEndpointWeak;
private:
+
+ /**
+ * @return true if the queue is getting full.
+ */
+ bool isUpMessageQueueBusy();
+
aaudio_handle_t mHandle = -1;
bool mFlowing = false;