Merge "EffectBundle: Proper bundle buffer forwarding of float data."
diff --git a/camera/Android.bp b/camera/Android.bp
index c76ae50..24b3918 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -29,12 +29,7 @@
// AIDL files for camera interfaces
// The headers for these interfaces will be available to any modules that
// include libcamera_client, at the path "aidl/package/path/BnFoo.h"
- "aidl/android/hardware/ICameraService.aidl",
- "aidl/android/hardware/ICameraServiceListener.aidl",
- "aidl/android/hardware/ICameraServiceProxy.aidl",
- "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
- "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
-
+ ":libcamera_client_aidl",
// Source for camera interface parcelables, and manually-written interfaces
"Camera.cpp",
@@ -81,3 +76,25 @@
],
}
+
+// AIDL interface between camera clients and the camera service.
+filegroup {
+ name: "libcamera_client_aidl",
+ srcs: [
+ "aidl/android/hardware/ICameraService.aidl",
+ "aidl/android/hardware/ICameraServiceListener.aidl",
+ "aidl/android/hardware/ICameraServiceProxy.aidl",
+ "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
+ "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
+ ],
+}
+
+// Extra AIDL files that are used by framework.jar but not libcamera_client
+// because they have hand-written native implementations.
+filegroup {
+ name: "libcamera_client_framework_aidl",
+ srcs: [
+ "aidl/android/hardware/ICamera.aidl",
+ "aidl/android/hardware/ICameraClient.aidl",
+ ],
+}
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 1793877..4f893f1 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1635,6 +1635,28 @@
*/
ACAMERA_CONTROL_ENABLE_ZSL = // byte (acamera_metadata_enum_android_control_enable_zsl_t)
ACAMERA_CONTROL_START + 41,
+ /**
+ * <p>Whether a significant scene change is detected within the currently-set AF
+ * region(s).</p>
+ *
+ * <p>Type: int32 (acamera_metadata_enum_android_control_af_scene_change_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * </ul></p>
+ *
+ * <p>When the camera focus routine detects a change in the scene it is looking at,
+ * such as a large shift in camera viewpoint, significant motion in the scene, or a
+ * significant illumination change, this value will be set to DETECTED for a single capture
+ * result. Otherwise the value will be NOT_DETECTED. The threshold for detection is similar
+ * to what would trigger a new passive focus scan to begin in CONTINUOUS autofocus modes.</p>
+ * <p>afSceneChange may be DETECTED only if afMode is AF_MODE_CONTINUOUS_VIDEO or
+ * AF_MODE_CONTINUOUS_PICTURE. In other AF modes, afSceneChange must be NOT_DETECTED.</p>
+ * <p>This key will be available if the camera device advertises this key via {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE = // int32 (acamera_metadata_enum_android_control_af_scene_change_t)
+ ACAMERA_CONTROL_START + 42,
ACAMERA_CONTROL_END,
/**
@@ -6115,6 +6137,20 @@
} acamera_metadata_enum_android_control_enable_zsl_t;
+// ACAMERA_CONTROL_AF_SCENE_CHANGE
+typedef enum acamera_metadata_enum_acamera_control_af_scene_change {
+ /**
+ * <p>Scene change is not detected within the AF region(s).</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE_NOT_DETECTED = 0,
+
+ /**
+ * <p>Scene change is detected within the AF region(s).</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE_DETECTED = 1,
+
+} acamera_metadata_enum_android_control_af_scene_change_t;
+
// ACAMERA_EDGE_MODE
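
For context (not part of this change), reading the new key from an app could look like the following minimal sketch. It assumes the existing NDK capture-result callback shape and ACameraMetadata_getConstEntry(); error handling is elided.

    #include <camera/NdkCameraCaptureSession.h>
    #include <camera/NdkCameraMetadata.h>

    // Hedged sketch: check the AF scene change flag in a capture result callback.
    static void onCaptureCompleted(void* context, ACameraCaptureSession* session,
                                   ACaptureRequest* request, const ACameraMetadata* result) {
        ACameraMetadata_const_entry entry;
        // The key is only present if the device lists it in ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS.
        if (ACameraMetadata_getConstEntry(result, ACAMERA_CONTROL_AF_SCENE_CHANGE, &entry) == ACAMERA_OK &&
                entry.count == 1 &&
                entry.data.i32[0] == ACAMERA_CONTROL_AF_SCENE_CHANGE_DETECTED) {
            // The focus routine detected a significant scene change in the AF region(s).
        }
    }
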
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index ddc4b16..44ed034 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -80,6 +80,7 @@
static bool gDisplayHistogram;
static bool showProgress = true;
static String8 gWriteMP4Filename;
+static String8 gComponentNameOverride;
static sp<ANativeWindow> gSurface;
@@ -193,7 +194,10 @@
CHECK(!gPreferSoftwareCodec);
flags |= MediaCodecList::kHardwareCodecsOnly;
}
- rawSource = SimpleDecodingSource::Create(source, flags, gSurface);
+ rawSource = SimpleDecodingSource::Create(
+ source, flags, gSurface,
+ gComponentNameOverride.isEmpty() ? nullptr : gComponentNameOverride.c_str(),
+ !gComponentNameOverride.isEmpty());
if (rawSource == NULL) {
return;
}
@@ -618,6 +622,7 @@
fprintf(stderr, " -o playback audio\n");
fprintf(stderr, " -w(rite) filename (write to .mp4 file)\n");
fprintf(stderr, " -k seek test\n");
+ fprintf(stderr, " -O(verride) name of the component\n");
fprintf(stderr, " -x display a histogram of decoding times/fps "
"(video only)\n");
fprintf(stderr, " -q don't show progress indicator\n");
@@ -703,7 +708,7 @@
sp<ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kxSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kO:xSTd:D:")) >= 0) {
switch (res) {
case 'a':
{
@@ -732,6 +737,12 @@
break;
}
+ case 'O':
+ {
+ gComponentNameOverride.setTo(optarg);
+ break;
+ }
+
case 'l':
{
listComponents = true;
@@ -1073,7 +1084,7 @@
i, MediaExtractor::kIncludeExtensiveMetaData);
if (meta == NULL) {
- break;
+ continue;
}
const char *mime;
meta->findCString(kKeyMIMEType, &mime);
diff --git a/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h
new file mode 100644
index 0000000..a99e174
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_PROPERTIES_H_
+#define CLEARKEY_DRM_PROPERTIES_H_
+
+#include <utils/String8.h>
+
+namespace clearkeydrm {
+
+static const android::String8 kVendorKey("vendor");
+static const android::String8 kVendorValue("Google");
+static const android::String8 kVersionKey("version");
+static const android::String8 kVersionValue("1.0");
+static const android::String8 kPluginDescriptionKey("description");
+static const android::String8 kPluginDescriptionValue("ClearKey CDM");
+static const android::String8 kAlgorithmsKey("algorithms");
+static const android::String8 kAlgorithmsValue("");
+static const android::String8 kListenerTestSupportKey("listenerTestSupport");
+static const android::String8 kListenerTestSupportValue("true");
+
+static const android::String8 kDeviceIdKey("deviceId");
+static const uint8_t kTestDeviceIdData[] =
+ {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+} // namespace clearkeydrm
+
+#endif // CLEARKEY_DRM_PROPERTIES_H_
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index ec07d87..7c43994 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -22,7 +22,7 @@
#include <utils/StrongPointer.h>
#include "DrmPlugin.h"
-
+#include "ClearKeyDrmProperties.h"
#include "Session.h"
namespace {
@@ -44,7 +44,22 @@
DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
: mSessionLibrary(sessionLibrary) {
+
mPlayPolicy.clear();
+ initProperties();
+}
+
+void DrmPlugin::initProperties() {
+ mStringProperties.clear();
+ mStringProperties.add(kVendorKey, kVendorValue);
+ mStringProperties.add(kVersionKey, kVersionValue);
+ mStringProperties.add(kPluginDescriptionKey, kPluginDescriptionValue);
+ mStringProperties.add(kAlgorithmsKey, kAlgorithmsValue);
+ mStringProperties.add(kListenerTestSupportKey, kListenerTestSupportValue);
+
+ Vector<uint8_t> testDeviceId;
+ testDeviceId.appendArray(kTestDeviceIdData, sizeof(kTestDeviceIdData) / sizeof(uint8_t));
+ mByteArrayProperties.add(kDeviceIdKey, testDeviceId);
}
status_t DrmPlugin::openSession(Vector<uint8_t>& sessionId) {
@@ -122,21 +137,57 @@
return res;
}
+status_t DrmPlugin::getPropertyByteArray(
+ const String8& name, Vector<uint8_t>& value) const {
+ ssize_t index = mByteArrayProperties.indexOfKey(name);
+ if (index < 0) {
+ ALOGE("App requested unknown property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+ value = mByteArrayProperties.valueAt(index);
+ return android::OK;
+}
+
+status_t DrmPlugin::setPropertyByteArray(
+ const String8& name, const Vector<uint8_t>& value) {
+ if (0 == name.compare(kDeviceIdKey)) {
+ ALOGD("Cannot set immutable property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+
+ ssize_t status = mByteArrayProperties.replaceValueFor(name, value);
+ if (status >= 0) {
+ return android::OK;
+ }
+ ALOGE("Failed to set property byte array, key=%s", name.string());
+ return android::BAD_VALUE;
+}
+
status_t DrmPlugin::getPropertyString(
const String8& name, String8& value) const {
- if (name == "vendor") {
- value = "Google";
- } else if (name == "version") {
- value = "1.0";
- } else if (name == "description") {
- value = "ClearKey CDM";
- } else if (name == "algorithms") {
- value = "";
- } else if (name == "listenerTestSupport") {
- value = "true";
- } else {
- ALOGE("App requested unknown string property %s", name.string());
- return android::ERROR_DRM_CANNOT_HANDLE;
+ ssize_t index = mStringProperties.indexOfKey(name);
+ if (index < 0) {
+ ALOGE("App requested unknown property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+ value = mStringProperties.valueAt(index);
+ return android::OK;
+}
+
+status_t DrmPlugin::setPropertyString(
+ const String8& name, const String8& value) {
+ String8 immutableKeys;
+ immutableKeys.appendFormat("%s,%s,%s,%s",
+ kAlgorithmsKey.string(), kPluginDescriptionKey.string(),
+ kVendorKey.string(), kVersionKey.string());
+ if (immutableKeys.contains(name.string())) {
+ ALOGD("Cannot set immutable property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+
+ if (mStringProperties.add(name, value) < 0) {
+ ALOGE("Failed to set property string, key=%s", name.string());
+ return android::BAD_VALUE;
}
return android::OK;
}
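
As a quick illustration of the new property tables (a sketch only; the helper below is hypothetical and not part of the change): the defaults installed by initProperties() can be read back, new string keys can be added, and the four built-in string keys plus deviceId remain immutable.

    #include "ClearKeyDrmProperties.h"
    #include "DrmPlugin.h"

    using namespace clearkeydrm;

    // Hypothetical helper exercising the new getters/setters.
    static void checkProperties(DrmPlugin* plugin) {
        android::String8 vendor;
        plugin->getPropertyString(kVendorKey, vendor);           // vendor == "Google"

        // Arbitrary new string keys may be added or replaced.
        plugin->setPropertyString(android::String8("customKey"), android::String8("custom"));

        // vendor, version, description and algorithms are immutable -> BAD_VALUE.
        plugin->setPropertyString(kVendorKey, android::String8("other"));

        // deviceId is the only byte-array property and it is read-only.
        android::Vector<uint8_t> id;
        plugin->getPropertyByteArray(kDeviceIdKey, id);          // 16-byte test id
        plugin->setPropertyByteArray(kDeviceIdKey, id);          // rejected with BAD_VALUE
    }
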
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index f37a706..62bc86f 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -137,25 +137,13 @@
const String8& name, String8& value) const;
virtual status_t getPropertyByteArray(
- const String8& name, Vector<uint8_t>& value) const {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
+ const String8& name, Vector<uint8_t>& value) const;
virtual status_t setPropertyString(
- const String8& name, const String8& value) {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
+ const String8& name, const String8& value);
virtual status_t setPropertyByteArray(
- const String8& name, const Vector<uint8_t>& value) {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
+ const String8& name, const Vector<uint8_t>& value);
virtual status_t setCipherAlgorithm(
const Vector<uint8_t>& sessionId, const String8& algorithm) {
@@ -242,9 +230,13 @@
}
private:
+ void initProperties();
void setPlayPolicy();
- android::KeyedVector<android::String8, android::String8> mPlayPolicy;
+ android::KeyedVector<String8, String8> mPlayPolicy;
+ android::KeyedVector<String8, String8> mStringProperties;
+ android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
+
SessionLibrary* mSessionLibrary;
DISALLOW_EVIL_CONSTRUCTORS(DrmPlugin);
diff --git a/media/libaaudio/examples/input_monitor/Android.bp b/media/libaaudio/examples/input_monitor/Android.bp
index 2c3418d..d8c5843 100644
--- a/media/libaaudio/examples/input_monitor/Android.bp
+++ b/media/libaaudio/examples/input_monitor/Android.bp
@@ -2,6 +2,7 @@
name: "input_monitor",
gtest: false,
srcs: ["src/input_monitor.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
@@ -10,6 +11,7 @@
name: "input_monitor_callback",
gtest: false,
srcs: ["src/input_monitor_callback.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
diff --git a/media/libaaudio/examples/loopback/Android.bp b/media/libaaudio/examples/loopback/Android.bp
index 2b624a8..fa8fdc9 100644
--- a/media/libaaudio/examples/loopback/Android.bp
+++ b/media/libaaudio/examples/loopback/Android.bp
@@ -2,6 +2,7 @@
name: "aaudio_loopback",
gtest: false,
srcs: ["src/loopback.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
diff --git a/media/libaaudio/examples/write_sine/Android.bp b/media/libaaudio/examples/write_sine/Android.bp
index f162e85..aa25e67 100644
--- a/media/libaaudio/examples/write_sine/Android.bp
+++ b/media/libaaudio/examples/write_sine/Android.bp
@@ -1,6 +1,7 @@
cc_test {
name: "write_sine",
srcs: ["src/write_sine.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
@@ -8,6 +9,7 @@
cc_test {
name: "write_sine_callback",
srcs: ["src/write_sine_callback.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 2450920..fc5830a 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -25,6 +25,7 @@
#include "aaudio/AAudio.h"
#include <aaudio/AAudioTesting.h>
+#include <math.h>
#include "utility/AAudioUtilities.h"
@@ -50,44 +51,10 @@
return size;
}
-
// TODO expose and call clamp16_from_float function in primitives.h
static inline int16_t clamp16_from_float(float f) {
- /* Offset is used to expand the valid range of [-1.0, 1.0) into the 16 lsbs of the
- * floating point significand. The normal shift is 3<<22, but the -15 offset
- * is used to multiply by 32768.
- */
- static const float offset = (float)(3 << (22 - 15));
- /* zero = (0x10f << 22) = 0x43c00000 (not directly used) */
- static const int32_t limneg = (0x10f << 22) /*zero*/ - 32768; /* 0x43bf8000 */
- static const int32_t limpos = (0x10f << 22) /*zero*/ + 32767; /* 0x43c07fff */
-
- union {
- float f;
- int32_t i;
- } u;
-
- u.f = f + offset; /* recenter valid range */
- /* Now the valid range is represented as integers between [limneg, limpos].
- * Clamp using the fact that float representation (as an integer) is an ordered set.
- */
- if (u.i < limneg)
- u.i = -32768;
- else if (u.i > limpos)
- u.i = 32767;
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
-}
-
-// Same but without clipping.
-// Convert -1.0f to +1.0f to -32768 to +32767
-static inline int16_t floatToInt16(float f) {
- static const float offset = (float)(3 << (22 - 15));
- union {
- float f;
- int32_t i;
- } u;
- u.f = f + offset; /* recenter valid range */
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
+ static const float scale = 1 << 15;
+ return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
}
static float clipAndClampFloatToPcm16(float sample, float scaler) {
@@ -188,13 +155,14 @@
int32_t samplesPerFrame,
float amplitude1,
float amplitude2) {
- float scaler = amplitude1 / SHORT_SCALE;
- float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
+ float scaler = amplitude1;
+ float delta = (amplitude2 - amplitude1) / numFrames;
for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
// No need to clip because int16_t range is inherently limited.
float sample = *source++ * scaler;
- *destination++ = floatToInt16(sample);
+ *destination++ = (int16_t) roundf(sample);
}
scaler += delta;
}
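
A standalone sketch (not part of the change) of what the simplified conversion does, matching the expectations added to test_linear_ramp.cpp later in this patch:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    // Same math as the new clamp16_from_float(): scale by 2^15, clamp to
    // [-32768.0f, 32767.0f], then round to nearest.
    static int16_t clamp16_from_float(float f) {
        static const float scale = 1 << 15;
        return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
    }

    int main() {
        printf("%d\n", clamp16_from_float(1.0f));     // 32767 (saturated)
        printf("%d\n", clamp16_from_float(0.5f));     // 16384
        printf("%d\n", clamp16_from_float(0.1f));     // 3277
        printf("%d\n", clamp16_from_float(-1.0f));    // -32768
        printf("%d\n", clamp16_from_float(12345.6f)); // 32767 (clamped)
        return 0;
    }
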
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 884a2b3..9f80695 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -111,3 +111,16 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_aaudio_monkey",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_aaudio_monkey.cpp"],
+ header_libs: ["libaaudio_example_utils"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_aaudio_monkey.cpp b/media/libaaudio/tests/test_aaudio_monkey.cpp
new file mode 100644
index 0000000..be54835
--- /dev/null
+++ b/media/libaaudio/tests/test_aaudio_monkey.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Try to trigger bugs by playing randomly on multiple streams.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include <aaudio/AAudio.h>
+#include "AAudioArgsParser.h"
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
+#include "SineGenerator.h"
+
+#define DEFAULT_TIMEOUT_NANOS (1 * NANOS_PER_SECOND)
+
+#define NUM_LOOPS 1000
+#define MAX_MICROS_DELAY (2 * 1000 * 1000)
+
+// TODO Consider adding an input stream.
+#define PROB_START (0.20)
+#define PROB_PAUSE (PROB_START + 0.10)
+#define PROB_FLUSH (PROB_PAUSE + 0.10)
+#define PROB_STOP (PROB_FLUSH + 0.10)
+#define PROB_CLOSE (PROB_STOP + 0.10)
+static_assert(PROB_CLOSE < 0.9, "Probability sum too high.");
+
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames);
+
+void AAudioMonkeyErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error) {
+ printf("Error Callback, error: %d\n",(int)error);
+}
+
+// This function is not thread safe. Only use this from a single thread.
+double nextRandomDouble() {
+ return drand48();
+}
+
+class AAudioMonkey : public AAudioSimplePlayer {
+public:
+
+ AAudioMonkey(int index, AAudioArgsParser *argParser)
+ : mArgParser(argParser)
+ , mIndex(index) {}
+
+ aaudio_result_t open() {
+ printf("Monkey # %d ---------------------------------------------- OPEN\n", mIndex);
+ double offset = mIndex * 50;
+ mSine1.setup(440.0, 48000);
+ mSine1.setSweep(300.0 + offset, 600.0 + offset, 5.0);
+ mSine2.setup(660.0, 48000);
+ mSine2.setSweep(350.0 + offset, 900.0 + offset, 7.0);
+
+ aaudio_result_t result = AAudioSimplePlayer::open(*mArgParser,
+ AAudioMonkeyDataCallback,
+ AAudioMonkeyErrorCallbackProc,
+ this);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - player.open() returned %d\n", result);
+ }
+
+ mArgParser->compareWithStream(getStream());
+ return result;
+ }
+
+ bool isOpen() {
+ return (getStream() != nullptr);
+
+ }
+ /**
+ *
+ * @return true if stream passes tests
+ */
+ bool validate() {
+ if (!isOpen()) return true; // closed is OK
+
+ // update and query stream state
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ aaudio_result_t result = AAudioStream_waitForStateChange(getStream(),
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_waitForStateChange returned %d\n", result);
+ return false;
+ }
+
+ int64_t framesRead = AAudioStream_getFramesRead(getStream());
+ int64_t framesWritten = AAudioStream_getFramesWritten(getStream());
+ int32_t xRuns = AAudioStream_getXRunCount(getStream());
+ // Print status
+ printf("%30s, framesWritten = %8lld, framesRead = %8lld, xRuns = %d\n",
+ AAudio_convertStreamStateToText(state),
+               (long long) framesWritten,
+               (long long) framesRead,
+ xRuns);
+
+ if (framesWritten < framesRead) {
+ printf("WARNING - UNDERFLOW - diff = %d !!!!!!!!!!!!\n",
+ (int) (framesWritten - framesRead));
+ }
+ return true;
+ }
+
+ aaudio_result_t invoke() {
+ aaudio_result_t result = AAUDIO_OK;
+ if (!isOpen()) {
+ result = open();
+ if (result != AAUDIO_OK) return result;
+ }
+
+ if (!validate()) {
+ return -1;
+ }
+
+ double dice = nextRandomDouble();
+ // Select an action based on a weighted probability.
+ if (dice < PROB_START) {
+ printf("start\n");
+ result = AAudioStream_requestStart(getStream());
+ } else if (dice < PROB_PAUSE) {
+ printf("pause\n");
+ result = AAudioStream_requestPause(getStream());
+ } else if (dice < PROB_FLUSH) {
+ printf("flush\n");
+ result = AAudioStream_requestFlush(getStream());
+ } else if (dice < PROB_STOP) {
+ printf("stop\n");
+ result = AAudioStream_requestStop(getStream());
+ } else if (dice < PROB_CLOSE) {
+ printf("close\n");
+ result = close();
+ } else {
+ printf("do nothing\n");
+ }
+
+ if (result == AAUDIO_ERROR_INVALID_STATE) {
+ printf(" got AAUDIO_ERROR_INVALID_STATE - expected from a monkey\n");
+ result = AAUDIO_OK;
+ }
+ if (result == AAUDIO_OK && isOpen()) {
+ if (!validate()) {
+ result = -1;
+ }
+ }
+ return result;
+ }
+
+ aaudio_data_callback_result_t renderAudio(
+ AAudioStream *stream,
+ void *audioData,
+ int32_t numFrames) {
+
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
+ // This code only plays on the first one or two channels.
+ // TODO Support arbitrary number of channels.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Render sine waves as shorts to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Render sine waves as floats to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+ }
+
+private:
+ const AAudioArgsParser *mArgParser;
+ const int mIndex;
+ SineGenerator mSine1;
+ SineGenerator mSine2;
+};
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ printf("ERROR - AAudioMonkeyDataCallback needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ AAudioMonkey *monkey = (AAudioMonkey *) userData;
+ return monkey->renderAudio(stream, audioData, numFrames);
+}
+
+
+static void usage() {
+ AAudioArgsParser::usage();
+ printf(" -i{seed} Initial random seed\n");
+ printf(" -t{count} number of monkeys in the Troop\n");
+}
+
+int main(int argc, const char **argv) {
+ AAudioArgsParser argParser;
+ std::vector<AAudioMonkey> monkeys;
+ aaudio_result_t result;
+ int numMonkeys = 1;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Monkeys\n", argv[0]);
+
+ long int seed = (long int)getNanoseconds(); // different every time by default
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (argParser.parseArg(arg)) {
+ // Handle options that are not handled by the ArgParser
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'i':
+ seed = atol(&arg[2]);
+ break;
+ case 't':
+ numMonkeys = atoi(&arg[2]);
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ }
+ }
+
+ srand48(seed);
+ printf("seed = %ld, nextRandomDouble() = %f\n", seed, nextRandomDouble());
+
+ for (int m = 0; m < numMonkeys; m++) {
+ monkeys.emplace_back(m, &argParser);
+ }
+
+ for (int i = 0; i < NUM_LOOPS; i++) {
+ // pick a random monkey and invoke it
+ double dice = nextRandomDouble();
+ int monkeyIndex = floor(dice * numMonkeys);
+ printf("----------- Monkey #%d\n", monkeyIndex);
+ result = monkeys[monkeyIndex].invoke();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ // sleep some random time
+ dice = nextRandomDouble();
+ dice = dice * dice * dice; // skew towards smaller delays
+ int micros = (int) (dice * MAX_MICROS_DELAY);
+ usleep(micros);
+
+ // TODO consider making this multi-threaded, one thread per monkey, to catch more bugs
+ }
+
+ printf("PASS\n");
+ return EXIT_SUCCESS;
+
+error:
+ printf("FAIL - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ usleep(1000 * 1000); // give me time to stop the logcat
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/tests/test_linear_ramp.cpp b/media/libaaudio/tests/test_linear_ramp.cpp
index 5c53982..93226ba 100644
--- a/media/libaaudio/tests/test_linear_ramp.cpp
+++ b/media/libaaudio/tests/test_linear_ramp.cpp
@@ -15,13 +15,13 @@
*/
#include <iostream>
+#include <math.h>
#include <gtest/gtest.h>
#include "utility/AAudioUtilities.h"
#include "utility/LinearRamp.h"
-
TEST(test_linear_ramp, linear_ramp_segments) {
LinearRamp ramp;
const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
@@ -32,40 +32,40 @@
ramp.setLengthInFrames(8);
ramp.setTarget(8.0f);
- ASSERT_EQ(8, ramp.getLengthInFrames());
+ EXPECT_EQ(8, ramp.getLengthInFrames());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(0.0f, levelFrom);
- ASSERT_EQ(4.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(0.0f, levelFrom);
+ EXPECT_EQ(4.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(0.0f, destination[0]);
- ASSERT_EQ(1.0f, destination[1]);
- ASSERT_EQ(2.0f, destination[2]);
- ASSERT_EQ(3.0f, destination[3]);
+ EXPECT_EQ(0.0f, destination[0]);
+ EXPECT_EQ(1.0f, destination[1]);
+ EXPECT_EQ(2.0f, destination[2]);
+ EXPECT_EQ(3.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
@@ -80,29 +80,101 @@
ramp.setLengthInFrames(4);
ramp.setTarget(8.0f);
ramp.forceCurrent(4.0f);
- ASSERT_EQ(4.0f, ramp.getCurrent());
+ EXPECT_EQ(4.0f, ramp.getCurrent());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
+constexpr int16_t kMaxI16 = INT16_MAX;
+constexpr int16_t kMinI16 = INT16_MIN;
+constexpr int16_t kHalfI16 = 16384;
+constexpr int16_t kTenthI16 = 3277;
+
+//void AAudioConvert_floatToPcm16(const float *source,
+// int16_t *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, float_to_i16) {
+ const float source[] = {12345.6f, 1.0f, 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f, -12345.6f};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ const int16_t expected[count] = {kMaxI16, kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16, kMinI16};
+
+ AAudioConvert_floatToPcm16(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
+
+//void AAudioConvert_pcm16ToFloat(const int16_t *source,
+// float *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, i16_to_float) {
+ const int16_t source[] = {kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ float destination[count];
+ const float expected[count] = {(32767.0f / 32768.0f), 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f};
+
+ AAudioConvert_pcm16ToFloat(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_NEAR(expected[i], destination[i], 0.0001f);
+ }
+
+}
+
+//void AAudio_linearRamp(const int16_t *source,
+// int16_t *destination,
+// int32_t numFrames,
+// int32_t samplesPerFrame,
+// float amplitude1,
+// float amplitude2);
+TEST(test_linear_ramp, ramp_i16_to_i16) {
+ const int16_t source[] = {1, 1, 1, 1, 1, 1, 1, 1};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ // Ramp will sweep from -1 to almost +1
+ const int16_t expected[count] = {
+ -1, // from -1.00
+ -1, // from -0.75
+            -1, // from -0.50, round away from zero
+ 0, // from -0.25, round up to zero
+ 0, // from 0.00
+ 0, // from 0.25, round down to zero
+ 1, // from 0.50, round away from zero
+ 1 // from 0.75
+ };
+
+ // sweep across zero to test symmetry
+ constexpr float amplitude1 = -1.0;
+ constexpr float amplitude2 = 1.0;
+ AAudio_linearRamp(source, destination, count, 1, amplitude1, amplitude2);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
index 9e505d5..de386da 100644
--- a/media/libaaudio/tests/test_various.cpp
+++ b/media/libaaudio/tests/test_various.cpp
@@ -41,10 +41,76 @@
// Test AAudioStream_setBufferSizeInFrames()
+constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
+
+//int foo() { // To fix Android Studio formatting when editing.
+TEST(test_various, aaudio_stop_when_open) {
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+// Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+// Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, MyDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+// Create an AAudioStream using the Builder.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
+//int boo() { // To fix Android Studio formatting when editing.
+TEST(test_various, aaudio_flush_when_started) {
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+// Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+// Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, MyDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+// Create an AAudioStream using the Builder.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_STARTING, &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, state);
+
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, AAudioStream_requestFlush(aaudioStream));
+
+ state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, state);
+
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
//int main() { // To fix Android Studio formatting when editing.
TEST(test_various, aaudio_set_buffer_size) {
-
- aaudio_result_t result = AAUDIO_OK;
int32_t bufferCapacity;
int32_t framesPerBurst = 0;
int32_t actualSize = 0;
@@ -103,5 +169,4 @@
AAudioStream_close(aaudioStream);
AAudioStreamBuilder_delete(aaudioBuilder);
- printf(" result = %d = %s\n", result, AAudio_convertResultToText(result));
}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 98e8d95..bedde43 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -20,7 +20,7 @@
// The headers for these interfaces will be available to any modules that
// include libaudioclient, at the path "aidl/package/path/BnFoo.h"
"aidl/android/media/IAudioRecord.aidl",
- "aidl/android/media/IPlayer.aidl",
+ ":libaudioclient_aidl",
"AudioEffect.cpp",
"AudioPolicy.cpp",
@@ -70,3 +70,11 @@
],
},
}
+
+// AIDL interface between libaudioclient and framework.jar
+filegroup {
+ name: "libaudioclient_aidl",
+ srcs: [
+ "aidl/android/media/IPlayer.aidl",
+ ],
+}
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 2432cac..741d084 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -69,8 +69,7 @@
: mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mPortId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
}
@@ -97,10 +96,9 @@
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
- mProxy(NULL),
- mPortId(AUDIO_PORT_HANDLE_NONE)
+ mProxy(NULL)
{
- mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+ (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
uid, pid, pAttributes, selectedDeviceId);
}
@@ -151,6 +149,11 @@
const audio_attributes_t* pAttributes,
audio_port_handle_t selectedDeviceId)
{
+ status_t status = NO_ERROR;
+ uint32_t channelCount;
+ pid_t callingPid;
+ pid_t myPid;
+
ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
"uid %d, pid %d",
@@ -170,7 +173,8 @@
case TRANSFER_CALLBACK:
if (cbf == NULL) {
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_OBTAIN:
@@ -178,14 +182,16 @@
break;
default:
ALOGE("Invalid transfer type %d", transferType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mTransfer = transferType;
// invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
ALOGE("Track already in use");
- return INVALID_OPERATION;
+ status = INVALID_OPERATION;
+ goto exit;
}
if (pAttributes == NULL) {
@@ -209,16 +215,18 @@
// AudioFlinger capture only supports linear PCM
if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
ALOGE("Format %#x is not linear pcm", format);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mFormat = format;
if (!audio_is_input_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mChannelMask = channelMask;
- uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
+ channelCount = audio_channel_count_from_in_mask(channelMask);
mChannelCount = channelCount;
if (audio_is_linear_pcm(format)) {
@@ -227,28 +235,24 @@
mFrameSize = sizeof(uint8_t);
}
- // mFrameCount is initialized in openRecord_l
+ // mFrameCount is initialized in createRecord_l
mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
- // mNotificationFramesAct is initialized in openRecord_l
+ // mNotificationFramesAct is initialized in createRecord_l
- if (sessionId == AUDIO_SESSION_ALLOCATE) {
- mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- } else {
- mSessionId = sessionId;
- }
+ mSessionId = sessionId;
ALOGV("set(): mSessionId %d", mSessionId);
- int callingpid = IPCThreadState::self()->getCallingPid();
- int mypid = getpid();
- if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+ callingPid = IPCThreadState::self()->getCallingPid();
+ myPid = getpid();
+ if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
- if (pid == -1 || (callingpid != mypid)) {
- mClientPid = callingpid;
+ if (pid == -1 || (callingPid != myPid)) {
+ mClientPid = callingPid;
} else {
mClientPid = pid;
}
@@ -263,7 +267,7 @@
}
// create the IAudioRecord
- status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
+ status = createRecord_l(0 /*epoch*/, mOpPackageName);
if (status != NO_ERROR) {
if (mAudioRecordThread != 0) {
@@ -271,10 +275,9 @@
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
- return status;
+ goto exit;
}
- mStatus = NO_ERROR;
mUserData = user;
// TODO: add audio hardware input latency here
mLatency = (1000LL * mFrameCount) / mSampleRate;
@@ -289,7 +292,9 @@
mFramesRead = 0;
mFramesReadServerOffset = 0;
- return NO_ERROR;
+exit:
+ mStatus = status;
+ return status;
}
// -------------------------------------------------------------------------
@@ -540,70 +545,29 @@
}
// must be called with mLock held
-status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
+status_t AudioRecord::createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
{
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
+ IAudioFlinger::CreateRecordInput input;
+ IAudioFlinger::CreateRecordOutput output;
+ audio_session_t originalSessionId;
+ sp<media::IAudioRecord> record;
+ void *iMemPointer;
+ audio_track_cblk_t* cblk;
+ status_t status;
+
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- audio_io_handle_t input;
-
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioRecord is re-created.
- status_t status;
-
- // Not a conventional loop, but a retry loop for at most two iterations total.
- // Try first maybe with FAST flag then try again without FAST flag if that fails.
- // Exits loop normally via a return at the bottom, or with error via a break.
- // The sp<> references will be dropped when re-entering scope.
- // The lack of indentation is deliberate, to reduce code churn and ease merges.
- for (;;) {
- audio_config_base_t config = {
- .sample_rate = mSampleRate,
- .channel_mask = mChannelMask,
- .format = mFormat
- };
- mRoutedDeviceId = mSelectedDeviceId;
- status = AudioSystem::getInputForAttr(&mAttributes, &input,
- mSessionId,
- // FIXME compare to AudioTrack
- mClientPid,
- mClientUid,
- &config,
- mFlags, &mRoutedDeviceId, &mPortId);
-
- if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
- ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
- "format %#x, channel mask %#x, flags %#x",
- mSessionId, mAttributes.source, mSampleRate, mFormat, mChannelMask, mFlags);
- return BAD_VALUE;
- }
-
// Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
// we must release it ourselves if anything goes wrong.
-#if 0
- size_t afFrameCount;
- status = AudioSystem::getFrameCount(input, &afFrameCount);
- if (status != NO_ERROR) {
- ALOGE("getFrameCount(input=%d) status %d", input, status);
- break;
- }
-#endif
-
- uint32_t afSampleRate;
- status = AudioSystem::getSamplingRate(input, &afSampleRate);
- if (status != NO_ERROR) {
- ALOGE("getSamplingRate(input=%d) status %d", input, status);
- break;
- }
- if (mSampleRate == 0) {
- mSampleRate = afSampleRate;
- }
-
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
bool useCaseAllowed =
@@ -622,66 +586,41 @@
if (!useCaseAllowed) {
ALOGW("AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
convertTransferToText(mTransfer));
- }
-
- // sample rates must also match
- bool sampleRateAllowed = mSampleRate == afSampleRate;
- if (!sampleRateAllowed) {
- ALOGW("AUDIO_INPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
- mSampleRate, afSampleRate);
- }
-
- bool fastAllowed = useCaseAllowed && sampleRateAllowed;
- if (!fastAllowed) {
mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
AUDIO_INPUT_FLAG_RAW));
- AudioSystem::releaseInput(input, mSessionId);
- continue; // retry
}
}
- // The notification frame count is the period between callbacks, as suggested by the client
- // but moderated by the server. For record, the calculations are done entirely on server side.
- size_t notificationFrames = mNotificationFramesReq;
- size_t frameCount = mReqFrameCount;
-
- audio_input_flags_t flags = mFlags;
-
- pid_t tid = -1;
+ input.attr = mAttributes;
+ input.config.sample_rate = mSampleRate;
+ input.config.channel_mask = mChannelMask;
+ input.config.format = mFormat;
+ input.clientInfo.clientUid = mClientUid;
+ input.clientInfo.clientPid = mClientPid;
+ input.clientInfo.clientTid = -1;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
if (mAudioRecordThread != 0) {
- tid = mAudioRecordThread->getTid();
+ input.clientInfo.clientTid = mAudioRecordThread->getTid();
}
}
+ input.opPackageName = opPackageName;
- size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
- // but we will still need the original value also
- audio_session_t originalSessionId = mSessionId;
+ input.flags = mFlags;
+ // The notification frame count is the period between callbacks, as suggested by the client
+ // but moderated by the server. For record, the calculations are done entirely on server side.
+ input.frameCount = mReqFrameCount;
+ input.notificationFrameCount = mNotificationFramesReq;
+ input.selectedDeviceId = mSelectedDeviceId;
+ input.sessionId = mSessionId;
+ originalSessionId = mSessionId;
- sp<IMemory> iMem; // for cblk
- sp<IMemory> bufferMem;
- sp<media::IAudioRecord> record = audioFlinger->openRecord(input,
- mSampleRate,
- mFormat,
- mChannelMask,
- opPackageName,
- &temp,
- &flags,
- mClientPid,
- tid,
- mClientUid,
- &mSessionId,
- ¬ificationFrames,
- iMem,
- bufferMem,
- &status,
- mPortId);
- ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
- "session ID changed from %d to %d", originalSessionId, mSessionId);
+ record = audioFlinger->createRecord(input,
+ output,
+ &status);
if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create record track, status: %d", status);
- break;
+ goto exit;
}
ALOG_ASSERT(record != 0);
@@ -689,41 +628,41 @@
// so we are no longer responsible for releasing it.
mAwaitBoost = false;
- if (mFlags & AUDIO_INPUT_FLAG_FAST) {
- if (flags & AUDIO_INPUT_FLAG_FAST) {
- ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
- mAwaitBoost = true;
- } else {
- ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount, temp);
- mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
- AUDIO_INPUT_FLAG_RAW));
- continue; // retry
- }
+ if (output.flags & AUDIO_INPUT_FLAG_FAST) {
+ ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu",
+ mReqFrameCount, output.frameCount);
+ mAwaitBoost = true;
}
- mFlags = flags;
+ mFlags = output.flags;
+ mRoutedDeviceId = output.selectedDeviceId;
+ mSessionId = output.sessionId;
+ mSampleRate = output.sampleRate;
- if (iMem == 0) {
+ if (output.cblk == 0) {
ALOGE("Could not get control block");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- void *iMemPointer = iMem->pointer();
+    iMemPointer = output.cblk->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
+ cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
// Starting address of buffers in shared memory.
// The buffers are either immediately after the control block,
// or in a separate area at discretion of server.
void *buffers;
- if (bufferMem == 0) {
+ if (output.buffers == 0) {
buffers = cblk + 1;
} else {
- buffers = bufferMem->pointer();
+ buffers = output.buffers->pointer();
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
}
@@ -733,43 +672,42 @@
mDeathNotifier.clear();
}
mAudioRecord = record;
- mCblkMemory = iMem;
- mBufferMemory = bufferMem;
+ mCblkMemory = output.cblk;
+ mBufferMemory = output.buffers;
IPCThreadState::self()->flushCommands();
mCblk = cblk;
- // note that temp is the (possibly revised) value of frameCount
- if (temp < frameCount || (frameCount == 0 && temp == 0)) {
- ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
+ // note that output.frameCount is the (possibly revised) value of mReqFrameCount
+ if (output.frameCount < mReqFrameCount || (mReqFrameCount == 0 && output.frameCount == 0)) {
+ ALOGW("Requested frameCount %zu but received frameCount %zu",
+ mReqFrameCount, output.frameCount);
}
- frameCount = temp;
// Make sure that application is notified with sufficient margin before overrun.
// The computation is done on server side.
- if (mNotificationFramesReq > 0 && notificationFrames != mNotificationFramesReq) {
+ if (mNotificationFramesReq > 0 && output.notificationFrameCount != mNotificationFramesReq) {
ALOGW("Server adjusted notificationFrames from %u to %zu for frameCount %zu",
- mNotificationFramesReq, notificationFrames, frameCount);
+ mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
}
- mNotificationFramesAct = (uint32_t) notificationFrames;
-
+ mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
//mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
- if (mDeviceCallback != 0 && mInput != input) {
+ if (mDeviceCallback != 0 && mInput != output.inputId) {
if (mInput != AUDIO_IO_HANDLE_NONE) {
AudioSystem::removeAudioDeviceCallback(this, mInput);
}
- AudioSystem::addAudioDeviceCallback(this, input);
+ AudioSystem::addAudioDeviceCallback(this, output.inputId);
}
// We retain a copy of the I/O handle, but don't own the reference
- mInput = input;
+ mInput = output.inputId;
mRefreshRemaining = true;
- mFrameCount = frameCount;
+ mFrameCount = output.frameCount;
// If IAudioRecord is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
- if (frameCount > mReqFrameCount) {
- mReqFrameCount = frameCount;
+ if (mFrameCount > mReqFrameCount) {
+ mReqFrameCount = mFrameCount;
}
// update proxy
@@ -780,17 +718,9 @@
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
- return NO_ERROR;
-
- // End of retry loop.
- // The lack of indentation is deliberate, to reduce code churn and ease merges.
- }
-
-// Arrive here on error, via a break
- AudioSystem::releaseInput(input, mSessionId);
- if (status == NO_ERROR) {
- status = NO_INIT;
- }
+exit:
+ mStatus = status;
+    // sp<IAudioRecord> track destructor will cause releaseInput() to be called by AudioFlinger
return status;
}
@@ -1222,12 +1152,12 @@
mFlags = mOrigFlags;
- // if the new IAudioRecord is created, openRecord_l() will modify the
+ // if the new IAudioRecord is created, createRecord_l() will modify the
// following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
// It will also delete the strong references on previous IAudioRecord and IMemory
Modulo<uint32_t> position(mProxy->getPosition());
mNewPosition = position + mUpdatePeriod;
- status_t result = openRecord_l(position, mOpPackageName);
+ status_t result = createRecord_l(position, mOpPackageName);
if (result == NO_ERROR) {
if (mActive) {
// callback thread or sync event hasn't changed
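
To summarize the shape of the new negotiation (a condensed, hypothetical sketch using only the fields visible in this change; the real caller is AudioRecord::createRecord_l() above): the client fills CreateRecordInput, and AudioFlinger hands back the negotiated values in CreateRecordOutput instead of through a long positional parameter list.

    #include <binder/IPCThreadState.h>
    #include <media/IAudioFlinger.h>
    #include <unistd.h>

    using namespace android;

    // Hypothetical helper, not part of the patch.
    static sp<media::IAudioRecord> openCapture(const sp<IAudioFlinger>& af,
                                               const audio_attributes_t& attr,
                                               const audio_config_base_t& config,
                                               const String16& opPackageName,
                                               status_t* status) {
        IAudioFlinger::CreateRecordInput input;
        input.attr = attr;
        input.config = config;                        // sample rate / channel mask / format
        input.clientInfo.clientUid = IPCThreadState::self()->getCallingUid();
        input.clientInfo.clientPid = getpid();
        input.clientInfo.clientTid = -1;              // only set for FAST tracks
        input.opPackageName = opPackageName;
        input.flags = AUDIO_INPUT_FLAG_NONE;
        input.frameCount = 0;                         // 0 lets the server choose
        input.notificationFrameCount = 0;
        input.selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
        input.sessionId = AUDIO_SESSION_ALLOCATE;

        IAudioFlinger::CreateRecordOutput output;
        sp<media::IAudioRecord> record = af->createRecord(input, output, status);
        // On success the caller reads back output.sampleRate, output.frameCount,
        // output.flags, output.sessionId, output.inputId, and maps output.cblk /
        // output.buffers for the shared control block and data buffers.
        return record;
    }
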
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 30f97ac..c8fa618 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -197,7 +197,7 @@
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
@@ -228,7 +228,7 @@
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
@@ -284,6 +284,11 @@
float maxRequiredSpeed,
audio_port_handle_t selectedDeviceId)
{
+ status_t status;
+ uint32_t channelCount;
+ pid_t callingPid;
+ pid_t myPid;
+
ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
@@ -306,25 +311,29 @@
case TRANSFER_CALLBACK:
if (cbf == NULL || sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_OBTAIN:
case TRANSFER_SYNC:
if (sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_SHARED:
if (sharedBuffer == 0) {
ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
default:
ALOGE("Invalid transfer type %d", transferType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSharedBuffer = sharedBuffer;
mTransfer = transferType;
@@ -338,7 +347,8 @@
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
ALOGE("Track already in use");
- return INVALID_OPERATION;
+ status = INVALID_OPERATION;
+ goto exit;
}
// handle default values first.
@@ -348,7 +358,8 @@
if (pAttributes == NULL) {
if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("Invalid stream type %d", streamType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mStreamType = streamType;
@@ -380,16 +391,18 @@
// validate parameters
if (!audio_is_valid_format(format)) {
ALOGE("Invalid format %#x", format);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mFormat = format;
if (!audio_is_output_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mChannelMask = channelMask;
- uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+ channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
// force direct flag if format is not linear PCM
@@ -424,7 +437,8 @@
// sampling rate must be specified for direct outputs
if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSampleRate = sampleRate;
mOriginalSampleRate = sampleRate;
@@ -455,12 +469,14 @@
if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
ALOGE("notificationFrames=%d not permitted for non-fast track",
notificationFrames);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
if (frameCount > 0) {
ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
notificationFrames, frameCount);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mNotificationFramesReq = 0;
const uint32_t minNotificationsPerBuffer = 1;
@@ -472,15 +488,15 @@
notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
}
mNotificationFramesAct = 0;
- int callingpid = IPCThreadState::self()->getCallingPid();
- int mypid = getpid();
- if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+ callingPid = IPCThreadState::self()->getCallingPid();
+ myPid = getpid();
+ if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
- if (pid == -1 || (callingpid != mypid)) {
- mClientPid = callingpid;
+ if (pid == -1 || (callingPid != myPid)) {
+ mClientPid = callingPid;
} else {
mClientPid = pid;
}
@@ -495,7 +511,7 @@
}
// create the IAudioTrack
- status_t status = createTrack_l();
+ status = createTrack_l();
if (status != NO_ERROR) {
if (mAudioTrackThread != 0) {
@@ -503,10 +519,9 @@
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
- return status;
+ goto exit;
}
- mStatus = NO_ERROR;
mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
@@ -534,7 +549,10 @@
mFramesWrittenServerOffset = 0;
mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
mVolumeHandler = new media::VolumeHandler();
- return NO_ERROR;
+
+exit:
+ mStatus = status;
+ return status;
}
// -------------------------------------------------------------------------
@@ -1278,15 +1296,16 @@
status_t AudioTrack::createTrack_l()
{
+ status_t status;
+ bool callbackAdded = false;
+
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- status_t status;
- bool callbackAdded = false;
-
{
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioTrack is re-created.
@@ -1355,7 +1374,10 @@
if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
- goto error;
+ if (status == NO_ERROR) {
+ status = NO_INIT;
+ }
+ goto exit;
}
ALOG_ASSERT(track != 0);
@@ -1383,13 +1405,13 @@
if (iMem == 0) {
ALOGE("Could not get control block");
status = NO_INIT;
- goto error;
+ goto exit;
}
void *iMemPointer = iMem->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
status = NO_INIT;
- goto error;
+ goto exit;
}
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
@@ -1443,7 +1465,7 @@
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
status = NO_INIT;
- goto error;
+ goto exit;
}
}
@@ -1486,17 +1508,15 @@
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
- return NO_ERROR;
}
-error:
- if (callbackAdded) {
+exit:
+ if (status != NO_ERROR && callbackAdded) {
// note: mOutput is always valid if callbackAdded is true
AudioSystem::removeAudioDeviceCallback(this, mOutput);
}
- if (status == NO_ERROR) {
- status = NO_INIT;
- }
+
+ mStatus = status;
// sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
return status;
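
The AudioTrack changes above fold the scattered early returns of set() and createTrack_l() into a single exit label, so mStatus is updated and the device callback is removed on every path, success or failure. A minimal sketch of that single-exit pattern, with purely illustrative names (Widget, acquireResource, releaseResource are not AudioTrack APIs):

#include <utils/Errors.h>   // status_t, NO_ERROR, BAD_VALUE, NO_INIT

using namespace android;

struct Widget {
    status_t mStatus = NO_INIT;

    bool acquireResource() { return true; }   // stand-in for the real work
    void releaseResource() {}

    status_t init(int param) {
        status_t status = NO_ERROR;
        bool resourceAcquired = false;

        if (param < 0) {
            status = BAD_VALUE;
            goto exit;              // no early return; fall through to the common exit
        }
        resourceAcquired = acquireResource();
        if (!resourceAcquired) {
            status = NO_INIT;
            goto exit;
        }

exit:
        if (status != NO_ERROR && resourceAcquired) {
            releaseResource();      // cleanup runs only on the failure path
        }
        mStatus = status;           // recorded on success and failure alike
        return status;
    }
};
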
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 5cf2bdb..5db60f3 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -30,7 +30,7 @@
enum {
CREATE_TRACK = IBinder::FIRST_CALL_TRANSACTION,
- OPEN_RECORD,
+ CREATE_RECORD,
SAMPLE_RATE,
RESERVED, // obsolete, was CHANNEL_COUNT
FORMAT,
@@ -130,102 +130,39 @@
return track;
}
- virtual sp<media::IAudioRecord> openRecord(
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& opPackageName,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid,
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers,
- status_t *status,
- audio_port_handle_t portId)
+ virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status)
{
Parcel data, reply;
sp<media::IAudioRecord> record;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) input);
- data.writeInt32(sampleRate);
- data.writeInt32(format);
- data.writeInt32(channelMask);
- data.writeString16(opPackageName);
- size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
- data.writeInt64(frameCount);
- audio_input_flags_t lFlags = flags != NULL ? *flags : AUDIO_INPUT_FLAG_NONE;
- data.writeInt32(lFlags);
- data.writeInt32((int32_t) pid);
- data.writeInt32((int32_t) tid);
- data.writeInt32((int32_t) clientUid);
- audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
- if (sessionId != NULL) {
- lSessionId = *sessionId;
+
+ if (status == nullptr) {
+ return record;
}
- data.writeInt32(lSessionId);
- data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
- data.writeInt32(portId);
- cblk.clear();
- buffers.clear();
- status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
+
+ input.writeToParcel(&data);
+
+ status_t lStatus = remote()->transact(CREATE_RECORD, data, &reply);
if (lStatus != NO_ERROR) {
- ALOGE("openRecord error: %s", strerror(-lStatus));
- } else {
- frameCount = reply.readInt64();
- if (pFrameCount != NULL) {
- *pFrameCount = frameCount;
- }
- lFlags = (audio_input_flags_t)reply.readInt32();
- if (flags != NULL) {
- *flags = lFlags;
- }
- lSessionId = (audio_session_t) reply.readInt32();
- if (sessionId != NULL) {
- *sessionId = lSessionId;
- }
- size_t lNotificationFrames = (size_t) reply.readInt64();
- if (notificationFrames != NULL) {
- *notificationFrames = lNotificationFrames;
- }
- lStatus = reply.readInt32();
- record = interface_cast<media::IAudioRecord>(reply.readStrongBinder());
- cblk = interface_cast<IMemory>(reply.readStrongBinder());
- if (cblk != 0 && cblk->pointer() == NULL) {
- cblk.clear();
- }
- buffers = interface_cast<IMemory>(reply.readStrongBinder());
- if (buffers != 0 && buffers->pointer() == NULL) {
- buffers.clear();
- }
- if (lStatus == NO_ERROR) {
- if (record == 0) {
- ALOGE("openRecord should have returned an IAudioRecord");
- lStatus = UNKNOWN_ERROR;
- } else if (cblk == 0) {
- ALOGE("openRecord should have returned a cblk");
- lStatus = NO_MEMORY;
- }
- // buffers is permitted to be 0
- } else {
- if (record != 0 || cblk != 0 || buffers != 0) {
- ALOGE("openRecord returned an IAudioRecord, cblk, "
- "or buffers but with status %d", lStatus);
- }
- }
- if (lStatus != NO_ERROR) {
- record.clear();
- cblk.clear();
- buffers.clear();
- }
+ ALOGE("createRecord transaction error %d", lStatus);
+ *status = DEAD_OBJECT;
+ return record;
}
- if (status != NULL) {
- *status = lStatus;
+ *status = reply.readInt32();
+ if (*status != NO_ERROR) {
+ ALOGE("createRecord returned error %d", *status);
+ return record;
}
+
+ record = interface_cast<media::IAudioRecord>(reply.readStrongBinder());
+ if (record == 0) {
+ ALOGE("createRecord returned a NULL IAudioRecord with status OK");
+ *status = DEAD_OBJECT;
+ return record;
+ }
+ output.readFromParcel(&reply);
return record;
}
@@ -905,7 +842,7 @@
// TODO should select more wisely the items from the list
switch (code) {
case CREATE_TRACK:
- case OPEN_RECORD:
+ case CREATE_RECORD:
case SET_MASTER_VOLUME:
case SET_MASTER_MUTE:
case SET_STREAM_VOLUME:
@@ -948,37 +885,29 @@
output.writeToParcel(reply);
return NO_ERROR;
} break;
- case OPEN_RECORD: {
+ case CREATE_RECORD: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_io_handle_t input = (audio_io_handle_t) data.readInt32();
- uint32_t sampleRate = data.readInt32();
- audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = data.readInt32();
- const String16& opPackageName = data.readString16();
- size_t frameCount = data.readInt64();
- audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
- pid_t pid = (pid_t) data.readInt32();
- pid_t tid = (pid_t) data.readInt32();
- int clientUid = data.readInt32();
- audio_session_t sessionId = (audio_session_t) data.readInt32();
- size_t notificationFrames = data.readInt64();
- audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
- sp<IMemory> cblk;
- sp<IMemory> buffers;
- status_t status = NO_ERROR;
- sp<media::IAudioRecord> record = openRecord(input,
- sampleRate, format, channelMask, opPackageName, &frameCount, &flags,
- pid, tid, clientUid, &sessionId, &notificationFrames, cblk, buffers,
- &status, portId);
+
+ CreateRecordInput input;
+ if (input.readFromParcel((Parcel*)&data) != NO_ERROR) {
+ reply->writeInt32(DEAD_OBJECT);
+ return NO_ERROR;
+ }
+
+ status_t status;
+ CreateRecordOutput output;
+
+ sp<media::IAudioRecord> record = createRecord(input,
+ output,
+ &status);
+
LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
- reply->writeInt64(frameCount);
- reply->writeInt32(flags);
- reply->writeInt32(sessionId);
- reply->writeInt64(notificationFrames);
reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
reply->writeStrongBinder(IInterface::asBinder(record));
- reply->writeStrongBinder(IInterface::asBinder(cblk));
- reply->writeStrongBinder(IInterface::asBinder(buffers));
+ output.writeToParcel(reply);
return NO_ERROR;
} break;
case SAMPLE_RATE: {
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 0397eec..970ae90 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -928,6 +928,7 @@
bool hasAttributes = data.readInt32() != 0;
if (hasAttributes) {
data.read(&attr, sizeof(audio_attributes_t));
+ sanitizeAudioAttributes(&attr);
}
audio_session_t session = (audio_session_t)data.readInt32();
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
@@ -993,6 +994,7 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_attributes_t attr;
data.read(&attr, sizeof(audio_attributes_t));
+ sanitizeAudioAttributes(&attr);
audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
audio_session_t session = (audio_session_t)data.readInt32();
pid_t pid = (pid_t)data.readInt32();
@@ -1368,6 +1370,7 @@
data.read(&source, sizeof(struct audio_port_config));
audio_attributes_t attributes;
data.read(&attributes, sizeof(audio_attributes_t));
+ sanitizeAudioAttributes(&attributes);
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
status_t status = startAudioSource(&source, &attributes, &handle);
reply->writeInt32(status);
@@ -1418,6 +1421,15 @@
}
}
+void BnAudioPolicyService::sanitizeAudioAttributes(audio_attributes_t* attr)
+{
+ const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
+ if (strnlen(attr->tags, tagsMaxSize) >= tagsMaxSize) {
+ android_errorWriteLog(0x534e4554, "68953950"); // SafetyNet logging
+ }
+ attr->tags[tagsMaxSize - 1] = '\0';
+}
+
// ----------------------------------------------------------------------------
} // namespace android
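
The sanitization above matters because audio_attributes_t crosses Binder as raw bytes, so a caller can send a tags array with no terminating NUL, and any later strlen()/String8 use would read past the fixed-size field. A standalone illustration of the failure mode and the truncation (the sanitizeTags helper here is illustrative, not part of this change):

#include <assert.h>
#include <string.h>
#include <system/audio.h>   // audio_attributes_t, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE

// Force NUL-termination of a fixed-size tag buffer received from an
// untrusted parcel before any C-string routine touches it.
static void sanitizeTags(audio_attributes_t *attr) {
    attr->tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1] = '\0';
}

int main() {
    audio_attributes_t attr;
    memset(attr.tags, 'x', sizeof(attr.tags));   // attacker-controlled: no NUL anywhere
    sanitizeTags(&attr);
    assert(strlen(attr.tags) < AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);  // now safe to use
    return 0;
}
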
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index 50ce78f..7572671 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -16,6 +16,7 @@
package android.media;
+/* Native code must specify namespace media (media::IAudioRecord) when referring to this class */
interface IAudioRecord {
/* After it's created the track is not active. Call start() to
diff --git a/media/libaudioclient/include/media/AudioClient.h b/media/libaudioclient/include/media/AudioClient.h
index 108e326..247af9e 100644
--- a/media/libaudioclient/include/media/AudioClient.h
+++ b/media/libaudioclient/include/media/AudioClient.h
@@ -19,12 +19,13 @@
#define ANDROID_AUDIO_CLIENT_H
#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
#include <system/audio.h>
#include <utils/String16.h>
namespace android {
-class AudioClient {
+class AudioClient : public Parcelable {
public:
AudioClient() :
clientUid(-1), clientPid(-1), clientTid(-1), packageName("") {}
@@ -34,7 +35,7 @@
pid_t clientTid;
String16 packageName;
- status_t readFromParcel(Parcel *parcel) {
+ status_t readFromParcel(const Parcel *parcel) override {
clientUid = parcel->readInt32();
clientPid = parcel->readInt32();
clientTid = parcel->readInt32();
@@ -42,7 +43,7 @@
return NO_ERROR;
}
- status_t writeToParcel(Parcel *parcel) const {
+ status_t writeToParcel(Parcel *parcel) const override {
parcel->writeInt32(clientUid);
parcel->writeInt32(clientPid);
parcel->writeInt32(clientTid);
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 51596a2..00c2a88 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -570,7 +570,7 @@
// caller must hold lock on mLock for all _l methods
- status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
+ status_t createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
// FIXME enum is faster than strcmp() for parameter 'from'
status_t restoreRecord_l(const char *from);
@@ -682,7 +682,6 @@
// May not match the app selection depending on other
// activity and connected devices
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
- audio_port_handle_t mPortId; // unique ID allocated by audio policy
};
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 66601da..24a6e22 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -231,7 +231,7 @@
audio_stream_type_t stream,
audio_session_t session);
- // Client must successfully hand off the handle reference to AudioFlinger via openRecord(),
+ // Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
// or release it with releaseInput().
static status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 9061c26..57d9778 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -25,6 +25,7 @@
#include <utils/Errors.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
#include <media/AudioClient.h>
#include <media/IAudioTrack.h>
#include <media/IAudioFlingerClient.h>
@@ -50,9 +51,9 @@
* when calling createTrack() including arguments that will be updated by AudioFlinger
* and returned in CreateTrackOutput object
*/
- class CreateTrackInput {
+ class CreateTrackInput : public Parcelable {
public:
- status_t readFromParcel(Parcel *parcel) {
+ status_t readFromParcel(const Parcel *parcel) override {
/* input arguments*/
memset(&attr, 0, sizeof(audio_attributes_t));
if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
@@ -63,7 +64,9 @@
if (parcel->read(&config, sizeof(audio_config_t)) != NO_ERROR) {
return DEAD_OBJECT;
}
- (void)clientInfo.readFromParcel(parcel);
+ if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
if (parcel->readInt32() != 0) {
sharedBuffer = interface_cast<IMemory>(parcel->readStrongBinder());
if (sharedBuffer == 0 || sharedBuffer->pointer() == NULL) {
@@ -82,7 +85,7 @@
return NO_ERROR;
}
- status_t writeToParcel(Parcel *parcel) const {
+ status_t writeToParcel(Parcel *parcel) const override {
/* input arguments*/
(void)parcel->write(&attr, sizeof(audio_attributes_t));
(void)parcel->write(&config, sizeof(audio_config_t));
@@ -125,9 +128,9 @@
* when calling createTrack() including arguments that were passed as I/O for update by
* CreateTrackInput.
*/
- class CreateTrackOutput {
+ class CreateTrackOutput : public Parcelable {
public:
- status_t readFromParcel(Parcel *parcel) {
+ status_t readFromParcel(const Parcel *parcel) override {
/* input/output arguments*/
(void)parcel->read(&flags, sizeof(audio_output_flags_t));
frameCount = parcel->readInt64();
@@ -144,7 +147,7 @@
return NO_ERROR;
}
- status_t writeToParcel(Parcel *parcel) const {
+ status_t writeToParcel(Parcel *parcel) const override {
/* input/output arguments*/
(void)parcel->write(&flags, sizeof(audio_output_flags_t));
(void)parcel->writeInt64(frameCount);
@@ -176,6 +179,140 @@
audio_io_handle_t outputId;
};
+ /* CreateRecordInput contains all input arguments sent by AudioRecord to AudioFlinger
+ * when calling createRecord() including arguments that will be updated by AudioFlinger
+ * and returned in CreateRecordOutput object
+ */
+ class CreateRecordInput : public Parcelable {
+ public:
+ status_t readFromParcel(const Parcel *parcel) override {
+ /* input arguments*/
+ memset(&attr, 0, sizeof(audio_attributes_t));
+ if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ attr.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1] = '\0';
+ memset(&config, 0, sizeof(audio_config_base_t));
+ if (parcel->read(&config, sizeof(audio_config_base_t)) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ opPackageName = parcel->readString16();
+
+ /* input/output arguments*/
+ (void)parcel->read(&flags, sizeof(audio_input_flags_t));
+ frameCount = parcel->readInt64();
+ notificationFrameCount = parcel->readInt64();
+ (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->read(&sessionId, sizeof(audio_session_t));
+ return NO_ERROR;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const override {
+ /* input arguments*/
+ (void)parcel->write(&attr, sizeof(audio_attributes_t));
+ (void)parcel->write(&config, sizeof(audio_config_base_t));
+ (void)clientInfo.writeToParcel(parcel);
+ (void)parcel->writeString16(opPackageName);
+
+ /* input/output arguments*/
+ (void)parcel->write(&flags, sizeof(audio_input_flags_t));
+ (void)parcel->writeInt64(frameCount);
+ (void)parcel->writeInt64(notificationFrameCount);
+ (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->write(&sessionId, sizeof(audio_session_t));
+ return NO_ERROR;
+ }
+
+ /* input */
+ audio_attributes_t attr;
+ audio_config_base_t config;
+ AudioClient clientInfo;
+ String16 opPackageName;
+
+ /* input/output */
+ audio_input_flags_t flags;
+ size_t frameCount;
+ size_t notificationFrameCount;
+ audio_port_handle_t selectedDeviceId;
+ audio_session_t sessionId;
+ };
+
+ /* CreateRecordOutput contains all output arguments returned by AudioFlinger to AudioRecord
+ * when calling createRecord() including arguments that were passed as I/O for update by
+ * CreateRecordInput.
+ */
+ class CreateRecordOutput : public Parcelable {
+ public:
+ status_t readFromParcel(const Parcel *parcel) override {
+ /* input/output arguments*/
+ (void)parcel->read(&flags, sizeof(audio_input_flags_t));
+ frameCount = parcel->readInt64();
+ notificationFrameCount = parcel->readInt64();
+ (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->read(&sessionId, sizeof(audio_session_t));
+
+ /* output arguments*/
+ sampleRate = parcel->readUint32();
+ (void)parcel->read(&inputId, sizeof(audio_io_handle_t));
+ if (parcel->readInt32() != 0) {
+ cblk = interface_cast<IMemory>(parcel->readStrongBinder());
+ if (cblk == 0 || cblk->pointer() == NULL) {
+ return BAD_VALUE;
+ }
+ }
+ if (parcel->readInt32() != 0) {
+ buffers = interface_cast<IMemory>(parcel->readStrongBinder());
+ if (buffers == 0 || buffers->pointer() == NULL) {
+ return BAD_VALUE;
+ }
+ }
+ return NO_ERROR;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const override {
+ /* input/output arguments*/
+ (void)parcel->write(&flags, sizeof(audio_input_flags_t));
+ (void)parcel->writeInt64(frameCount);
+ (void)parcel->writeInt64(notificationFrameCount);
+ (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->write(&sessionId, sizeof(audio_session_t));
+
+ /* output arguments*/
+ (void)parcel->writeUint32(sampleRate);
+ (void)parcel->write(&inputId, sizeof(audio_io_handle_t));
+ if (cblk != 0) {
+ (void)parcel->writeInt32(1);
+ (void)parcel->writeStrongBinder(IInterface::asBinder(cblk));
+ } else {
+ (void)parcel->writeInt32(0);
+ }
+ if (buffers != 0) {
+ (void)parcel->writeInt32(1);
+ (void)parcel->writeStrongBinder(IInterface::asBinder(buffers));
+ } else {
+ (void)parcel->writeInt32(0);
+ }
+
+ return NO_ERROR;
+ }
+
+ /* input/output */
+ audio_input_flags_t flags;
+ size_t frameCount;
+ size_t notificationFrameCount;
+ audio_port_handle_t selectedDeviceId;
+ audio_session_t sessionId;
+
+ /* output */
+ uint32_t sampleRate;
+ audio_io_handle_t inputId;
+ sp<IMemory> cblk;
+ sp<IMemory> buffers;
+ };
+
// invariant on exit for all APIs that return an sp<>:
// (return value != 0) == (*status == NO_ERROR)
@@ -186,26 +323,9 @@
CreateTrackOutput& output,
status_t *status) = 0;
- virtual sp<media::IAudioRecord> openRecord(
- // On successful return, AudioFlinger takes over the handle
- // reference and will release it when the track is destroyed.
- // However on failure, the client is responsible for release.
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& callingPackage,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid, // -1 means unused, otherwise must be valid non-0
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers, // return value 0 means it follows cblk
- status_t *status,
- audio_port_handle_t portId) = 0;
+ virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status) = 0;
// FIXME Surprisingly, format/latency don't work for input handles
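
With the parcelable wrappers above, a createRecord() call collapses into filling one input struct and reading one output struct instead of threading a dozen in/out parameters through Binder. A rough caller-side sketch (illustrative only: the m-prefixed locals stand in for whatever state AudioRecord keeps, and error handling and locking are omitted):

// Caller-side sketch of IAudioFlinger::createRecord() with the new wrappers.
IAudioFlinger::CreateRecordInput input;
IAudioFlinger::CreateRecordOutput output;
status_t status;

input.attr = mAttributes;                        // audio_attributes_t for the capture
input.config.sample_rate = mSampleRate;          // audio_config_base_t
input.config.channel_mask = mChannelMask;
input.config.format = mFormat;
input.clientInfo.clientUid = mClientUid;
input.clientInfo.clientPid = mClientPid;
input.clientInfo.clientTid = tid;
input.clientInfo.packageName = opPackageName;
input.opPackageName = opPackageName;
input.flags = mFlags;                            // in/out: may be downgraded by AudioFlinger
input.frameCount = mReqFrameCount;               // in/out
input.notificationFrameCount = mNotificationFramesReq;
input.selectedDeviceId = mSelectedDeviceId;
input.sessionId = mSessionId;

sp<media::IAudioRecord> record =
        audioFlinger->createRecord(input, output, &status);
if (record != 0 && status == NO_ERROR) {
    // AudioFlinger hands back the negotiated values and the shared memory.
    mFrameCount = output.frameCount;
    mSampleRate = output.sampleRate;
    mInput      = output.inputId;
    sp<IMemory> cblk    = output.cblk;           // control block
    sp<IMemory> buffers = output.buffers;        // may be 0: data follows the cblk
}
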
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 7c88e57..5558b77 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -178,6 +178,8 @@
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
+private:
+ void sanitizeAudioAttributes(audio_attributes_t* attr);
};
// ----------------------------------------------------------------------------
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
new file mode 100644
index 0000000..e9c1606
--- /dev/null
+++ b/media/libaudioclient/tests/Android.bp
@@ -0,0 +1,20 @@
+cc_defaults {
+ name: "libaudioclient_tests_defaults",
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+}
+
+cc_test {
+ name: "test_create_audiotrack",
+ defaults: ["libaudioclient_tests_defaults"],
+ srcs: ["test_create_audiotrack.cpp"],
+ shared_libs: [
+ "libaudioclient",
+ "libcutils",
+ "libutils",
+ "libbinder",
+ ],
+ data: ["track_test_input_*.txt"],
+}
diff --git a/media/libaudioclient/tests/test_create_audiotrack.cpp b/media/libaudioclient/tests/test_create_audiotrack.cpp
new file mode 100644
index 0000000..b0351b2
--- /dev/null
+++ b/media/libaudioclient/tests/test_create_audiotrack.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Creates AudioTrack instances with parameters read from an input file
+ * (or with default parameters if no file is given) and dumps the resulting
+ * track state to an output file for comparison against a reference.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <binder/MemoryBase.h>
+#include <binder/MemoryDealer.h>
+#include <binder/MemoryHeapBase.h>
+#include <media/AudioTrack.h>
+
+#define MAX_INPUT_FILE_LINE_LENGTH 512
+#define MAX_OUTPUT_FILE_LINE_LENGTH 512
+
+#define NUM_ARGUMENTS 10
+#define VERSION_KEY "version"
+#define VERSION_VALUE "1.0"
+
+namespace android {
+
+int readLine(FILE *inputFile, char *line, int size) {
+ int ret = 0;
+ while (true) {
+ char *str = fgets(line, size, inputFile);
+ if (str == nullptr) {
+ ret = -1;
+ break;
+ }
+ if (feof(inputFile) != 0 || ferror(inputFile) != 0) {
+ ret = -1;
+ break;
+ }
+ if (strlen(str) != 0 && str[0] != '#') {
+ break;
+ }
+ }
+ return ret;
+}
+
+bool checkVersion(FILE *inputFile)
+{
+ char line[MAX_INPUT_FILE_LINE_LENGTH];
+ char versionKey[MAX_INPUT_FILE_LINE_LENGTH];
+ char versionValue[MAX_INPUT_FILE_LINE_LENGTH];
+
+ if (readLine(inputFile, line, MAX_INPUT_FILE_LINE_LENGTH) != 0) {
+ fprintf(stderr, "Missing version in input file\n");
+ return false;
+ }
+
+ if (sscanf(line, " %s %s", versionKey, versionValue) != 2) {
+ fprintf(stderr, "Malformed version in input file\n");
+ return false;
+ }
+ if (strcmp(versionKey, VERSION_KEY) != 0) {
+ fprintf(stderr, "Malformed version in input file\n");
+ return false;
+ }
+ if (strcmp(versionValue, VERSION_VALUE) != 0) {
+ fprintf(stderr, "Wrong input file version %s expecting %s\n", versionValue, VERSION_VALUE);
+ return false;
+ }
+ return true;
+}
+
+void callback(int event __unused, void* user __unused, void *info __unused)
+{
+}
+
+void testTrack(FILE *inputFile, int outputFileFd)
+{
+ char line[MAX_INPUT_FILE_LINE_LENGTH];
+ uint32_t testCount = 0;
+ Vector<String16> args;
+
+ if (inputFile == nullptr) {
+ sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_DEFAULT,
+ 0 /* sampleRate */,
+ AUDIO_FORMAT_DEFAULT,
+ AUDIO_CHANNEL_OUT_STEREO);
+ if (track == 0 || track->initCheck() != NO_ERROR) {
+ write(outputFileFd, "Error creating AudioTrack\n",
+ sizeof("Error creating AudioTrack\n") - 1);
+ } else {
+ track->dump(outputFileFd, args);
+ }
+ return;
+ }
+
+ // check version
+ if (!checkVersion(inputFile)) {
+ return;
+ }
+
+ while (readLine(inputFile, line, MAX_INPUT_FILE_LINE_LENGTH) == 0) {
+ uint32_t sampleRate;
+ audio_format_t format;
+ audio_channel_mask_t channelMask;
+ size_t frameCount;
+ int32_t notificationFrames;
+ uint32_t useSharedBuffer;
+ audio_output_flags_t flags;
+ audio_session_t sessionId;
+ audio_usage_t usage;
+ audio_content_type_t contentType;
+ audio_attributes_t attributes;
+ sp<IMemory> sharedBuffer;
+ sp<MemoryDealer> heap;
+ audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+ status_t status;
+ char statusStr[MAX_OUTPUT_FILE_LINE_LENGTH];
+ bool offload = false;
+ bool fast = false;
+
+ if (sscanf(line, " %u %x %x %zu %d %u %x %u %u %u",
+ &sampleRate, &format, &channelMask,
+ &frameCount, &notificationFrames, &useSharedBuffer,
+ &flags, &sessionId, &usage, &contentType) != NUM_ARGUMENTS) {
+ fprintf(stderr, "Malformed line for test #%u in input file\n", testCount+1);
+ continue;
+ }
+ testCount++;
+
+ if (useSharedBuffer != 0) {
+ size_t heapSize = audio_channel_count_from_out_mask(channelMask) *
+ audio_bytes_per_sample(format) * frameCount;
+ heap = new MemoryDealer(heapSize, "AudioTrack Heap Base");
+ sharedBuffer = heap->allocate(heapSize);
+ frameCount = 0;
+ notificationFrames = 0;
+ }
+ if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ offloadInfo.sample_rate = sampleRate;
+ offloadInfo.channel_mask = channelMask;
+ offloadInfo.format = format;
+ offload = true;
+ }
+ if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
+ fast = true;
+ }
+
+ memset(&attributes, 0, sizeof(attributes));
+ attributes.content_type = contentType;
+ attributes.usage = usage;
+
+ sp<AudioTrack> track = new AudioTrack();
+
+ track->set(AUDIO_STREAM_DEFAULT,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ (fast || offload) ? callback : nullptr,
+ nullptr,
+ notificationFrames,
+ sharedBuffer,
+ false,
+ sessionId,
+ ((fast && sharedBuffer == 0) || offload) ?
+ AudioTrack::TRANSFER_CALLBACK : AudioTrack::TRANSFER_DEFAULT,
+ offload ? &offloadInfo : nullptr,
+ getuid(),
+ getpid(),
+ &attributes,
+ false,
+ 1.0f,
+ AUDIO_PORT_HANDLE_NONE);
+ status = track->initCheck();
+ sprintf(statusStr, "\n#### Test %u status %d\n", testCount, status);
+ write(outputFileFd, statusStr, strlen(statusStr));
+ if (status != NO_ERROR) {
+ continue;
+ }
+ track->dump(outputFileFd, args);
+ }
+}
+
+}; // namespace android
+
+
+int main(int argc, char **argv)
+{
+ FILE *inputFile = nullptr;
+ int outputFileFd = STDOUT_FILENO;
+ mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ int ret = 0;
+
+ if (argc > 5) {
+ fprintf(stderr, "Usage: %s [-i input_params.txt] [-o output_params.txt]\n", argv[0]);
+ return 1;
+ }
+
+ argv++;
+ while (*argv) {
+ if (strcmp(*argv, "-i") == 0) {
+ argv++;
+ if (*argv) {
+ inputFile = fopen(*argv, "r");
+ if (inputFile == nullptr) {
+ ret = 1;
+ }
+ } else {
+ ret = 1;
+ }
+ }
+ if (strcmp(*argv, "-o") == 0) {
+ argv++;
+ if (*argv) {
+ outputFileFd = open(*argv, O_WRONLY|O_CREAT, mode);
+ if (outputFileFd < 0) {
+ ret = 1;
+ }
+ } else {
+ ret = 1;
+ }
+ argv++;
+ }
+ if (*argv) {
+ argv++;
+ }
+ }
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ android::testTrack(inputFile, outputFileFd);
+
+ if (inputFile) {
+ fclose(inputFile);
+ }
+ if (outputFileFd >= 0 && outputFileFd != STDOUT_FILENO) {
+ close(outputFileFd);
+ }
+
+ return ret;
+}
+
diff --git a/media/libaudioclient/tests/track_test_input_v1.0_ref.txt b/media/libaudioclient/tests/track_test_input_v1.0_ref.txt
new file mode 100644
index 0000000..b923ff3
--- /dev/null
+++ b/media/libaudioclient/tests/track_test_input_v1.0_ref.txt
@@ -0,0 +1,40 @@
+version 1.0
+# Input file for test_create_audiotrack
+# Add one line for each tested AudioTrack constructor with the following arguments:
+# sampleRate format channelMask frameCount notificationFrames sharedBuffer flags sessionId usage contentType
+# sample rate tests
+ 48000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 24000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 16000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 8000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 44100 0x1 0x3 4410 2205 0 0x0 0 1 2
+ 22050 0x1 0x3 4410 2205 0 0x0 0 1 2
+ 11025 0x1 0x3 4410 2205 0 0x0 0 1 2
+# format tests
+ 48000 0x2 0x3 4800 2400 0 0x0 0 1 2
+ 48000 0x3 0x3 4800 2400 0 0x0 0 1 2
+ 48000 0x5 0x3 4800 2400 0 0x0 0 1 2
+# channel mask tests
+ 48000 0x1 0x1 4800 2400 0 0x0 0 1 2
+ 48000 0x1 0x3F 4800 2400 0 0x0 0 1 2
+ 48000 0x1 0x63F 4800 2400 0 0x0 0 1 2
+# framecount tests
+ 48000 0x1 0x3 0 0 0 0x0 0 1 2
+ 48000 0x1 0x3 48000 0 0 0x0 0 1 2
+ 48000 0x1 0x3 0 -2 0 0x4 0 1 2
+# shared memory tests
+ 48000 0x1 0x3 4800 2400 1 0x0 0 1 2
+ 48000 0x1 0x3 4800 2400 1 0x4 0 1 2
+# flags test
+ 48000 0x1 0x3 4800 2400 0 0x4 0 1 2
+ 48000 0x1 0x3 4800 2400 0 0x8 0 1 2
+ 44100 0x1000000 0x3 4800 2400 0 0x11 0 1 2
+# session tests
+ 48000 0x1 0x3 4800 2400 0 0x0 1001 1 2
+# attributes tests
+ 48000 0x1 0x3 4800 2400 0 0x0 0 0 0
+ 48000 0x1 0x3 4800 2400 0 0x0 0 2 1
+ 48000 0x1 0x3 4800 2400 0 0x0 0 4 2
+ 48000 0x1 0x3 4800 2400 0 0x0 0 5 2
+ 48000 0x1 0x3 4800 2400 0 0x0 0 11 1
+ 48000 0x1 0x3 4800 2400 0 0x0 0 12 1
diff --git a/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt b/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt
new file mode 100644
index 0000000..5fe433c
--- /dev/null
+++ b/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt
@@ -0,0 +1,308 @@
+
+#### Test 1 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(49), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 2 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(57), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(24000), original sample rate(24000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (250), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 3 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(65), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(16000), original sample rate(16000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (350), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 4 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(73), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(8000), original sample rate(8000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (650), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 5 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(81), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(44100), original sample rate(44100), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 6 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(89), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(22050), original sample rate(22050), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (250), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 7 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(97), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(11025), original sample rate(11025), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (450), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 8 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(105), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(2), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 9 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(113), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(3), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 10 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(121), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(5), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 11 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(129), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(1), channel count(1)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 12 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(137), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3f), channel count(6)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 13 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(145), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(63f), channel count(8)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 14 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(153), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(1924), req. frame count(1924)
+ notif. frame count(962), req. notif. frame count(0), req. notif. per buff(0)
+ latency (90), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 15 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(161), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(48000), req. frame count(48000)
+ notif. frame count(24000), req. notif. frame count(0), req. notif. per buff(0)
+ latency (1050), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 16 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(169), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(480), req. frame count(480)
+ notif. frame count(240), req. notif. frame count(0), req. notif. per buff(2)
+ latency (60), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 17 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(177), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(0), req. notif. frame count(0), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 18 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(185), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(0), req. notif. frame count(0), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 19 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(193), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(240), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 20 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(201), flags(8)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 21 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(209), flags(11)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1000000), channel mask(3), channel count(2)
+ sample rate(44100), original sample rate(44100), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(4800), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (204), selected device Id(0), routed device Id(2)
+ output(53) AF latency (96) AF frame count(262144) AF SampleRate(44100)
+
+#### Test 22 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(1001), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 23 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(217), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 24 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(225), flags(0)
+ stream type(0), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (140), selected device Id(0), routed device Id(1)
+ output(45) AF latency (40) AF frame count(960) AF SampleRate(48000)
+
+#### Test 25 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(233), flags(0)
+ stream type(4), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(3)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 26 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(241), flags(0)
+ stream type(5), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(3)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 27 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(249), flags(0)
+ stream type(10), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 28 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(257), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
diff --git a/media/libeffects/config/Android.bp b/media/libeffects/config/Android.bp
index 4398a91..3e88c7c 100644
--- a/media/libeffects/config/Android.bp
+++ b/media/libeffects/config/Android.bp
@@ -5,6 +5,11 @@
srcs: ["src/EffectsConfig.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
shared_libs: [
"liblog",
"libtinyxml2",
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index f7df2b4..6b063e8 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -214,12 +214,12 @@
return mPkgName;
}
-MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int32_t pkgVersionCode) {
+MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int64_t pkgVersionCode) {
mPkgVersionCode = pkgVersionCode;
return *this;
}
-int32_t MediaAnalyticsItem::getPkgVersionCode() const {
+int64_t MediaAnalyticsItem::getPkgVersionCode() const {
return mPkgVersionCode;
}
@@ -640,7 +640,7 @@
mPid = data.readInt32();
mUid = data.readInt32();
mPkgName = data.readCString();
- mPkgVersionCode = data.readInt32();
+ mPkgVersionCode = data.readInt64();
mSessionID = data.readInt64();
mFinalized = data.readInt32();
mTimestamp = data.readInt64();
@@ -687,7 +687,7 @@
data->writeInt32(mPid);
data->writeInt32(mUid);
data->writeCString(mPkgName.c_str());
- data->writeInt32(mPkgVersionCode);
+ data->writeInt64(mPkgVersionCode);
data->writeInt64(mSessionID);
data->writeInt32(mFinalized);
data->writeInt64(mTimestamp);
@@ -766,7 +766,7 @@
if (version >= PROTO_V1) {
result.append(mPkgName);
- snprintf(buffer, sizeof(buffer), ":%d:", mPkgVersionCode);
+ snprintf(buffer, sizeof(buffer), ":%" PRId64 ":", mPkgVersionCode);
result.append(buffer);
}
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index 5f9b916..ec9b660 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -173,8 +173,8 @@
MediaAnalyticsItem &setPkgName(AString);
AString getPkgName() const;
- MediaAnalyticsItem &setPkgVersionCode(int32_t);
- int32_t getPkgVersionCode() const;
+ MediaAnalyticsItem &setPkgVersionCode(int64_t);
+ int64_t getPkgVersionCode() const;
// our serialization code for binder calls
int32_t writeToParcel(Parcel *);
@@ -205,7 +205,7 @@
pid_t mPid;
uid_t mUid;
AString mPkgName;
- int32_t mPkgVersionCode;
+ int64_t mPkgVersionCode;
// let's reuse a binder connection
static sp<IMediaAnalyticsService> sAnalyticsService;
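
The version-code widening only holds together because the Parcel writer, the Parcel reader, and the printf format are changed in the same patch: Parcel data is positional, with no per-field tags, so a 32/64-bit mismatch silently shifts every later field. A toy round-trip illustrating that (standalone sketch, not the MediaAnalyticsItem layout):

#include <binder/Parcel.h>
#include <inttypes.h>

// Toy illustration: Parcel fields are positional, so reader and writer must
// agree on every field's width or all subsequent reads are misaligned.
void roundTrip() {
    android::Parcel p;
    p.writeInt64(INT64_C(2800000000));   // a version code that no longer fits in int32_t
    p.writeInt64(42);                    // a field serialized after it

    p.setDataPosition(0);
    int64_t versionCode = p.readInt64(); // matching 64-bit read: still aligned
    int64_t next = p.readInt64();        // reads back 42 as expected
    // A readInt32() for versionCode here would consume only half the field,
    // and `next` would come back as garbage.
    (void)versionCode; (void)next;
}
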
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
index 478c460..f09e93d 100644
--- a/media/libnblog/PerformanceAnalysis.cpp
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -230,6 +230,7 @@
}
// rounds value to precision based on log-distance from mean
+__attribute__((no_sanitize("signed-integer-overflow")))
inline double logRound(double x, double mean) {
// Larger values decrease range of high resolution and prevent overflow
// of a histogram on the console.
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 0c71487..a618676 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -37,6 +37,8 @@
"AudioPlayer.cpp",
"AudioSource.cpp",
"BufferImpl.cpp",
+ "CCodec.cpp",
+ "CCodecBufferChannel.cpp",
"CodecBase.cpp",
"CallbackDataSource.cpp",
"CallbackMediaSource.cpp",
@@ -89,6 +91,7 @@
"libdl",
"libdrmframework",
"libgui",
+ "libion",
"liblog",
"libmedia",
"libmedia_omx",
@@ -100,6 +103,7 @@
"libui",
"libutils",
"libmedia_helper",
+ "libstagefright_codec2",
"libstagefright_foundation",
"libstagefright_omx",
"libstagefright_omx_utils",
@@ -111,6 +115,11 @@
"android.hidl.allocator@1.0",
"android.hardware.cas.native@1.0",
"android.hardware.media.omx@1.0",
+ "android.hardware.graphics.allocator@2.0",
+ "android.hardware.graphics.mapper@2.0",
+
+ // XXX: hack
+ "libstagefright_soft_c2avcdec",
],
static_libs: [
@@ -125,6 +134,9 @@
"libstagefright_esds",
"libstagefright_id3",
"libFLAC",
+
+ // XXX: hack
+ "libstagefright_codec2_vndk",
],
export_shared_lib_headers: [
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index fee3739..9fb6d34 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -24,11 +24,14 @@
#include <media/ICrypto.h>
#include <utils/NativeHandle.h>
+#include "include/Codec2Buffer.h"
#include "include/SecureBuffer.h"
#include "include/SharedMemoryBuffer.h"
namespace android {
+// SharedMemoryBuffer
+
SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem)
: MediaCodecBuffer(format, new ABuffer(mem->pointer(), mem->size())),
mMemory(mem) {
@@ -39,6 +42,8 @@
mTMemory(mem) {
}
+// SecureBuffer
+
SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
: MediaCodecBuffer(format, new ABuffer(nullptr, size)),
mPointer(ptr) {
@@ -59,4 +64,28 @@
return ICrypto::kDestinationTypeNativeHandle;
}
+// Codec2Buffer
+
+// static
+sp<Codec2Buffer> Codec2Buffer::allocate(
+ const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block) {
+ C2WriteView writeView(block->map().get());
+ if (writeView.error() != C2_OK) {
+ return nullptr;
+ }
+ return new Codec2Buffer(format, new ABuffer(writeView.base(), writeView.capacity()), block);
+}
+
+C2ConstLinearBlock Codec2Buffer::share() {
+ return mBlock->share(offset(), size(), C2Fence());
+}
+
+Codec2Buffer::Codec2Buffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &buffer,
+ const std::shared_ptr<C2LinearBlock> &block)
+ : MediaCodecBuffer(format, buffer),
+ mBlock(block) {
+}
+
} // namespace android
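
Codec2Buffer::allocate() maps the linear block up front so the rest of the framework can treat it like any other MediaCodecBuffer, and share() later re-exports exactly the written range as a read-only block. A rough producer-side sketch of that flow (illustrative: how `format` and a writable `block` come from a block pool is assumed, and queuing the resulting C2ConstLinearBlock into a C2Work item is omitted):

// Illustrative use of the Codec2Buffer wrapper added above.
// `format` is an sp<AMessage>, `block` a writable std::shared_ptr<C2LinearBlock>.
sp<Codec2Buffer> buffer = Codec2Buffer::allocate(format, block);
if (buffer == nullptr) {
    return;                               // mapping the block for CPU write failed
}

// Fill it like any MediaCodecBuffer, then record the valid payload size...
memcpy(buffer->base(), payload, payloadSize);
buffer->setRange(0, payloadSize);

// ...and hand the written region back to the component as read-only input.
C2ConstLinearBlock input = buffer->share();
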
diff --git a/media/libstagefright/CCodec.cpp b/media/libstagefright/CCodec.cpp
new file mode 100644
index 0000000..080d00f
--- /dev/null
+++ b/media/libstagefright/CCodec.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CCodec"
+#include <utils/Log.h>
+
+// XXX: HACK
+#include "codecs/avcdec/C2SoftAvcDec.h"
+
+#include <thread>
+
+#include <gui/Surface.h>
+#include <media/stagefright/CCodec.h>
+
+#include "include/CCodecBufferChannel.h"
+
+using namespace std::chrono_literals;
+
+namespace android {
+
+namespace {
+
+class CCodecWatchdog : public AHandler {
+private:
+ enum {
+ kWhatRegister,
+ kWhatWatch,
+ };
+ constexpr static int64_t kWatchIntervalUs = 3000000; // 3 secs
+
+public:
+ static sp<CCodecWatchdog> getInstance() {
+ Mutexed<sp<CCodecWatchdog>>::Locked instance(sInstance);
+ if (*instance == nullptr) {
+ *instance = new CCodecWatchdog;
+ (*instance)->init();
+ }
+ return *instance;
+ }
+
+ ~CCodecWatchdog() = default;
+
+ void registerCodec(CCodec *codec) {
+ sp<AMessage> msg = new AMessage(kWhatRegister, this);
+ msg->setPointer("codec", codec);
+ msg->post();
+ }
+
+protected:
+ void onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatRegister: {
+ void *ptr = nullptr;
+ CHECK(msg->findPointer("codec", &ptr));
+ Mutexed<std::list<wp<CCodec>>>::Locked codecs(mCodecs);
+ codecs->emplace_back((CCodec *)ptr);
+ break;
+ }
+
+ case kWhatWatch: {
+ Mutexed<std::list<wp<CCodec>>>::Locked codecs(mCodecs);
+ for (auto it = codecs->begin(); it != codecs->end(); ) {
+ sp<CCodec> codec = it->promote();
+ if (codec == nullptr) {
+ it = codecs->erase(it);
+ continue;
+ }
+ codec->initiateReleaseIfStuck();
+ ++it;
+ }
+ msg->post(kWatchIntervalUs);
+ break;
+ }
+
+ default: {
+ TRESPASS("CCodecWatchdog: unrecognized message");
+ }
+ }
+ }
+
+private:
+ CCodecWatchdog() : mLooper(new ALooper) {}
+
+ void init() {
+ mLooper->setName("CCodecWatchdog");
+ mLooper->registerHandler(this);
+ mLooper->start();
+ (new AMessage(kWhatWatch, this))->post(kWatchIntervalUs);
+ }
+
+ static Mutexed<sp<CCodecWatchdog>> sInstance;
+
+ sp<ALooper> mLooper;
+ Mutexed<std::list<wp<CCodec>>> mCodecs;
+};
+
+Mutexed<sp<CCodecWatchdog>> CCodecWatchdog::sInstance;
+
+class CCodecListener : public C2Component::Listener {
+public:
+ CCodecListener(const std::shared_ptr<CCodecBufferChannel> &channel)
+ : mChannel(channel) {
+ }
+
+ virtual void onWorkDone_nb(
+ std::weak_ptr<C2Component> component,
+ std::vector<std::unique_ptr<C2Work>> workItems) override {
+ (void) component;
+ mChannel->onWorkDone(std::move(workItems));
+ }
+
+ virtual void onTripped_nb(
+ std::weak_ptr<C2Component> component,
+ std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
+ // TODO
+ (void) component;
+ (void) settingResult;
+ }
+
+ virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
+ // TODO
+ (void) component;
+ (void) errorCode;
+ }
+
+private:
+ std::shared_ptr<CCodecBufferChannel> mChannel;
+};
+
+} // namespace
+
+CCodec::CCodec()
+ : mChannel(new CCodecBufferChannel([this] (status_t err, enum ActionCode actionCode) {
+ mCallback->onError(err, actionCode);
+ })) {
+ CCodecWatchdog::getInstance()->registerCodec(this);
+}
+
+CCodec::~CCodec() {
+}
+
+std::shared_ptr<BufferChannelBase> CCodec::getBufferChannel() {
+ return mChannel;
+}
+
+void CCodec::initiateAllocateComponent(const sp<AMessage> &msg) {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != RELEASED) {
+ mCallback->onError(INVALID_OPERATION, ACTION_CODE_FATAL);
+ return;
+ }
+ state->mState = ALLOCATING;
+ }
+
+ AString componentName;
+ if (!msg->findString("componentName", &componentName)) {
+ // TODO: find componentName appropriate with the media type
+ }
+
+ sp<AMessage> allocMsg(new AMessage(kWhatAllocate, this));
+ allocMsg->setString("componentName", componentName);
+ allocMsg->post();
+}
+
+void CCodec::allocate(const AString &componentName) {
+ // TODO: use C2ComponentStore to create component
+ mListener.reset(new CCodecListener(mChannel));
+
+ std::shared_ptr<C2Component> comp(new C2SoftAvcDec(componentName.c_str(), 0));
+ comp->setListener_sm(mListener);
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != ALLOCATING) {
+ state->mState = RELEASED;
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = ALLOCATED;
+ state->mComp = comp;
+ }
+ mChannel->setComponent(comp);
+ mCallback->onComponentAllocated(comp->intf()->getName().c_str());
+}
+
+void CCodec::initiateConfigureComponent(const sp<AMessage> &format) {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != ALLOCATED) {
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
+ }
+
+ sp<AMessage> msg(new AMessage(kWhatConfigure, this));
+ msg->setMessage("format", format);
+ msg->post();
+}
+
+void CCodec::configure(const sp<AMessage> &msg) {
+ sp<AMessage> inputFormat(new AMessage);
+ sp<AMessage> outputFormat(new AMessage);
+ status_t err = [=] {
+ AString mime;
+ if (!msg->findString("mime", &mime)) {
+ return BAD_VALUE;
+ }
+
+ int32_t encoder;
+ if (!msg->findInt32("encoder", &encoder)) {
+ encoder = false;
+ }
+
+ sp<RefBase> obj;
+ if (msg->findObject("native-window", &obj)) {
+ sp<Surface> surface = static_cast<Surface *>(obj.get());
+ setSurface(surface);
+ }
+
+ // TODO
+
+ return OK;
+ }();
+ if (err != OK) {
+ mCallback->onError(err, ACTION_CODE_FATAL);
+ return;
+ }
+
+ {
+ Mutexed<Formats>::Locked formats(mFormats);
+ formats->mInputFormat = inputFormat;
+ formats->mOutputFormat = outputFormat;
+ }
+ mCallback->onComponentConfigured(inputFormat, outputFormat);
+}
+
+
+void CCodec::initiateCreateInputSurface() {
+ // TODO
+}
+
+void CCodec::initiateSetInputSurface(const sp<PersistentSurface> &surface) {
+ // TODO
+ (void) surface;
+}
+
+void CCodec::initiateStart() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != ALLOCATED) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = STARTING;
+ }
+
+ (new AMessage(kWhatStart, this))->post();
+}
+
+void CCodec::start() {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != STARTING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp = state->mComp;
+ }
+ c2_status_t err = comp->start();
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
+ sp<AMessage> inputFormat;
+ sp<AMessage> outputFormat;
+ {
+ Mutexed<Formats>::Locked formats(mFormats);
+ inputFormat = formats->mInputFormat;
+ outputFormat = formats->mOutputFormat;
+ }
+ mChannel->start(inputFormat, outputFormat);
+
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != STARTING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = RUNNING;
+ }
+ mCallback->onStartCompleted();
+}
+
+void CCodec::initiateShutdown(bool keepComponentAllocated) {
+ if (keepComponentAllocated) {
+ initiateStop();
+ } else {
+ initiateRelease();
+ }
+}
+
+void CCodec::initiateStop() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == ALLOCATED
+ || state->mState == RELEASED
+ || state->mState == STOPPING
+ || state->mState == RELEASING) {
+ // We're already stopped, released, or doing it right now.
+ state.unlock();
+ mCallback->onStopCompleted();
+ state.lock();
+ return;
+ }
+ state->mState = STOPPING;
+ }
+
+ (new AMessage(kWhatStop, this))->post();
+}
+
+void CCodec::stop() {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == RELEASING) {
+ state.unlock();
+ // Release is already in progress; treat the stop as complete.
+ mCallback->onStopCompleted();
+ state.lock();
+ return;
+ } else if (state->mState != STOPPING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp = state->mComp;
+ }
+ mChannel->stop();
+ c2_status_t err = comp->stop();
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ }
+
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == STOPPING) {
+ state->mState = ALLOCATED;
+ }
+ }
+ mCallback->onStopCompleted();
+}
+
+void CCodec::initiateRelease(bool sendCallback /* = true */) {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == RELEASED || state->mState == RELEASING) {
+ // We're already released or doing it right now.
+ if (sendCallback) {
+ state.unlock();
+ mCallback->onReleaseCompleted();
+ state.lock();
+ }
+ return;
+ }
+ if (state->mState == ALLOCATING) {
+ state->mState = RELEASING;
+ // With the altered state, allocate() will fail and clean up.
+ if (sendCallback) {
+ state.unlock();
+ mCallback->onReleaseCompleted();
+ state.lock();
+ }
+ return;
+ }
+ state->mState = RELEASING;
+ }
+
+ std::thread([this, sendCallback] { release(sendCallback); }).detach();
+}
+
+void CCodec::release(bool sendCallback) {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == RELEASED) {
+ if (sendCallback) {
+ state.unlock();
+ mCallback->onReleaseCompleted();
+ state.lock();
+ }
+ return;
+ }
+ comp = state->mComp;
+ }
+ mChannel->stop();
+ comp->release();
+
+ {
+ Mutexed<State>::Locked state(mState);
+ state->mState = RELEASED;
+ state->mComp.reset();
+ }
+ if (sendCallback) {
+ mCallback->onReleaseCompleted();
+ }
+}
+
+status_t CCodec::setSurface(const sp<Surface> &surface) {
+ return mChannel->setSurface(surface);
+}
+
+void CCodec::signalFlush() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != RUNNING) {
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
+ state->mState = FLUSHING;
+ }
+
+ (new AMessage(kWhatFlush, this))->post();
+}
+
+void CCodec::flush() {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != FLUSHING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp = state->mComp;
+ }
+
+ mChannel->stop();
+
+ std::list<std::unique_ptr<C2Work>> flushedWork;
+ c2_status_t err = comp->flush_sm(C2Component::FLUSH_COMPONENT, &flushedWork);
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ }
+
+ mChannel->flush(flushedWork);
+
+ {
+ Mutexed<State>::Locked state(mState);
+ state->mState = FLUSHED;
+ }
+ mCallback->onFlushCompleted();
+}
+
+void CCodec::signalResume() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != FLUSHED) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = RESUMING;
+ }
+
+ mChannel->start(nullptr, nullptr);
+
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != RESUMING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = RUNNING;
+ }
+}
+
+void CCodec::signalSetParameters(const sp<AMessage> &msg) {
+ // TODO
+ (void) msg;
+}
+
+void CCodec::signalEndOfInputStream() {
+}
+
+void CCodec::signalRequestIDRFrame() {
+ // TODO
+}
+
+void CCodec::onMessageReceived(const sp<AMessage> &msg) {
+ TimePoint now = std::chrono::steady_clock::now();
+ switch (msg->what()) {
+ case kWhatAllocate: {
+ // C2ComponentStore::createComponent() should return within 100ms.
+ setDeadline(now + 150ms);
+ AString componentName;
+ CHECK(msg->findString("componentName", &componentName));
+ allocate(componentName);
+ break;
+ }
+ case kWhatConfigure: {
+ // C2Component::commit_sm() should return within 5ms.
+ setDeadline(now + 50ms);
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+ configure(format);
+ break;
+ }
+ case kWhatStart: {
+ // C2Component::start() should return within 500ms.
+ setDeadline(now + 550ms);
+ start();
+ break;
+ }
+ case kWhatStop: {
+ // C2Component::stop() should return within 500ms.
+ setDeadline(now + 550ms);
+ stop();
+ break;
+ }
+ case kWhatFlush: {
+ // C2Component::flush_sm() should return within 5ms.
+ setDeadline(now + 50ms);
+ flush();
+ break;
+ }
+ default: {
+ ALOGE("unrecognized message");
+ break;
+ }
+ }
+ setDeadline(TimePoint::max());
+}
+
+void CCodec::setDeadline(const TimePoint &newDeadline) {
+ Mutexed<TimePoint>::Locked deadline(mDeadline);
+ *deadline = newDeadline;
+}
+
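+// Presumably called periodically by CCodecWatchdog (this codec registers itself with the
+// watchdog in the constructor): if the deadline armed in onMessageReceived() has passed,
+// the codec is treated as stuck and is forcefully released.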
+void CCodec::initiateReleaseIfStuck() {
+ {
+ Mutexed<TimePoint>::Locked deadline(mDeadline);
+ if (*deadline >= std::chrono::steady_clock::now()) {
+ // We're not stuck.
+ return;
+ }
+ }
+
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ initiateRelease();
+}
+
+} // namespace android
diff --git a/media/libstagefright/CCodecBufferChannel.cpp b/media/libstagefright/CCodecBufferChannel.cpp
new file mode 100644
index 0000000..9868cd4
--- /dev/null
+++ b/media/libstagefright/CCodecBufferChannel.cpp
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "CCodecBufferChannel"
+#include <utils/Log.h>
+
+#include <numeric>
+#include <thread>
+
+#include <C2PlatformSupport.h>
+
+#include <android/hardware/cas/native/1.0/IDescrambler.h>
+#include <binder/MemoryDealer.h>
+#include <gui/Surface.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/MediaCodecBuffer.h>
+#include <system/window.h>
+
+#include "include/CCodecBufferChannel.h"
+#include "include/Codec2Buffer.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
+
+namespace android {
+
+using hardware::hidl_handle;
+using hardware::hidl_string;
+using hardware::hidl_vec;
+using namespace hardware::cas::V1_0;
+using namespace hardware::cas::native::V1_0;
+
+// TODO: get this info from component
+const static size_t kMinBufferArraySize = 16;
+
+void CCodecBufferChannel::OutputBuffers::flush(
+ const std::list<std::unique_ptr<C2Work>> &flushedWork) {
+ (void) flushedWork;
+ // This is a no-op by default unless we're in array mode, where we need to keep
+ // track of the flushed work.
+}
+
+namespace {
+
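+// Returns the index of the first slot in |buffers| matching |pred|, growing the vector
+// (up to |maxSize|) when no such slot exists; returns -1 once the pool is exhausted.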
+template <class T>
+ssize_t findBufferSlot(
+ std::vector<T> *buffers,
+ size_t maxSize,
+ std::function<bool(const T&)> pred) {
+ auto it = std::find_if(buffers->begin(), buffers->end(), pred);
+ if (it == buffers->end()) {
+ if (buffers->size() < maxSize) {
+ buffers->emplace_back();
+ return buffers->size() - 1;
+ } else {
+ return -1;
+ }
+ }
+ return std::distance(buffers->begin(), it);
+}
+
+class LinearBuffer : public C2Buffer {
+public:
+ explicit LinearBuffer(C2ConstLinearBlock block) : C2Buffer({ block }) {}
+};
+
+class LinearInputBuffers : public CCodecBufferChannel::InputBuffers {
+public:
+ using CCodecBufferChannel::InputBuffers::InputBuffers;
+
+ virtual bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ *buffer = nullptr;
+ ssize_t ret = findBufferSlot<wp<Codec2Buffer>>(
+ &mBuffers, kMinBufferArraySize,
+ [] (const auto &elem) { return elem.promote() == nullptr; });
+ if (ret < 0) {
+ return false;
+ }
+ std::shared_ptr<C2LinearBlock> block;
+
+ status_t err = mAlloc->fetchLinearBlock(
+ // TODO: proper max input size
+ 65536,
+ { 0, C2MemoryUsage::kSoftwareWrite },
+ &block);
+ if (err != OK) {
+ return false;
+ }
+
+ sp<Codec2Buffer> newBuffer = Codec2Buffer::allocate(mFormat, block);
+ mBuffers[ret] = newBuffer;
+ *index = ret;
+ *buffer = newBuffer;
+ return true;
+ }
+
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ auto it = std::find(mBuffers.begin(), mBuffers.end(), buffer);
+ if (it == mBuffers.end()) {
+ return nullptr;
+ }
+ sp<Codec2Buffer> codecBuffer = it->promote();
+ // We got an sp<> reference from the caller, so this should never happen.
+ CHECK(codecBuffer != nullptr);
+ return std::make_shared<LinearBuffer>(codecBuffer->share());
+ }
+
+ virtual void flush() override {
+ }
+
+private:
+ // Buffers we passed to the client. The index of a buffer matches what
+ // was passed in BufferCallback::onInputBufferAvailable().
+ std::vector<wp<Codec2Buffer>> mBuffers;
+
+ // Buffer array we passed to the client. This only gets initialized at
+ // getInput/OutputBufferArray(), and once this is set we can't add more
+ // buffers.
+ std::vector<sp<Codec2Buffer>> mBufferArray;
+};
+
+class GraphicOutputBuffers : public CCodecBufferChannel::OutputBuffers {
+public:
+ using CCodecBufferChannel::OutputBuffers::OutputBuffers;
+
+ virtual bool registerBuffer(
+ const std::shared_ptr<C2Buffer> &buffer,
+ size_t *index,
+ sp<MediaCodecBuffer> *codecBuffer) override {
+ *codecBuffer = nullptr;
+ ssize_t ret = findBufferSlot<BufferInfo>(
+ &mBuffers,
+ kMinBufferArraySize,
+ [] (const auto &elem) { return elem.mClientBuffer.promote() == nullptr; });
+ if (ret < 0) {
+ return false;
+ }
+ sp<MediaCodecBuffer> newBuffer = new MediaCodecBuffer(
+ mFormat,
+ buffer == nullptr ? kEmptyBuffer : kDummyBuffer);
+ mBuffers[ret] = { newBuffer, buffer };
+ *index = ret;
+ *codecBuffer = newBuffer;
+ return true;
+ }
+
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ auto it = std::find_if(
+ mBuffers.begin(), mBuffers.end(),
+ [buffer] (const auto &elem) {
+ return elem.mClientBuffer.promote() == buffer;
+ });
+ if (it == mBuffers.end()) {
+ return nullptr;
+ }
+ return it->mBufferRef;
+ }
+
+private:
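+ // Placeholder backing ABuffers: kEmptyBuffer (size 0) marks an entry with no C2Buffer
+ // attached, kDummyBuffer (size 1) marks one whose data is held in mBufferRef.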
+ static const sp<ABuffer> kEmptyBuffer;
+ static const sp<ABuffer> kDummyBuffer;
+
+ struct BufferInfo {
+ // wp<> of MediaCodecBuffer for MediaCodec.
+ wp<MediaCodecBuffer> mClientBuffer;
+ // Buffer reference to hold until mClientBuffer is valid.
+ std::shared_ptr<C2Buffer> mBufferRef;
+ };
+ // Buffers we passed to the client. The index of a buffer matches what
+ // was passed in BufferCallback::onOutputBufferAvailable().
+ std::vector<BufferInfo> mBuffers;
+};
+
+const sp<ABuffer> GraphicOutputBuffers::kEmptyBuffer = new ABuffer(nullptr, 0);
+const sp<ABuffer> GraphicOutputBuffers::kDummyBuffer = new ABuffer(nullptr, 1);
+
+} // namespace
+
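+// QueueSync uses mCount both as a state flag and a refcount: -1 means the queue is
+// stopped, while a value >= 0 counts the QueueGuard holders currently in flight.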
+CCodecBufferChannel::QueueGuard::QueueGuard(
+ CCodecBufferChannel::QueueSync &sync) : mSync(sync) {
+ std::unique_lock<std::mutex> l(mSync.mMutex);
+ if (mSync.mCount == -1) {
+ mRunning = false;
+ } else {
+ ++mSync.mCount;
+ mRunning = true;
+ }
+}
+
+CCodecBufferChannel::QueueGuard::~QueueGuard() {
+ if (mRunning) {
+ --mSync.mCount;
+ }
+}
+
+void CCodecBufferChannel::QueueSync::start() {
+ std::unique_lock<std::mutex> l(mMutex);
+ // If stopped, it goes to running state; otherwise no-op.
+ int32_t expected = -1;
+ mCount.compare_exchange_strong(expected, 0);
+}
+
+void CCodecBufferChannel::QueueSync::stop() {
+ std::unique_lock<std::mutex> l(mMutex);
+ if (mCount == -1) {
+ // no-op
+ return;
+ }
+ int32_t expected = 0;
+ while (!mCount.compare_exchange_weak(expected, -1)) {
+ // compare_exchange_weak() overwrites |expected| on failure; reset it so we only
+ // transition to the stopped state once all in-flight QueueGuards have released.
+ expected = 0;
+ std::this_thread::yield();
+ }
+}
+
+CCodecBufferChannel::CCodecBufferChannel(
+ const std::function<void(status_t, enum ActionCode)> &onError)
+ : mOnError(onError),
+ mInputBuffers(new LinearInputBuffers),
+ mOutputBuffers(new GraphicOutputBuffers),
+ mFrameIndex(0u),
+ mFirstValidFrameIndex(0u) {
+}
+
+CCodecBufferChannel::~CCodecBufferChannel() {
+ if (mCrypto != nullptr && mDealer != nullptr && mHeapSeqNum >= 0) {
+ mCrypto->unsetHeap(mHeapSeqNum);
+ }
+}
+
+void CCodecBufferChannel::setComponent(const std::shared_ptr<C2Component> &component) {
+ mComponent = component;
+ // TODO: get pool ID from params
+ std::shared_ptr<C2BlockPool> pool;
+ c2_status_t err = GetCodec2BlockPool(C2BlockPool::BASIC_LINEAR, component, &pool);
+ if (err == C2_OK) {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (*buffers)->setAlloc(pool);
+ }
+}
+
+status_t CCodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
+ QueueGuard guard(mSync);
+ if (!guard.isRunning()) {
+ ALOGW("No more buffers should be queued at current state.");
+ return -ENOSYS;
+ }
+
+ int64_t timeUs;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+ int32_t flags = 0;
+ int32_t tmp = 0;
+ if (buffer->meta()->findInt32("eos", &tmp) && tmp) {
+ flags |= C2BufferPack::FLAG_END_OF_STREAM;
+ ALOGV("input EOS");
+ }
+ if (buffer->meta()->findInt32("csd", &tmp) && tmp) {
+ flags |= C2BufferPack::FLAG_CODEC_CONFIG;
+ }
+ std::unique_ptr<C2Work> work(new C2Work);
+ work->input.flags = (C2BufferPack::flags_t)flags;
+ work->input.ordinal.timestamp = timeUs;
+ work->input.ordinal.frame_index = mFrameIndex++;
+ work->input.buffers.clear();
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ work->input.buffers.push_back((*buffers)->releaseBuffer(buffer));
+ }
+ // TODO: fill info's
+
+ work->worklets.clear();
+ work->worklets.emplace_back(new C2Worklet);
+
+ std::list<std::unique_ptr<C2Work>> items;
+ items.push_back(std::move(work));
+ return mComponent->queue_nb(&items);
+}
+
+status_t CCodecBufferChannel::queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer, bool secure, const uint8_t *key,
+ const uint8_t *iv, CryptoPlugin::Mode mode, CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ AString *errorDetailMsg) {
+ // TODO
+ (void) buffer;
+ (void) secure;
+ (void) key;
+ (void) iv;
+ (void) mode;
+ (void) pattern;
+ (void) subSamples;
+ (void) numSubSamples;
+ (void) errorDetailMsg;
+ return -ENOSYS;
+}
+
+status_t CCodecBufferChannel::renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
+ ALOGV("renderOutputBuffer");
+ sp<MediaCodecBuffer> inBuffer;
+ size_t index;
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ if (!(*buffers)->requestNewBuffer(&index, &inBuffer)) {
+ inBuffer = nullptr;
+ }
+ }
+ if (inBuffer != nullptr) {
+ mCallback->onInputBufferAvailable(index, inBuffer);
+ }
+
+ std::shared_ptr<C2Buffer> c2Buffer;
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ c2Buffer = (*buffers)->releaseBuffer(buffer);
+ }
+
+ Mutexed<sp<Surface>>::Locked surface(mSurface);
+ if (*surface == nullptr) {
+ ALOGE("no surface");
+ return OK;
+ }
+
+ std::list<C2ConstGraphicBlock> blocks = c2Buffer->data().graphicBlocks();
+ if (blocks.size() != 1u) {
+ ALOGE("# of graphic blocks expected to be 1, but %zu", blocks.size());
+ return UNKNOWN_ERROR;
+ }
+
+ sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(
+ blocks.front().handle(),
+ GraphicBuffer::CLONE_HANDLE,
+ blocks.front().width(),
+ blocks.front().height(),
+ HAL_PIXEL_FORMAT_YV12,
+ // TODO
+ 1,
+ (uint64_t)GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ // TODO
+ blocks.front().width()));
+
+ status_t result = (*surface)->attachBuffer(graphicBuffer.get());
+ if (result != OK) {
+ ALOGE("attachBuffer failed: %d", result);
+ return result;
+ }
+
+ // TODO: read and set crop
+
+ result = native_window_set_buffers_timestamp((*surface).get(), timestampNs);
+ ALOGW_IF(result != OK, "failed to set buffer timestamp: %d", result);
+
+ // TODO: fix after C2Fence implementation
+#if 0
+ const C2Fence &fence = blocks.front().fence();
+ result = ((ANativeWindow *)(*surface).get())->queueBuffer(
+ (*surface).get(), graphicBuffer.get(), fence.valid() ? fence.fd() : -1);
+#else
+ result = ((ANativeWindow *)(*surface).get())->queueBuffer(
+ (*surface).get(), graphicBuffer.get(), -1);
+#endif
+ if (result != OK) {
+ ALOGE("queueBuffer failed: %d", result);
+ return result;
+ }
+
+ return OK;
+}
+
+status_t CCodecBufferChannel::discardBuffer(const sp<MediaCodecBuffer> &buffer) {
+ ALOGV("discardBuffer");
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (void) (*buffers)->releaseBuffer(buffer);
+ }
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ (void) (*buffers)->releaseBuffer(buffer);
+ }
+ return OK;
+}
+
+#if 0
+void fillBufferArray_l(Mutexed<Buffers>::Locked &buffers) {
+ for (size_t i = 0; i < buffers->mClientBuffer.size(); ++i) {
+ sp<Codec2Buffer> buffer(buffers->mClientBuffer.get(i).promote());
+ if (buffer == nullptr) {
+ buffer = allocateBuffer_l(buffers->mAlloc);
+ }
+ buffers->mBufferArray.push_back(buffer);
+ }
+ while (buffers->mBufferArray.size() < kMinBufferArraySize) {
+ sp<Codec2Buffer> buffer = allocateBuffer_l(buffers->mAlloc);
+ // allocate buffer
+ buffers->mBufferArray.push_back(buffer);
+ }
+}
+#endif
+
+void CCodecBufferChannel::getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+ (void) array;
+ // TODO
+#if 0
+ array->clear();
+ Mutexed<Buffers>::Locked buffers(mInputBuffers);
+
+ if (!buffers->isArrayMode()) {
+ // mBufferArray is empty.
+ fillBufferArray_l(buffers);
+ }
+
+ for (const auto &buffer : buffers->mBufferArray) {
+ array->push_back(buffer);
+ }
+#endif
+}
+
+void CCodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+ (void) array;
+ // TODO
+#if 0
+ array->clear();
+ Mutexed<Buffers>::Locked buffers(mOutputBuffers);
+
+ if (!buffers->isArrayMode()) {
+ if (linear) {
+ // mBufferArray is empty.
+ fillBufferArray_l(buffers);
+
+ // We need to replace the allocator so that the component only returns
+ // buffer from the array.
+ ArrayModeAllocator::Builder builder(buffers->mBufferArray);
+ for (size_t i = 0; i < buffers->mClientBuffer.size(); ++i) {
+ if (buffers->mClientBuffer.get(i).promote() != nullptr) {
+ builder.markUsing(i);
+ }
+ }
+ buffers->mAlloc.reset(builder.build());
+ } else {
+ for (int i = 0; i < X; ++i) {
+ buffers->mBufferArray.push_back(dummy buffer);
+ }
+ }
+ }
+
+ for (const auto &buffer : buffers->mBufferArray) {
+ array->push_back(buffer);
+ }
+#endif
+}
+
+void CCodecBufferChannel::start(const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) {
+ if (inputFormat != nullptr) {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (*buffers)->setFormat(inputFormat);
+ }
+ if (outputFormat != nullptr) {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ (*buffers)->setFormat(outputFormat);
+ }
+
+ mSync.start();
+ // TODO: use proper buffer depth instead of this random value
+ for (size_t i = 0; i < kMinBufferArraySize; ++i) {
+ size_t index;
+ sp<MediaCodecBuffer> buffer;
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ if (!(*buffers)->requestNewBuffer(&index, &buffer)) {
+ buffers.unlock();
+ ALOGE("start: cannot allocate memory");
+ mOnError(NO_MEMORY, ACTION_CODE_FATAL);
+ buffers.lock();
+ return;
+ }
+ }
+ mCallback->onInputBufferAvailable(index, buffer);
+ }
+}
+
+void CCodecBufferChannel::stop() {
+ mSync.stop();
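+ // Remember where the next generation of frame indices begins so that onWorkDone()
+ // can drop work items that were queued before this stop().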
+ mFirstValidFrameIndex = mFrameIndex.load();
+}
+
+void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (*buffers)->flush();
+ }
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ (*buffers)->flush(flushedWork);
+ }
+}
+
+void CCodecBufferChannel::onWorkDone(std::vector<std::unique_ptr<C2Work>> workItems) {
+ for (const auto &work : workItems) {
+ if (work->result != OK) {
+ ALOGE("work failed to complete: %d", work->result);
+ mOnError(work->result, ACTION_CODE_FATAL);
+ return;
+ }
+
+ // NOTE: MediaCodec usage supposedly has only one worklet
+ if (work->worklets.size() != 1u) {
+ ALOGE("incorrect number of worklets: %zu", work->worklets.size());
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ continue;
+ }
+
+ const std::unique_ptr<C2Worklet> &worklet = work->worklets.front();
+ if (worklet->output.ordinal.frame_index < mFirstValidFrameIndex) {
+ // Discard frames from previous generation.
+ continue;
+ }
+ // NOTE: MediaCodec usage supposedly has only one output stream.
+ if (worklet->output.buffers.size() != 1u) {
+ ALOGE("incorrect number of output buffers: %zu", worklet->output.buffers.size());
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ continue;
+ }
+
+ const std::shared_ptr<C2Buffer> &buffer = worklet->output.buffers[0];
+ // TODO: transfer infos() into buffer metadata
+
+ int32_t flags = 0;
+ if (worklet->output.flags & C2BufferPack::FLAG_END_OF_STREAM) {
+ flags |= MediaCodec::BUFFER_FLAG_EOS;
+ ALOGV("output EOS");
+ }
+
+ size_t index;
+ sp<MediaCodecBuffer> outBuffer;
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ if (!(*buffers)->registerBuffer(buffer, &index, &outBuffer)) {
+ ALOGE("unable to register output buffer");
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ continue;
+ }
+
+ outBuffer->meta()->setInt64("timeUs", worklet->output.ordinal.timestamp);
+ outBuffer->meta()->setInt32("flags", flags);
+ ALOGV("index = %zu", index);
+ mCallback->onOutputBufferAvailable(index, outBuffer);
+ }
+}
+
+status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface) {
+ if (newSurface != nullptr) {
+ newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+ }
+
+ Mutexed<sp<Surface>>::Locked surface(mSurface);
+// if (newSurface == nullptr) {
+// if (*surface != nullptr) {
+// ALOGW("cannot unset a surface");
+// return INVALID_OPERATION;
+// }
+// return OK;
+// }
+//
+// if (*surface == nullptr) {
+// ALOGW("component was not configured with a surface");
+// return INVALID_OPERATION;
+// }
+
+ *surface = newSurface;
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index fa5f37ec..b529940 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -419,8 +419,11 @@
*done = (++mNumFramesDecoded >= mNumFrames);
+ if (outputFormat == NULL) {
+ return ERROR_MALFORMED;
+ }
+
int32_t width, height;
- CHECK(outputFormat != NULL);
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
@@ -540,8 +543,11 @@
status_t ImageDecoder::onOutputReceived(
const sp<MediaCodecBuffer> &videoFrameBuffer,
const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
+ if (outputFormat == NULL) {
+ return ERROR_MALFORMED;
+ }
+
int32_t width, height;
- CHECK(outputFormat != NULL);
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 1fe5f60..8db00f0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2951,212 +2951,215 @@
mGotStartKeyFrame = true;
}
////////////////////////////////////////////////////////////////////////////////
- if (mStszTableEntries->count() == 0) {
- mFirstSampleTimeRealUs = systemTime() / 1000;
- mStartTimestampUs = timestampUs;
- mOwner->setStartTimestampUs(mStartTimestampUs);
- previousPausedDurationUs = mStartTimestampUs;
- }
- if (mResumed) {
- int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- previousPausedDurationUs += pausedDurationUs - lastDurationUs;
- mResumed = false;
- }
- TimestampDebugHelperEntry timestampDebugEntry;
- timestampUs -= previousPausedDurationUs;
- timestampDebugEntry.pts = timestampUs;
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- if (mIsVideo) {
- /*
- * Composition time: timestampUs
- * Decoding time: decodingTimeUs
- * Composition time offset = composition time - decoding time
- */
- int64_t decodingTimeUs;
- CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
- decodingTimeUs -= previousPausedDurationUs;
-
- // ensure non-negative, monotonic decoding time
- if (mLastDecodingTimeUs < 0) {
- decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
- } else {
- // increase decoding time by at least the larger vaule of 1 tick and
- // 0.1 milliseconds. This needs to take into account the possible
- // delta adjustment in DurationTicks in below.
- decodingTimeUs = std::max(mLastDecodingTimeUs +
- std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
- }
-
- mLastDecodingTimeUs = decodingTimeUs;
- timestampDebugEntry.dts = decodingTimeUs;
- timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
- // Insert the timestamp into the mTimestampDebugHelper
- if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
- mTimestampDebugHelper.pop_front();
- }
- mTimestampDebugHelper.push_back(timestampDebugEntry);
-
- cttsOffsetTimeUs =
- timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
- if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- timestampUs = decodingTimeUs;
- ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
- timestampUs, cttsOffsetTimeUs);
-
- // Update ctts box table if necessary
- currCttsOffsetTimeTicks =
- (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
- if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
+ if (!mIsHeic) {
if (mStszTableEntries->count() == 0) {
- // Force the first ctts table entry to have one single entry
- // so that we can do adjustment for the initial track start
- // time offset easily in writeCttsBox().
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
- cttsSampleCount = 0; // No sample in ctts box is pending
- } else {
- if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
- addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- cttsSampleCount = 1; // One sample in ctts box is pending
+ mFirstSampleTimeRealUs = systemTime() / 1000;
+ mStartTimestampUs = timestampUs;
+ mOwner->setStartTimestampUs(mStartTimestampUs);
+ previousPausedDurationUs = mStartTimestampUs;
+ }
+
+ if (mResumed) {
+ int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
+ if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
+ if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ previousPausedDurationUs += pausedDurationUs - lastDurationUs;
+ mResumed = false;
+ }
+ TimestampDebugHelperEntry timestampDebugEntry;
+ timestampUs -= previousPausedDurationUs;
+ timestampDebugEntry.pts = timestampUs;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mIsVideo) {
+ /*
+ * Composition time: timestampUs
+ * Decoding time: decodingTimeUs
+ * Composition time offset = composition time - decoding time
+ */
+ int64_t decodingTimeUs;
+ CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
+ decodingTimeUs -= previousPausedDurationUs;
+
+ // ensure non-negative, monotonic decoding time
+ if (mLastDecodingTimeUs < 0) {
+ decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
} else {
- ++cttsSampleCount;
+ // increase decoding time by at least the larger value of 1 tick and
+ // 0.1 milliseconds. This needs to take into account the possible
+ // delta adjustment in DurationTicks below.
+ decodingTimeUs = std::max(mLastDecodingTimeUs +
+ std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
}
- }
- // Update ctts time offset range
- if (mStszTableEntries->count() == 0) {
- mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else {
- if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mLastDecodingTimeUs = decodingTimeUs;
+ timestampDebugEntry.dts = decodingTimeUs;
+ timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
+ // Insert the timestamp into the mTimestampDebugHelper
+ if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+ mTimestampDebugHelper.pop_front();
+ }
+ mTimestampDebugHelper.push_back(timestampDebugEntry);
+
+ cttsOffsetTimeUs =
+ timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+ if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ timestampUs = decodingTimeUs;
+ ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
+ timestampUs, cttsOffsetTimeUs);
+
+ // Update ctts box table if necessary
+ currCttsOffsetTimeTicks =
+ (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+ if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mStszTableEntries->count() == 0) {
+ // Force the first ctts table entry to have one single entry
+ // so that we can do adjustment for the initial track start
+ // time offset easily in writeCttsBox().
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+ cttsSampleCount = 0; // No sample in ctts box is pending
+ } else {
+ if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ cttsSampleCount = 1; // One sample in ctts box is pending
+ } else {
+ ++cttsSampleCount;
+ }
+ }
+
+ // Update ctts time offset range
+ if (mStszTableEntries->count() == 0) {
mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else {
+ if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
+ mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ }
}
}
- }
- if (mOwner->isRealTimeRecording()) {
- if (mIsAudio) {
- updateDriftTime(meta_data);
- }
- }
-
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
- trackName, timestampUs, previousPausedDurationUs);
- if (timestampUs > mTrackDurationUs) {
- mTrackDurationUs = timestampUs;
- }
-
- // We need to use the time scale based ticks, rather than the
- // timestamp itself to determine whether we have to use a new
- // stts entry, since we may have rounding errors.
- // The calculation is intended to reduce the accumulated
- // rounding errors.
- currDurationTicks =
- ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
- (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
- if (currDurationTicks < 0ll) {
- ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
- (long long)timestampUs, (long long)lastTimestampUs, trackName);
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- // if the duration is different for this sample, see if it is close enough to the previous
- // duration that we can fudge it and use the same value, to avoid filling the stts table
- // with lots of near-identical entries.
- // "close enough" here means that the current duration needs to be adjusted by less
- // than 0.1 milliseconds
- if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
- int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
- + (mTimeScale / 2)) / mTimeScale;
- if (deltaUs > -100 && deltaUs < 100) {
- // use previous ticks, and adjust timestamp as if it was actually that number
- // of ticks
- currDurationTicks = lastDurationTicks;
- timestampUs += deltaUs;
- }
- }
- mStszTableEntries->add(htonl(sampleSize));
- if (mStszTableEntries->count() > 2) {
-
- // Force the first sample to have its own stts entry so that
- // we can adjust its value later to maintain the A/V sync.
- if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
- addOneSttsTableEntry(sampleCount, lastDurationTicks);
- sampleCount = 1;
- } else {
- ++sampleCount;
+ if (mOwner->isRealTimeRecording()) {
+ if (mIsAudio) {
+ updateDriftTime(meta_data);
+ }
}
- }
- if (mSamplesHaveSameSize) {
- if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
- mSamplesHaveSameSize = false;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
- previousSampleSize = sampleSize;
- }
- ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
- trackName, timestampUs, lastTimestampUs);
- lastDurationUs = timestampUs - lastTimestampUs;
- lastDurationTicks = currDurationTicks;
- lastTimestampUs = timestampUs;
- if (isSync != 0) {
- addOneStssTableEntry(mStszTableEntries->count());
- }
-
- if (mTrackingProgressStatus) {
- if (mPreviousTrackTimeUs <= 0) {
- mPreviousTrackTimeUs = mStartTimestampUs;
+ ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
+ trackName, timestampUs, previousPausedDurationUs);
+ if (timestampUs > mTrackDurationUs) {
+ mTrackDurationUs = timestampUs;
}
- trackProgressStatus(timestampUs);
+
+ // We need to use the time scale based ticks, rather than the
+ // timestamp itself to determine whether we have to use a new
+ // stts entry, since we may have rounding errors.
+ // The calculation is intended to reduce the accumulated
+ // rounding errors.
+ currDurationTicks =
+ ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
+ (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+ if (currDurationTicks < 0ll) {
+ ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
+ (long long)timestampUs, (long long)lastTimestampUs, trackName);
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ // if the duration is different for this sample, see if it is close enough to the previous
+ // duration that we can fudge it and use the same value, to avoid filling the stts table
+ // with lots of near-identical entries.
+ // "close enough" here means that the current duration needs to be adjusted by less
+ // than 0.1 milliseconds
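+ // (e.g. with a 90 kHz video time scale one tick is ~11 us, so a one-tick difference
+ // is fudged; with a 1 kHz time scale one tick is 1000 us and is kept as-is)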
+ if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+ int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+ + (mTimeScale / 2)) / mTimeScale;
+ if (deltaUs > -100 && deltaUs < 100) {
+ // use previous ticks, and adjust timestamp as if it was actually that number
+ // of ticks
+ currDurationTicks = lastDurationTicks;
+ timestampUs += deltaUs;
+ }
+ }
+ mStszTableEntries->add(htonl(sampleSize));
+ if (mStszTableEntries->count() > 2) {
+
+ // Force the first sample to have its own stts entry so that
+ // we can adjust its value later to maintain the A/V sync.
+ if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
+ addOneSttsTableEntry(sampleCount, lastDurationTicks);
+ sampleCount = 1;
+ } else {
+ ++sampleCount;
+ }
+
+ }
+ if (mSamplesHaveSameSize) {
+ if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
+ mSamplesHaveSameSize = false;
+ }
+ previousSampleSize = sampleSize;
+ }
+ ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
+ trackName, timestampUs, lastTimestampUs);
+ lastDurationUs = timestampUs - lastTimestampUs;
+ lastDurationTicks = currDurationTicks;
+ lastTimestampUs = timestampUs;
+
+ if (isSync != 0) {
+ addOneStssTableEntry(mStszTableEntries->count());
+ }
+
+ if (mTrackingProgressStatus) {
+ if (mPreviousTrackTimeUs <= 0) {
+ mPreviousTrackTimeUs = mStartTimestampUs;
+ }
+ trackProgressStatus(timestampUs);
+ }
}
if (!hasMultipleTracks) {
size_t bytesWritten;
@@ -4331,9 +4334,12 @@
}
// patch up the mPrimaryItemId and count items with prop associations
+ uint16_t firstVisibleItemId = 0;
for (size_t index = 0; index < mItems.size(); index++) {
if (mItems[index].isPrimary) {
mPrimaryItemId = mItems[index].itemId;
+ } else if (!firstVisibleItemId && !mItems[index].isHidden) {
+ firstVisibleItemId = mItems[index].itemId;
}
if (!mItems[index].properties.empty()) {
@@ -4342,8 +4348,13 @@
}
if (mPrimaryItemId == 0) {
- ALOGW("didn't find primary, using first item");
- mPrimaryItemId = mItems[0].itemId;
+ if (firstVisibleItemId > 0) {
+ ALOGW("didn't find primary, using first visible item");
+ mPrimaryItemId = firstVisibleItemId;
+ } else {
+ ALOGW("no primary and no visible item, using first item");
+ mPrimaryItemId = mItems[0].itemId;
+ }
}
beginBox("meta");
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 4fedab6..677d25a 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -28,6 +28,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryDealer.h>
+#include <cutils/properties.h>
#include <gui/BufferQueue.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
@@ -44,6 +45,7 @@
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/ACodec.h>
#include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/CCodec.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
@@ -549,8 +551,11 @@
//static
sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, bool nameIsType) {
- // at this time only ACodec specifies a mime type.
- if (nameIsType || name.startsWithIgnoreCase("omx.")) {
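+ // Route "codec2."-prefixed component names to the Codec 2.0 path (CCodec), gated by
+ // the debug.stagefright.ccodec system property (off by default).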
+ static bool ccodecEnabled = property_get_bool("debug.stagefright.ccodec", false);
+ if (ccodecEnabled && !nameIsType && name.startsWithIgnoreCase("codec2.")) {
+ return new CCodec;
+ } else if (nameIsType || name.startsWithIgnoreCase("omx.")) {
+ // at this time only ACodec specifies a mime type.
return new ACodec;
} else if (name.startsWithIgnoreCase("android.filter.")) {
return new MediaFilter;
@@ -1849,7 +1854,6 @@
}
}
}
-
if (mFlags & kFlagIsAsync) {
onOutputFormatChanged();
} else {
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 67e6748..9b2fb4f 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimpleDecodingSource"
+#include <utils/Log.h>
+
#include <gui/Surface.h>
#include <media/ICrypto.h>
@@ -43,7 +47,7 @@
//static
sp<SimpleDecodingSource> SimpleDecodingSource::Create(
const sp<MediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
- const char *desiredCodec) {
+ const char *desiredCodec, bool skipMediaCodecList) {
sp<Surface> surface = static_cast<Surface*>(nativeWindow.get());
const char *mime = NULL;
sp<MetaData> meta = source->getFormat();
@@ -63,6 +67,33 @@
looper->start();
sp<MediaCodec> codec;
+ auto configure = [=](const sp<MediaCodec> &codec, const AString &componentName)
+ -> sp<SimpleDecodingSource> {
+ if (codec != NULL) {
+ ALOGI("Successfully allocated codec '%s'", componentName.c_str());
+
+ status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
+ sp<AMessage> outFormat;
+ if (err == OK) {
+ err = codec->getOutputFormat(&outFormat);
+ }
+ if (err == OK) {
+ return new SimpleDecodingSource(codec, source, looper,
+ surface != NULL,
+ strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
+ outFormat);
+ }
+
+ ALOGD("Failed to configure codec '%s'", componentName.c_str());
+ codec->release();
+ }
+ return NULL;
+ };
+
+ if (skipMediaCodecList) {
+ codec = MediaCodec::CreateByComponentName(looper, desiredCodec);
+ return configure(codec, desiredCodec);
+ }
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
@@ -73,22 +104,10 @@
ALOGV("Attempting to allocate codec '%s'", componentName.c_str());
codec = MediaCodec::CreateByComponentName(looper, componentName);
- if (codec != NULL) {
- ALOGI("Successfully allocated codec '%s'", componentName.c_str());
-
- status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
- if (err == OK) {
- err = codec->getOutputFormat(&format);
- }
- if (err == OK) {
- return new SimpleDecodingSource(codec, source, looper,
- surface != NULL,
- strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
- format);
- }
-
- ALOGD("Failed to configure codec '%s'", componentName.c_str());
- codec->release();
+ sp<SimpleDecodingSource> res = configure(codec, componentName);
+ if (res != NULL) {
+ return res;
+ } else {
codec = NULL;
}
}
diff --git a/media/libstagefright/codec2/Android.bp b/media/libstagefright/codec2/Android.bp
index 311a20b..696a062 100644
--- a/media/libstagefright/codec2/Android.bp
+++ b/media/libstagefright/codec2/Android.bp
@@ -7,11 +7,20 @@
srcs: ["C2.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
include_dirs: [
"frameworks/av/media/libstagefright/codec2/include",
"frameworks/native/include/media/hardware",
],
+ export_include_dirs: [
+ "include",
+ ],
+
sanitize: {
misc_undefined: [
"unsigned-integer-overflow",
diff --git a/media/libstagefright/codec2/include/C2Component.h b/media/libstagefright/codec2/include/C2Component.h
index 2dbf7ea..a555b35 100644
--- a/media/libstagefright/codec2/include/C2Component.h
+++ b/media/libstagefright/codec2/include/C2Component.h
@@ -360,6 +360,7 @@
C2DomainKind domain; ///< component domain (e.g. audio or video)
C2ComponentKind type; ///< component type (e.g. encoder, decoder or filter)
C2StringLiteral mediaType; ///< media type supported by the component
+ C2ComponentPriority priority; ///< priority used to determine component ordering
/**
* name alias(es) for backward compatibility.
@@ -569,7 +570,6 @@
*/
virtual std::shared_ptr<C2ComponentInterface> intf() = 0;
-protected:
virtual ~C2Component() = default;
};
@@ -724,11 +724,11 @@
/**
* Returns the list of components supported by this component store.
*
- * This method may be momentarily blocking, but MUST return within 5ms.
+ * This method MUST return within 500ms.
*
* \retval vector of component information.
*/
- virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents_sm() const = 0;
+ virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() = 0;
// -------------------------------------- UTILITY METHODS --------------------------------------
diff --git a/media/libstagefright/codec2/vndk/Android.bp b/media/libstagefright/codec2/vndk/Android.bp
index 64ce5e6..d2cfebb 100644
--- a/media/libstagefright/codec2/vndk/Android.bp
+++ b/media/libstagefright/codec2/vndk/Android.bp
@@ -9,6 +9,10 @@
"C2Store.cpp",
],
+ export_include_dirs: [
+ "include",
+ ],
+
include_dirs: [
"frameworks/av/media/libstagefright/codec2/include",
"frameworks/av/media/libstagefright/codec2/vndk/include",
diff --git a/media/libstagefright/codec2/vndk/C2Store.cpp b/media/libstagefright/codec2/vndk/C2Store.cpp
index 73ffaea..460cc60 100644
--- a/media/libstagefright/codec2/vndk/C2Store.cpp
+++ b/media/libstagefright/codec2/vndk/C2Store.cpp
@@ -20,12 +20,26 @@
#include <C2Component.h>
#include <C2PlatformSupport.h>
+#define LOG_TAG "C2Store"
+#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <dlfcn.h>
+
#include <map>
#include <memory>
#include <mutex>
namespace android {
+/**
+ * The platform allocator store provides basic allocator-types for the framework based on ion and
+ * gralloc. Allocators are not meant to be updatable.
+ *
+ * \todo Provide allocator based on ashmem
+ * \todo Move ion allocation into its HIDL or provide some mapping from memory usage to ion flags
+ * \todo Make this allocator store extendable
+ */
class C2PlatformAllocatorStore : public C2AllocatorStore {
public:
enum : id_t {
@@ -37,9 +51,11 @@
/* ionmapper */
);
- virtual c2_status_t fetchAllocator(id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
+ virtual c2_status_t fetchAllocator(
+ id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
- virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb() const override {
+ virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb()
+ const override {
return std::vector<std::shared_ptr<const C2Allocator::Traits>>(); /// \todo
}
@@ -48,10 +64,10 @@
}
private:
- // returns a shared-singleton ion allocator
+ /// returns a shared-singleton ion allocator
std::shared_ptr<C2Allocator> fetchIonAllocator();
- // returns a shared-singleton gralloc allocator
+ /// returns a shared-singleton gralloc allocator
std::shared_ptr<C2Allocator> fetchGrallocAllocator();
};
@@ -141,4 +157,385 @@
return res;
}
-} // namespace android
\ No newline at end of file
+class C2PlatformComponentStore : public C2ComponentStore {
+public:
+ virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() override;
+ virtual std::shared_ptr<C2ParamReflector> getParamReflector() const override;
+ virtual C2String getName() const override;
+ virtual c2_status_t querySupportedValues_nb(
+ std::vector<C2FieldSupportedValuesQuery> &fields) const override;
+ virtual c2_status_t querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const override;
+ virtual c2_status_t query_sm(
+ const std::vector<C2Param *const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>> *const heapParams) const override;
+ virtual c2_status_t createInterface(
+ C2String name, std::shared_ptr<C2ComponentInterface> *const interface) override;
+ virtual c2_status_t createComponent(
+ C2String name, std::shared_ptr<C2Component> *const component) override;
+ virtual c2_status_t copyBuffer(
+ std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) override;
+ virtual c2_status_t config_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+ virtual c2_status_t commit_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+
+ C2PlatformComponentStore();
+
+ virtual ~C2PlatformComponentStore() override = default;
+
+private:
+
+ /**
+ * An object encapsulating a loaded component module.
+ *
+ * \todo provide a way to add traits to known components here to avoid loading the .so-s
+ * for listComponents
+ */
+ struct ComponentModule : public C2ComponentFactory,
+ public std::enable_shared_from_this<ComponentModule> {
+ virtual c2_status_t createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component> *component,
+ ComponentDeleter deleter = std::default_delete<C2Component>()) override;
+ virtual c2_status_t createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+ InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) override;
+
+ /**
+ * \returns the traits of the component in this module.
+ */
+ std::shared_ptr<const C2Component::Traits> getTraits();
+
+ /**
+ * Creates an uninitialized component module.
+ *
+ * \note Only used by ComponentLoader.
+ */
+ ComponentModule()
+ : mInit(C2_NO_INIT),
+ mLibHandle(nullptr),
+ createFactory(nullptr),
+ destroyFactory(nullptr),
+ mComponentFactory(nullptr) {}
+
+ /**
+ * Initializes a component module with a given library path. Must be called exactly once.
+ *
+ * \note Only used by ComponentLoader.
+ *
+ * \param libPath[in] library path (or name)
+ *
+ * \retval C2_OK the component module has been successfully loaded
+ * \retval C2_NO_MEMORY not enough memory to load the component module
+ * \retval C2_NOT_FOUND could not locate the component module
+ * \retval C2_CORRUPTED the component module could not be loaded (unexpected)
+ * \retval C2_REFUSED permission denied to load the component module (unexpected)
+ * \retval C2_TIMED_OUT could not load the module within the time limit (unexpected)
+ */
+ c2_status_t init(std::string libPath);
+
+ virtual ~ComponentModule() override;
+
+ protected:
+ std::recursive_mutex mLock; ///< lock protecting mTraits
+ std::shared_ptr<C2Component::Traits> mTraits; ///< cached component traits
+
+ c2_status_t mInit; ///< initialization result
+
+ void *mLibHandle; ///< loaded library handle
+ C2ComponentFactory::CreateCodec2FactoryFunc createFactory; ///< loaded create function
+ C2ComponentFactory::DestroyCodec2FactoryFunc destroyFactory; ///< loaded destroy function
+ C2ComponentFactory *mComponentFactory; ///< loaded/created component factory
+ };
+
+ /**
+ * An object encapsulating a loadable component module.
+ *
+ * \todo make this also work for enumerations
+ */
+ struct ComponentLoader {
+ /**
+ * Load the component module.
+ *
+ * This method simply returns the component module if it is already currently loaded, or
+ * attempts to load it if it is not.
+ *
+ * \param module[out] pointer to the shared pointer where the loaded module shall be stored.
+ * This will be nullptr on error.
+ *
+ * \retval C2_OK the component module has been successfully loaded
+ * \retval C2_NO_MEMORY not enough memory to load the component module
+ * \retval C2_NOT_FOUND could not locate the component module
+ * \retval C2_CORRUPTED the component module could not be loaded
+ * \retval C2_REFUSED permission denied to load the component module
+ */
+ c2_status_t fetchModule(std::shared_ptr<ComponentModule> *module) {
+ c2_status_t res = C2_OK;
+ std::lock_guard<std::mutex> lock(mMutex);
+ std::shared_ptr<ComponentModule> localModule = mModule.lock();
+ if (localModule == nullptr) {
+ localModule = std::make_shared<ComponentModule>();
+ res = localModule->init(mLibPath);
+ if (res == C2_OK) {
+ mModule = localModule;
+ }
+ }
+ *module = localModule;
+ return res;
+ }
+
+ /**
+ * Creates a component loader for a specific library path (or name).
+ */
+ ComponentLoader(std::string libPath)
+ : mLibPath(libPath) {}
+
+ private:
+ std::mutex mMutex; ///< mutex guarding the module
+ std::weak_ptr<ComponentModule> mModule; ///< weak reference to the loaded module
+ std::string mLibPath; ///< library path (or name)
+ };
+
+ /**
+ * Retrieves the component loader for a component.
+ *
+ * \return a non-ref-holding pointer to the component loader.
+ *
+ * \retval C2_OK the component loader has been successfully retrieved
+ * \retval C2_NO_MEMORY not enough memory to locate the component loader
+ * \retval C2_NOT_FOUND could not locate the component to be loaded
+ * \retval C2_CORRUPTED the component loader could not be identified due to some modules being
+ * corrupted (this can happen if the name does not refer to an already
+ * identified component but some components could not be loaded due to
+ * bad library)
+ * \retval C2_REFUSED permission denied to find the component loader for the named component
+ * (this can happen if the name does not refer to an already identified
+ * component but some components could not be loaded due to lack of
+ * permissions)
+ */
+ c2_status_t findComponent(C2String name, ComponentLoader **loader);
+
+ std::map<C2String, ComponentLoader> mComponents; ///< list of components
+};
+
+c2_status_t C2PlatformComponentStore::ComponentModule::init(std::string libPath) {
+ ALOGV("in %s", __func__);
+ ALOGV("loading dll");
+ mLibHandle = dlopen(libPath.c_str(), RTLD_NOW|RTLD_NODELETE);
+ if (mLibHandle == nullptr) {
+ // could be access/symbol or simply not being there
+ ALOGD("could not dlopen %s: %s", libPath.c_str(), dlerror());
+ mInit = C2_CORRUPTED;
+ } else {
+ createFactory =
+ (C2ComponentFactory::CreateCodec2FactoryFunc)dlsym(mLibHandle, "CreateCodec2Factory");
+ destroyFactory =
+ (C2ComponentFactory::DestroyCodec2FactoryFunc)dlsym(mLibHandle, "DestroyCodec2Factory");
+
+ if (createFactory == nullptr || destroyFactory == nullptr) {
+ // missing entry points; do not call through a null function pointer below
+ ALOGD("could not find factory functions in %s", libPath.c_str());
+ mInit = C2_CORRUPTED;
+ } else {
+ mComponentFactory = createFactory();
+ if (mComponentFactory == nullptr) {
+ ALOGD("could not create factory in %s", libPath.c_str());
+ mInit = C2_NO_MEMORY;
+ } else {
+ mInit = C2_OK;
+ }
+ }
+ }
+ return mInit;
+}
+
+C2PlatformComponentStore::ComponentModule::~ComponentModule() {
+ ALOGV("in %s", __func__);
+ if (destroyFactory && mComponentFactory) {
+ destroyFactory(mComponentFactory);
+ }
+ if (mLibHandle) {
+ ALOGV("unloading dll");
+ dlclose(mLibHandle);
+ }
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+ std::function<void(::android::C2ComponentInterface*)> deleter) {
+ interface->reset();
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+ std::shared_ptr<ComponentModule> module = shared_from_this();
+ c2_status_t res = mComponentFactory->createInterface(
+ id, interface, [module, deleter](C2ComponentInterface *p) mutable {
+ // capture module so that we ensure we still have it while deleting interface
+ deleter(p); // delete interface first
+ module.reset(); // remove module ref (not technically needed)
+ });
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component> *component,
+ std::function<void(::android::C2Component*)> deleter) {
+ component->reset();
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+ std::shared_ptr<ComponentModule> module = shared_from_this();
+ c2_status_t res = mComponentFactory->createComponent(
+ id, component, [module, deleter](C2Component *p) mutable {
+ // capture module so that we ensure we still have it while deleting component
+ deleter(p); // delete component first
+ module.reset(); // remove module ref (not technically needed)
+ });
+ return res;
+}
+
+std::shared_ptr<const C2Component::Traits> C2PlatformComponentStore::ComponentModule::getTraits() {
+ std::unique_lock<std::recursive_mutex> lock(mLock);
+ if (!mTraits) {
+ std::shared_ptr<C2ComponentInterface> intf;
+ c2_status_t res = createInterface(0, &intf);
+ if (res != C2_OK) {
+ return nullptr;
+ }
+
+ std::shared_ptr<C2Component::Traits> traits(new (std::nothrow) C2Component::Traits);
+ if (traits) {
+ // traits->name = intf->getName();
+ }
+
+ mTraits = traits;
+ }
+ return mTraits;
+}
+
+C2PlatformComponentStore::C2PlatformComponentStore() {
+ // TODO: move this also into a .so so it can be updated
+ mComponents.emplace("c2.google.avc.decoder", "libstagefright_soft_c2avcdec.so");
+}
+
+c2_status_t C2PlatformComponentStore::copyBuffer(
+ std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) {
+ (void)src;
+ (void)dst;
+ return C2_OMITTED;
+}
+
+c2_status_t C2PlatformComponentStore::query_sm(
+ const std::vector<C2Param *const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>> *const heapParams) const {
+ // there are no supported configs
+ (void)heapParams;
+ return stackParams.empty() && heapParamIndices.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::config_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ // there are no supported configs
+ (void)failures;
+ return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::commit_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ // there are no supported configs
+ (void)failures;
+ return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
+ // This method SHALL return within 500ms.
+ std::vector<std::shared_ptr<const C2Component::Traits>> list;
+ for (auto &it : mComponents) {
+ ComponentLoader &loader = it.second;
+ std::shared_ptr<ComponentModule> module;
+ c2_status_t res = loader.fetchModule(&module);
+ if (res == C2_OK) {
+ std::shared_ptr<const C2Component::Traits> traits = module->getTraits();
+ if (traits) {
+ list.push_back(traits);
+ }
+ }
+ }
+ return list;
+}
+
+c2_status_t C2PlatformComponentStore::findComponent(C2String name, ComponentLoader **loader) {
+ *loader = nullptr;
+ auto pos = mComponents.find(name);
+ // TODO: check aliases
+ if (pos == mComponents.end()) {
+ return C2_NOT_FOUND;
+ }
+ *loader = &pos->second;
+ return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::createComponent(
+ C2String name, std::shared_ptr<C2Component> *const component) {
+ // This method SHALL return within 100ms.
+ component->reset();
+ ComponentLoader *loader;
+ c2_status_t res = findComponent(name, &loader);
+ if (res == C2_OK) {
+ std::shared_ptr<ComponentModule> module;
+ res = loader->fetchModule(&module);
+ if (res == C2_OK) {
+ // TODO: get a unique node ID
+ res = module->createComponent(0, component);
+ }
+ }
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::createInterface(
+ C2String name, std::shared_ptr<C2ComponentInterface> *const interface) {
+ // This method SHALL return within 100ms.
+ interface->reset();
+ ComponentLoader *loader;
+ c2_status_t res = findComponent(name, &loader);
+ if (res == C2_OK) {
+ std::shared_ptr<ComponentModule> module;
+ res = loader->fetchModule(&module);
+ if (res == C2_OK) {
+ // TODO: get a unique node ID
+ res = module->createInterface(0, interface);
+ }
+ }
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const {
+ // there are no supported config params
+ (void)params;
+ return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedValues_nb(
+ std::vector<C2FieldSupportedValuesQuery> &fields) const {
+ // there are no supported config params
+ return fields.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+C2String C2PlatformComponentStore::getName() const {
+ return "android.componentStore.platform";
+}
+
+std::shared_ptr<C2ParamReflector> C2PlatformComponentStore::getParamReflector() const {
+ // TODO
+ return nullptr;
+}
+
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore() {
+ static std::mutex mutex;
+ static std::weak_ptr<C2ComponentStore> platformStore;
+ std::lock_guard<std::mutex> lock(mutex);
+ std::shared_ptr<C2ComponentStore> store = platformStore.lock();
+ if (store == nullptr) {
+ store = std::make_shared<C2PlatformComponentStore>();
+ platformStore = store;
+ }
+ return store;
+}
+
+} // namespace android
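
The store above resolves a component name to a dynamically loaded module and keeps that module alive for as long as the created object exists. A minimal usage sketch, using only the APIs added in this patch (function name and error handling are illustrative only):

    #include <C2Component.h>
    #include <C2PlatformSupport.h>

    using namespace android;

    static std::shared_ptr<C2Component> createAvcDecoder() {
        // "c2.google.avc.decoder" is the only name registered in the store constructor above.
        std::shared_ptr<C2ComponentStore> store = GetCodec2PlatformComponentStore();
        std::shared_ptr<C2Component> component;
        c2_status_t err = store->createComponent("c2.google.avc.decoder", &component);
        if (err != C2_OK) {
            return nullptr;  // module failed to load, or the factory refused to create it
        }
        // The deleter installed by the store captures the ComponentModule, so the
        // backing .so stays loaded until the component is destroyed.
        return component;
    }

This is the same flow the codec2.cpp test harness switches to later in this change.
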
diff --git a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
index 8e45705..2281dab 100644
--- a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
+++ b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
@@ -19,6 +19,7 @@
#include <C2Component.h>
+#include <functional>
#include <memory>
namespace android {
@@ -64,14 +65,17 @@
*/
class C2ComponentFactory {
public:
+ typedef std::function<void(::android::C2Component*)> ComponentDeleter;
+ typedef std::function<void(::android::C2ComponentInterface*)> InterfaceDeleter;
+
/**
* Creates a component.
*
* This method SHALL return within 100ms.
*
+ * \param id component ID for the created component
* \param component shared pointer where the created component is stored. Cleared on
* failure and updated on success.
- * \param id component ID for the created component
*
* \retval C2_OK the component was created successfully
* \retval C2_TIMED_OUT could not create the component within the time limit (unexpected)
@@ -80,16 +84,17 @@
* \retval C2_NO_MEMORY not enough memory to create the component
*/
virtual c2_status_t createComponent(
- std::shared_ptr<C2Component>* const component, c2_node_id_t id) = 0;
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ ComponentDeleter deleter = std::default_delete<C2Component>()) = 0;
/**
* Creates a component interface.
*
* This method SHALL return within 100ms.
*
+ * \param id component interface ID for the created interface
* \param interface shared pointer where the created interface is stored. Cleared on
* failure and updated on success.
- * \param id component interface ID for the created interface
*
* \retval C2_OK the component interface was created successfully
* \retval C2_TIMED_OUT could not create the component interface within the time limit
@@ -100,11 +105,22 @@
* \retval C2_NO_MEMORY not enough memory to create the component interface
*/
virtual c2_status_t createInterface(
- std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) = 0;
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) = 0;
virtual ~C2ComponentFactory() = default;
+
+ typedef ::android::C2ComponentFactory* (*CreateCodec2FactoryFunc)(void);
+ typedef void (*DestroyCodec2FactoryFunc)(::android::C2ComponentFactory*);
};
+/**
+ * Returns the platform component store.
+ * \retval nullptr if the platform component store could not be obtained
+ */
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore();
+
+
} // namespace android
#endif // STAGEFRIGHT_CODEC2_PLATFORM_SUPPORT_H_
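
The CreateCodec2FactoryFunc / DestroyCodec2FactoryFunc typedefs added above define the extern "C" contract between a component .so and the store. A self-contained sketch of driving that contract directly, mirroring ComponentModule::init()/~ComponentModule() earlier in this patch (the helper name and simplified error handling are illustrative only):

    #include <dlfcn.h>
    #include <memory>

    #include <C2PlatformSupport.h>

    using namespace android;

    // Load a factory from |libPath|, create an interface through it, then tear down.
    static std::shared_ptr<C2ComponentInterface> probeInterface(const char *libPath) {
        void *handle = dlopen(libPath, RTLD_NOW | RTLD_NODELETE);
        if (handle == nullptr) {
            return nullptr;  // not present, bad permissions, or unresolvable symbols
        }
        auto create = (C2ComponentFactory::CreateCodec2FactoryFunc)dlsym(
                handle, "CreateCodec2Factory");
        auto destroy = (C2ComponentFactory::DestroyCodec2FactoryFunc)dlsym(
                handle, "DestroyCodec2Factory");
        std::shared_ptr<C2ComponentInterface> intf;
        if (create != nullptr && destroy != nullptr) {
            C2ComponentFactory *factory = create();
            if (factory != nullptr) {
                (void)factory->createInterface(0, &intf);  // default deleter is enough here
                destroy(factory);
            }
        }
        // RTLD_NODELETE keeps the code mapped, so |intf| outliving dlclose() is safe in
        // this simplified sketch; a real store keeps the handle open instead.
        dlclose(handle);
        return intf;
    }
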
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
index c74ca6d..2423629 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
@@ -261,7 +261,7 @@
mFrameRate(0u, 0),
mBlocksPerSecond(0u, 0),
mParamReflector(new ParamReflector) {
-
+ ALOGV("in %s", __func__);
mInputPortMime = C2PortMimeConfig::input::alloc_unique(strlen(CODEC_MIME_TYPE) + 1);
strcpy(mInputPortMime->m.mValue, CODEC_MIME_TYPE);
mOutputPortMime = C2PortMimeConfig::output::alloc_unique(strlen(MEDIA_MIMETYPE_VIDEO_RAW) + 1);
@@ -430,6 +430,10 @@
false, "_output_block_pools", mOutputBlockPools.get()));
}
+C2SoftAvcDecIntf::~C2SoftAvcDecIntf() {
+ ALOGV("in %s", __func__);
+}
+
C2String C2SoftAvcDecIntf::getName() const {
return mName;
}
@@ -653,6 +657,7 @@
mWidth(320),
mHeight(240),
mInputOffset(0) {
+ ALOGV("in %s", __func__);
GETTIME(&mTimeStart, NULL);
    // If input dump is enabled, then open/create an empty file
@@ -661,6 +666,7 @@
}
C2SoftAvcDec::~C2SoftAvcDec() {
+ ALOGV("in %s", __func__);
CHECK_EQ(deInitDecoder(), (status_t)OK);
}
@@ -790,6 +796,7 @@
}
void C2SoftAvcDec::processQueue() {
+#if 0
if (mIsInFlush) {
setFlushMode();
@@ -825,9 +832,10 @@
}
mIsInFlush = false;
}
+#endif
std::unique_ptr<C2Work> work;
- {
+ if (!mIsInFlush) {
std::unique_lock<std::mutex> lock(mQueueLock);
if (mQueue.empty()) {
mQueueCond.wait(lock);
@@ -844,7 +852,7 @@
process(work);
std::vector<std::unique_ptr<C2Work>> done;
- {
+ if (work) {
std::unique_lock<std::mutex> lock(mPendingLock);
uint32_t index = work->input.ordinal.frame_index;
mPendingWork[index].swap(work);
@@ -871,12 +879,12 @@
static void *ivd_aligned_malloc(void *ctxt, WORD32 alignment, WORD32 size) {
- UNUSED(ctxt);
+ (void) ctxt;
return memalign(alignment, size);
}
static void ivd_aligned_free(void *ctxt, void *buf) {
- UNUSED(ctxt);
+ (void) ctxt;
free(buf);
return;
}
@@ -1001,6 +1009,7 @@
}
status_t C2SoftAvcDec::setFlushMode() {
+ ALOGV("setFlushMode");
IV_API_CALL_STATUS_T status;
ivd_ctl_flush_ip_t s_video_flush_ip;
ivd_ctl_flush_op_t s_video_flush_op;
@@ -1019,7 +1028,7 @@
s_video_flush_op.u4_error_code);
return UNKNOWN_ERROR;
}
-
+ mIsInFlush = true;
return OK;
}
@@ -1079,7 +1088,6 @@
}
status_t C2SoftAvcDec::deInitDecoder() {
- size_t i;
IV_API_CALL_STATUS_T status;
if (mCodecCtx) {
@@ -1206,7 +1214,6 @@
if (mSignalledError) {
return;
}
-
if (NULL == mCodecCtx) {
if (OK != initDecoder()) {
ALOGE("Failed to initialize decoder");
@@ -1221,66 +1228,78 @@
setParams(mStride);
}
- const C2ConstLinearBlock &buffer =
- work->input.buffers[0]->data().linearBlocks().front();
- if (buffer.capacity() == 0) {
- // TODO: result?
+ uint32_t workIndex = 0;
+ std::unique_ptr<C2ReadView> input;
+ if (work) {
+ work->result = C2_OK;
- std::vector<std::unique_ptr<C2Work>> done;
- done.emplace_back(std::move(work));
- mListener->onWorkDone_nb(shared_from_this(), std::move(done));
- if (!(work->input.flags & C2BufferPack::FLAG_END_OF_STREAM)) {
- return;
- }
+ const C2ConstLinearBlock &buffer =
+ work->input.buffers[0]->data().linearBlocks().front();
+ if (buffer.capacity() == 0) {
+ // TODO: result?
- mReceivedEOS = true;
- // TODO: flush
- } else if (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
- mReceivedEOS = true;
- }
-
- C2ReadView input = work->input.buffers[0]->data().linearBlocks().front().map().get();
- uint32_t workIndex = work->input.ordinal.frame_index & 0xFFFFFFFF;
-
- // TODO: populate --- assume display order?
- if (!mAllocatedBlock) {
- // TODO: error handling
- // TODO: format & usage
- uint32_t format = HAL_PIXEL_FORMAT_YV12;
- C2MemoryUsage usage = { C2MemoryUsage::kSoftwareRead, C2MemoryUsage::kSoftwareWrite };
- // TODO: lock access to interface
- C2BlockPool::local_id_t poolId =
- mIntf->mOutputBlockPools->flexCount() ?
- mIntf->mOutputBlockPools->m.mValues[0] : C2BlockPool::BASIC_GRAPHIC;
- if (!mOutputBlockPool || mOutputBlockPool->getLocalId() != poolId) {
- c2_status_t err = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
- if (err != C2_OK) {
- // TODO: trip
+ std::vector<std::unique_ptr<C2Work>> done;
+ done.emplace_back(std::move(work));
+ mListener->onWorkDone_nb(shared_from_this(), std::move(done));
+ if (!(work->input.flags & C2BufferPack::FLAG_END_OF_STREAM)) {
+ return;
}
- }
- ALOGE("using allocator %u", mOutputBlockPool->getAllocatorId());
- (void)mOutputBlockPool->fetchGraphicBlock(
- mWidth, mHeight, format, usage, &mAllocatedBlock);
- ALOGE("provided (%dx%d) required (%dx%d)", mAllocatedBlock->width(), mAllocatedBlock->height(), mWidth, mHeight);
+ mReceivedEOS = true;
+ // TODO: flush
+ } else if (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
+ ALOGV("input EOS: %llu", work->input.ordinal.frame_index);
+ mReceivedEOS = true;
+ }
+
+ input.reset(new C2ReadView(work->input.buffers[0]->data().linearBlocks().front().map().get()));
+ workIndex = work->input.ordinal.frame_index & 0xFFFFFFFF;
}
- C2GraphicView output = mAllocatedBlock->map().get();
- ALOGE("mapped err = %d", output.error());
size_t inOffset = 0u;
- while (inOffset < input.capacity()) {
+ while (!input || inOffset < input->capacity()) {
+ if (!input) {
+ ALOGV("flushing");
+ }
+ // TODO: populate --- assume display order?
+ if (!mAllocatedBlock) {
+ // TODO: error handling
+ // TODO: format & usage
+ uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ C2MemoryUsage usage = { C2MemoryUsage::kSoftwareRead, C2MemoryUsage::kSoftwareWrite };
+ // TODO: lock access to interface
+ C2BlockPool::local_id_t poolId =
+ mIntf->mOutputBlockPools->flexCount() ?
+ mIntf->mOutputBlockPools->m.mValues[0] : C2BlockPool::BASIC_GRAPHIC;
+ if (!mOutputBlockPool || mOutputBlockPool->getLocalId() != poolId) {
+ c2_status_t err = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
+ if (err != C2_OK) {
+ // TODO: trip
+ }
+ }
+ ALOGE("using allocator %u", mOutputBlockPool->getAllocatorId());
+
+ (void)mOutputBlockPool->fetchGraphicBlock(
+ mWidth, mHeight, format, usage, &mAllocatedBlock);
+ ALOGE("provided (%dx%d) required (%dx%d)", mAllocatedBlock->width(), mAllocatedBlock->height(), mWidth, mHeight);
+ }
+ C2GraphicView output = mAllocatedBlock->map().get();
+ if (output.error() != OK) {
+ ALOGE("mapped err = %d", output.error());
+ }
+
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
- if (!setDecodeArgs(&s_dec_ip, &s_dec_op, &input, &output, workIndex, inOffset)) {
+ if (!setDecodeArgs(&s_dec_ip, &s_dec_op, input.get(), &output, workIndex, inOffset)) {
ALOGE("Decoder arg setup failed");
// TODO: notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
return;
}
- ALOGE("Decoder arg setup succeeded");
+ ALOGV("Decoder arg setup succeeded");
// If input dump is enabled, then write to file
DUMP_TO_FILE(mInFile, s_dec_ip.pv_stream_buffer, s_dec_ip.u4_num_Bytes, mInputOffset);
@@ -1321,15 +1340,24 @@
PRINT_TIME("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
s_dec_op.u4_num_bytes_consumed);
- ALOGI("bytes total=%u", input.capacity());
+ if (input) {
+ ALOGI("bytes total=%u", input->capacity());
+ }
if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
mFlushNeeded = true;
}
- if (1 != s_dec_op.u4_frame_decoded_flag) {
- /* If the input did not contain picture data, then ignore
- * the associated timestamp */
- //mTimeStampsValid[workIndex] = false;
+ if (1 != s_dec_op.u4_frame_decoded_flag && work) {
+            /* If the input did not contain picture data, return the work without
+             * a buffer */
+ ALOGV("no picture data");
+ std::vector<std::unique_ptr<C2Work>> done;
+ done.push_back(std::move(work));
+ done[0]->worklets.front()->output.flags = (C2BufferPack::flags_t)0;
+ done[0]->worklets.front()->output.buffers.clear();
+ done[0]->worklets.front()->output.buffers.emplace_back(nullptr);
+ done[0]->worklets.front()->output.ordinal = done[0]->input.ordinal;
+ mListener->onWorkDone_nb(shared_from_this(), std::move(done));
}
// If the decoder is in the changing resolution mode and there is no output present,
@@ -1373,10 +1401,19 @@
}
if (s_dec_op.u4_output_present) {
- ALOGV("output_present");
- // TODO: outHeader->nFilledLen = (mWidth * mHeight * 3) / 2;
+ ALOGV("output_present: %d", s_dec_op.u4_ts);
std::vector<std::unique_ptr<C2Work>> done;
- done.push_back(std::move(mPendingWork[s_dec_op.u4_ts]));
+ {
+ std::unique_lock<std::mutex> lock(mPendingLock);
+ done.push_back(std::move(mPendingWork[s_dec_op.u4_ts]));
+ mPendingWork.erase(s_dec_op.u4_ts);
+ }
+ uint32_t flags = 0;
+ if (done[0]->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
+ flags |= C2BufferPack::FLAG_END_OF_STREAM;
+ ALOGV("EOS");
+ }
+ done[0]->worklets.front()->output.flags = (C2BufferPack::flags_t)flags;
done[0]->worklets.front()->output.buffers.clear();
done[0]->worklets.front()->output.buffers.emplace_back(
std::make_shared<GraphicBuffer>(std::move(mAllocatedBlock)));
@@ -1391,16 +1428,25 @@
            /* If EOS was received on input port and there is no output
* from the codec, then signal EOS on output port */
if (mReceivedEOS) {
- // TODO
- // outHeader->nFilledLen = 0;
- // outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
+ std::vector<std::unique_ptr<C2Work>> done;
+ {
+ std::unique_lock<std::mutex> lock(mPendingLock);
+ if (!mPendingWork.empty()) {
+ done.push_back(std::move(mPendingWork.begin()->second));
+ mPendingWork.erase(mPendingWork.begin());
+ }
+ }
+ if (!done.empty()) {
+ ALOGV("sending empty EOS buffer");
+ done[0]->worklets.front()->output.flags = C2BufferPack::FLAG_END_OF_STREAM;
+ done[0]->worklets.front()->output.buffers.clear();
+ done[0]->worklets.front()->output.buffers.emplace_back(nullptr);
+ done[0]->worklets.front()->output.ordinal = done[0]->input.ordinal;
+ mListener->onWorkDone_nb(shared_from_this(), std::move(done));
+ }
- // outInfo->mOwnedByUs = false;
- // outQueue.erase(outQueue.begin());
- // outInfo = NULL;
- // notifyFillBufferDone(outHeader);
- // outHeader = NULL;
resetPlugin();
+ return;
}
}
inOffset += s_dec_op.u4_num_bytes_consumed;
@@ -1465,14 +1511,17 @@
class C2SoftAvcDecFactory : public C2ComponentFactory {
public:
virtual c2_status_t createComponent(
- std::shared_ptr<C2Component>* const component, c2_node_id_t id) override {
- *component = std::make_shared<C2SoftAvcDec>("avc", id);
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ std::function<void(::android::C2Component*)> deleter) override {
+ *component = std::shared_ptr<C2Component>(new C2SoftAvcDec("avc", id), deleter);
return C2_OK;
}
virtual c2_status_t createInterface(
- std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) override {
- *interface = std::make_shared<C2SoftAvcDecIntf>("avc", id);
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ std::function<void(::android::C2ComponentInterface*)> deleter) override {
+ *interface =
+ std::shared_ptr<C2ComponentInterface>(new C2SoftAvcDecIntf("avc", id), deleter);
return C2_OK;
}
@@ -1482,9 +1531,11 @@
} // namespace android
extern "C" ::android::C2ComponentFactory* CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
return new ::android::C2SoftAvcDecFactory();
}
extern "C" void DestroyCodec2Factory(::android::C2ComponentFactory* factory) {
+ ALOGV("in %s", __func__);
delete factory;
}
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
index 5deaf5d..28f1dfd 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
@@ -58,9 +58,6 @@
#define MIN(a, b) ((a) < (b)) ? (a) : (b)
-/** Used to remove warnings about unused parameters */
-#define UNUSED(x) ((void)(x))
-
/** Get time */
#define GETTIME(a, b) gettimeofday(a, b);
@@ -80,7 +77,7 @@
};
C2SoftAvcDecIntf(const char *name, c2_node_id_t id);
- virtual ~C2SoftAvcDecIntf() = default;
+ virtual ~C2SoftAvcDecIntf() override;
// From C2ComponentInterface
virtual C2String getName() const override;
diff --git a/media/libstagefright/codecs/cmds/Android.bp b/media/libstagefright/codecs/cmds/Android.bp
index e44e53c..ad0bd2d 100644
--- a/media/libstagefright/codecs/cmds/Android.bp
+++ b/media/libstagefright/codecs/cmds/Android.bp
@@ -22,7 +22,6 @@
"libstagefright",
"libstagefright_codec2",
"libstagefright_foundation",
- "libstagefright_soft_c2avcdec",
"libui",
"libutils",
],
diff --git a/media/libstagefright/codecs/cmds/codec2.cpp b/media/libstagefright/codecs/cmds/codec2.cpp
index 8e2c4b9..1972a7a 100644
--- a/media/libstagefright/codecs/cmds/codec2.cpp
+++ b/media/libstagefright/codecs/cmds/codec2.cpp
@@ -211,10 +211,9 @@
return;
}
- std::unique_ptr<C2ComponentFactory> factory(CreateCodec2Factory());
+ std::shared_ptr<C2ComponentStore> store = GetCodec2PlatformComponentStore();
std::shared_ptr<C2Component> component;
- (void)factory->createComponent(&component, 0);
- DestroyCodec2Factory(factory.release());
+ (void)store->createComponent("c2.google.avc.decoder", &component);
(void)component->setListener_sm(mListener);
std::unique_ptr<C2PortBlockPoolsTuning::output> pools =
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 71d625f..bc3e57c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -898,6 +898,9 @@
}
}
+ if (meta->get() == NULL) {
+ return ERROR_MALFORMED;
+ }
return OK;
}
diff --git a/media/libstagefright/include/CCodecBufferChannel.h b/media/libstagefright/include/CCodecBufferChannel.h
new file mode 100644
index 0000000..354cee2
--- /dev/null
+++ b/media/libstagefright/include/CCodecBufferChannel.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_BUFFER_CHANNEL_H_
+
+#define A_BUFFER_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+
+#include <C2Buffer.h>
+#include <C2Component.h>
+
+#include <media/stagefright/foundation/Mutexed.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/ICrypto.h>
+
+namespace android {
+
+/**
+ * BufferChannelBase implementation for CCodec.
+ */
+class CCodecBufferChannel : public BufferChannelBase {
+public:
+ class Buffers {
+ public:
+ Buffers() = default;
+ virtual ~Buffers() = default;
+
+ inline void setAlloc(const std::shared_ptr<C2BlockPool> &alloc) { mAlloc = alloc; }
+ inline void setFormat(const sp<AMessage> &format) { mFormat = format; }
+ inline const std::shared_ptr<C2BlockPool> &getAlloc() { return mAlloc; }
+
+ protected:
+        // Input: this object uses it to allocate the input buffers that the
+        // client fills.
+ // Output: this object passes it to the component.
+ std::shared_ptr<C2BlockPool> mAlloc;
+ sp<AMessage> mFormat;
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(Buffers);
+ };
+
+ class InputBuffers : public Buffers {
+ public:
+ using Buffers::Buffers;
+ virtual ~InputBuffers() = default;
+
+ virtual bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) = 0;
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+ virtual void flush() = 0;
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(InputBuffers);
+ };
+
+ class OutputBuffers : public Buffers {
+ public:
+ using Buffers::Buffers;
+ virtual ~OutputBuffers() = default;
+
+ virtual bool registerBuffer(
+ const std::shared_ptr<C2Buffer> &buffer,
+ size_t *index,
+ sp<MediaCodecBuffer> *codecBuffer) = 0;
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+ virtual void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork);
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(OutputBuffers);
+ };
+
+ CCodecBufferChannel(const std::function<void(status_t, enum ActionCode)> &onError);
+ virtual ~CCodecBufferChannel();
+
+ // BufferChannelBase interface
+ virtual status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual status_t queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer,
+ bool secure,
+ const uint8_t *key,
+ const uint8_t *iv,
+ CryptoPlugin::Mode mode,
+ CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples,
+ size_t numSubSamples,
+ AString *errorDetailMsg) override;
+ virtual status_t renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) override;
+ virtual status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+ virtual void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+
+ // Methods below are interface for CCodec to use.
+
+ void setComponent(const std::shared_ptr<C2Component> &component);
+ status_t setSurface(const sp<Surface> &surface);
+
+ /**
+ * Set C2BlockPool for input buffers.
+ *
+ * TODO: start timestamp?
+ */
+ void setInputBufferAllocator(const sp<C2BlockPool> &inAlloc);
+
+ /**
+     * Set C2BlockPool for output buffers. This object shall never use the
+     * allocator itself; it's just passed to the component.
+ *
+ * TODO: start timestamp?
+ */
+ void setOutputBufferAllocator(const sp<C2BlockPool> &outAlloc);
+
+ /**
+ * Start queueing buffers to the component. This object should never queue
+ * buffers before this call.
+ */
+ void start(const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat);
+
+ /**
+ * Stop queueing buffers to the component. This object should never queue
+ * buffers after this call, until start() is called.
+ */
+ void stop();
+
+ void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork);
+
+ /**
+ * Notify MediaCodec about work done.
+ *
+ * @param workItems finished work items.
+ */
+ void onWorkDone(std::vector<std::unique_ptr<C2Work>> workItems);
+
+private:
+ class QueueGuard;
+
+ class QueueSync {
+ public:
+ inline QueueSync() : mCount(-1) {}
+ ~QueueSync() = default;
+
+ void start();
+ void stop();
+
+ private:
+ std::mutex mMutex;
+ std::atomic_int32_t mCount;
+
+ friend class CCodecBufferChannel::QueueGuard;
+ };
+
+ class QueueGuard {
+ public:
+ QueueGuard(QueueSync &sync);
+ ~QueueGuard();
+ inline bool isRunning() { return mRunning; }
+
+ private:
+ QueueSync &mSync;
+ bool mRunning;
+ };
+
+ QueueSync mSync;
+ sp<MemoryDealer> mDealer;
+ sp<IMemory> mDecryptDestination;
+ int32_t mHeapSeqNum;
+
+ std::shared_ptr<C2Component> mComponent;
+ std::function<void(status_t, enum ActionCode)> mOnError;
+ std::shared_ptr<C2BlockPool> mInputAllocator;
+ QueueSync mQueueSync;
+ Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
+ Mutexed<std::unique_ptr<OutputBuffers>> mOutputBuffers;
+
+ std::atomic_uint64_t mFrameIndex;
+ std::atomic_uint64_t mFirstValidFrameIndex;
+
+ sp<MemoryDealer> makeMemoryDealer(size_t heapSize);
+ Mutexed<sp<Surface>> mSurface;
+
+ inline bool hasCryptoOrDescrambler() {
+ return mCrypto != NULL || mDescrambler != NULL;
+ }
+};
+
+} // namespace android
+
+#endif // A_BUFFER_CHANNEL_H_
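
QueueSync and QueueGuard form a start()/stop() gate around buffer queueing. Their implementation is not part of this header, so the following is only a plausible sketch of how a queueing entry point might use the guard, under the assumption that the guard pins the started state for the duration of the call:

    // Hypothetical illustration only -- the real logic lives in CCodecBufferChannel.cpp.
    status_t CCodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
        QueueGuard guard(mSync);
        if (!guard.isRunning()) {
            // start() has not been called yet, or stop() has already been called.
            return INVALID_OPERATION;
        }
        // ... wrap |buffer| into a C2Work item and queue it to mComponent ...
        (void)buffer;
        return OK;
    }
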
diff --git a/media/libstagefright/include/Codec2Buffer.h b/media/libstagefright/include/Codec2Buffer.h
new file mode 100644
index 0000000..0272cea
--- /dev/null
+++ b/media/libstagefright/include/Codec2Buffer.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_BUFFER_H_
+
+#define CODEC2_BUFFER_H_
+
+#include <C2Buffer.h>
+
+#include <media/MediaCodecBuffer.h>
+
+namespace android {
+
+class C2Buffer;
+
+/**
+ * MediaCodecBuffer implementation wraps around C2LinearBlock.
+ */
+class Codec2Buffer : public MediaCodecBuffer {
+public:
+ static sp<Codec2Buffer> allocate(
+ const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block);
+
+ virtual ~Codec2Buffer() = default;
+
+ C2ConstLinearBlock share();
+
+private:
+ Codec2Buffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &buffer,
+ const std::shared_ptr<C2LinearBlock> &block);
+ Codec2Buffer() = delete;
+
+ std::shared_ptr<C2LinearBlock> mBlock;
+};
+
+} // namespace android
+
+#endif // CODEC2_BUFFER_H_
diff --git a/media/libstagefright/include/media/stagefright/CCodec.h b/media/libstagefright/include/media/stagefright/CCodec.h
new file mode 100644
index 0000000..3e24bbe
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/CCodec.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C_CODEC_H_
+#define C_CODEC_H_
+
+#include <chrono>
+
+#include <C2Component.h>
+
+#include <android/native_window.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/Mutexed.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/stagefright/FrameRenderTracker.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/SkipCutBuffer.h>
+#include <utils/NativeHandle.h>
+#include <hardware/gralloc.h>
+#include <nativebase/nativebase.h>
+
+namespace android {
+
+class CCodecBufferChannel;
+
+class CCodec : public CodecBase {
+public:
+ CCodec();
+
+ virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
+ virtual void initiateAllocateComponent(const sp<AMessage> &msg) override;
+ virtual void initiateConfigureComponent(const sp<AMessage> &msg) override;
+ virtual void initiateCreateInputSurface() override;
+ virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface) override;
+ virtual void initiateStart() override;
+ virtual void initiateShutdown(bool keepComponentAllocated = false) override;
+
+ virtual status_t setSurface(const sp<Surface> &surface) override;
+
+ virtual void signalFlush() override;
+ virtual void signalResume() override;
+
+ virtual void signalSetParameters(const sp<AMessage> &msg) override;
+ virtual void signalEndOfInputStream() override;
+ virtual void signalRequestIDRFrame() override;
+
+ void initiateReleaseIfStuck();
+
+protected:
+ virtual ~CCodec();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg) override;
+
+private:
+ typedef std::chrono::time_point<std::chrono::steady_clock> TimePoint;
+
+ void initiateStop();
+ void initiateRelease(bool sendCallback = true);
+
+ void allocate(const AString &componentName);
+ void configure(const sp<AMessage> &msg);
+ void start();
+ void stop();
+ void flush();
+ void release(bool sendCallback);
+
+ void setDeadline(const TimePoint &deadline);
+
+ enum {
+ kWhatAllocate,
+ kWhatConfigure,
+ kWhatStart,
+ kWhatFlush,
+ kWhatStop,
+ kWhatRelease,
+ };
+
+ enum {
+ RELEASED,
+ ALLOCATED,
+ FLUSHED,
+ RUNNING,
+
+ ALLOCATING, // RELEASED -> ALLOCATED
+ STARTING, // ALLOCATED -> RUNNING
+ STOPPING, // RUNNING -> ALLOCATED
+ FLUSHING, // RUNNING -> FLUSHED
+ RESUMING, // FLUSHED -> RUNNING
+ RELEASING, // {ANY EXCEPT RELEASED} -> RELEASED
+ };
+
+ struct State {
+ inline State() : mState(RELEASED) {}
+
+ int mState;
+ std::shared_ptr<C2Component> mComp;
+ };
+
+ struct Formats {
+ sp<AMessage> mInputFormat;
+ sp<AMessage> mOutputFormat;
+ };
+
+ Mutexed<State> mState;
+ std::shared_ptr<CCodecBufferChannel> mChannel;
+ std::shared_ptr<C2Component::Listener> mListener;
+ Mutexed<TimePoint> mDeadline;
+ Mutexed<Formats> mFormats;
+
+ DISALLOW_EVIL_CONSTRUCTORS(CCodec);
+};
+
+} // namespace android
+
+#endif // C_CODEC_H_
diff --git a/media/libstagefright/include/media/stagefright/CodecBase.h b/media/libstagefright/include/media/stagefright/CodecBase.h
index 9197f7b..268662f 100644
--- a/media/libstagefright/include/media/stagefright/CodecBase.h
+++ b/media/libstagefright/include/media/stagefright/CodecBase.h
@@ -18,6 +18,7 @@
#define CODEC_BASE_H_
+#include <list>
#include <memory>
#include <stdint.h>
@@ -26,7 +27,6 @@
#include <media/hardware/CryptoAPI.h>
#include <media/hardware/HardwareAPI.h>
-#include <media/IOMX.h>
#include <media/MediaCodecInfo.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ColorUtils.h>
diff --git a/media/libstagefright/include/media/stagefright/MediaFilter.h b/media/libstagefright/include/media/stagefright/MediaFilter.h
index 0c10d11..a28c49d 100644
--- a/media/libstagefright/include/media/stagefright/MediaFilter.h
+++ b/media/libstagefright/include/media/stagefright/MediaFilter.h
@@ -57,7 +57,7 @@
OWNED_BY_UPSTREAM,
};
- IOMX::buffer_id mBufferID;
+ uint32_t mBufferID;
int32_t mGeneration;
int32_t mOutputFlags;
Status mStatus;
@@ -121,7 +121,7 @@
status_t allocateBuffersOnPort(OMX_U32 portIndex);
BufferInfo *findBufferByID(
- uint32_t portIndex, IOMX::buffer_id bufferID,
+ uint32_t portIndex, uint32_t bufferID,
ssize_t *index = NULL);
void postFillThisBuffer(BufferInfo *info);
void postDrainThisBuffer(BufferInfo *info);
diff --git a/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h b/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
index 5060dc1..6aede08 100644
--- a/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
+++ b/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
@@ -47,7 +47,8 @@
static sp<SimpleDecodingSource> Create(
const sp<MediaSource> &source, uint32_t flags,
const sp<ANativeWindow> &nativeWindow,
- const char *desiredCodec = NULL);
+ const char *desiredCodec = NULL,
+ bool skipMediaCodecList = false);
static sp<SimpleDecodingSource> Create(
const sp<MediaSource> &source, uint32_t flags = 0);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index a70005e..f331dbb 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -46,6 +46,36 @@
namespace android {
+namespace {
+// kTimestampFluctuation is an upper bound of timestamp fluctuation from the
+// source that GraphicBufferSource allows. The unit of kTimestampFluctuation is
+// frames. More specifically, GraphicBufferSource will drop a frame if
+//
+//     expectedNewFrameTimestamp - actualNewFrameTimestamp <
+//         (0.5 - kTimestampFluctuation) * expectedTimePeriodBetweenFrames
+//
+// where
+// - expectedNewFrameTimestamp is the calculated ideal timestamp of the new
+// incoming frame
+// - actualNewFrameTimestamp is the timestamp received from the source
+// - expectedTimePeriodBetweenFrames is the ideal difference of the timestamps
+// of two adjacent frames
+//
+// See GraphicBufferSource::calculateCodecTimestamp_l() for more detail about
+// how kTimestampFluctuation is used.
+//
+// kTimestampFluctuation should be non-negative. A higher value means a smaller
+// chance of dropping frames, but also a higher bound on the difference
+// between the source timestamp and the interpreted (snapped) timestamp.
+//
+// The value of 0.05 means that GraphicBufferSource expects the input timestamps
+// to fluctuate no more than 5% from the regular time period.
+//
+// TODO: Justify the choice of this value, or make it configurable.
+constexpr double kTimestampFluctuation = 0.05;
+}
+
/**
* A copiable object managing a buffer in the buffer cache managed by the producer. This object
* holds a reference to the buffer, and maintains which buffer slot it belongs to (if any), and
@@ -732,14 +762,16 @@
mFrameCount = 0;
} else {
// snap to nearest capture point
- int64_t nFrames = std::llround(
- (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000);
- if (nFrames <= 0) {
+ double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
+ if (nFrames < 0.5 - kTimestampFluctuation) {
// skip this frame as it's too close to previous capture
ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
return false;
}
- mFrameCount += nFrames;
+ if (nFrames <= 1.0) {
+ nFrames = 1.0;
+ }
+ mFrameCount += std::llround(nFrames);
mPrevCaptureUs = mBaseCaptureUs + std::llround(
mFrameCount * 1000000 / mCaptureFps);
mPrevFrameUs = mBaseFrameUs + std::llround(
diff --git a/media/mtp/MtpDatabase.h b/media/mtp/MtpDatabase.h
index 2395f4f..f3f9720 100644
--- a/media/mtp/MtpDatabase.h
+++ b/media/mtp/MtpDatabase.h
@@ -45,6 +45,8 @@
MtpObjectFormat format,
bool succeeded) = 0;
+ virtual void doScanDirectory(const char* path) = 0;
+
virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
MtpObjectFormat format,
MtpObjectHandle parent) = 0;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 6080868..bb0414d 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -1148,6 +1148,7 @@
ALOGV("Copying file from %s to %s", (const char*)fromPath, (const char*)path);
if (format == MTP_FORMAT_ASSOCIATION) {
int ret = makeFolder((const char *)path);
+ ret += copyRecursive(fromPath, path);
if (ret) {
result = MTP_RESPONSE_GENERAL_ERROR;
}
@@ -1158,6 +1159,8 @@
}
mDatabase->endSendObject(path, handle, format, result);
+ if (format == MTP_FORMAT_ASSOCIATION)
+ mDatabase->doScanDirectory(path);
mResponse.setParameter(1, handle);
return result;
}
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 11dedbb..6b20bca 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -52,6 +52,7 @@
enum {
kWhatActivityNotify,
+ kWhatAsyncNotify,
kWhatRequestActivityNotifications,
kWhatStopActivityNotifications,
};
@@ -88,6 +89,11 @@
bool mRequestedActivityNotification;
OnCodecEvent mCallback;
void *mCallbackUserData;
+
+ sp<AMessage> mAsyncNotify;
+ mutable Mutex mAsyncCallbackLock;
+ AMediaCodecOnAsyncNotifyCallback mAsyncCallback;
+ void *mAsyncCallbackUserData;
};
CodecHandler::CodecHandler(AMediaCodec *codec) {
@@ -128,6 +134,147 @@
break;
}
+ case kWhatAsyncNotify:
+ {
+ int32_t cbID;
+ if (!msg->findInt32("callbackID", &cbID)) {
+ ALOGE("kWhatAsyncNotify: callbackID is expected.");
+ break;
+ }
+
+ ALOGV("kWhatAsyncNotify: cbID = %d", cbID);
+
+ switch (cbID) {
+ case MediaCodec::CB_INPUT_AVAILABLE:
+ {
+ int32_t index;
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+ break;
+ }
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncInputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_AVAILABLE:
+ {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ if (!msg->findSize("offset", &offset)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+ break;
+ }
+ if (!msg->findSize("size", &size)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+ break;
+ }
+ if (!msg->findInt64("timeUs", &timeUs)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+ break;
+ }
+ if (!msg->findInt32("flags", &flags)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+ break;
+ }
+
+ AMediaCodecBufferInfo bufferInfo = {
+ (int32_t)offset,
+ (int32_t)size,
+ timeUs,
+ (uint32_t)flags};
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncOutputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index,
+ &bufferInfo);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+ {
+ sp<AMessage> format;
+ if (!msg->findMessage("format", &format)) {
+ ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+ break;
+ }
+
+ AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
+ mCodec->mAsyncCallback.onAsyncFormatChanged(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ aMediaFormat);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_ERROR:
+ {
+ status_t err;
+ int32_t actionCode;
+ AString detail;
+ if (!msg->findInt32("err", &err)) {
+ ALOGE("CB_ERROR: err is expected.");
+ break;
+ }
+ if (!msg->findInt32("action", &actionCode)) {
+ ALOGE("CB_ERROR: action is expected.");
+ break;
+ }
+ msg->findString("detail", &detail);
+ ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+ err, actionCode, detail.c_str());
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncError != NULL) {
+ mCodec->mAsyncCallback.onAsyncError(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ translate_error(err),
+ actionCode,
+ detail.c_str());
+ }
+
+ break;
+ }
+
+ default:
+ {
+ ALOGE("kWhatAsyncNotify: callbackID(%d) is unexpected.", cbID);
+ break;
+ }
+ }
+ break;
+ }
+
case kWhatStopActivityNotifications:
{
sp<AReplyToken> replyID;
@@ -162,7 +309,7 @@
size_t res = mData->mLooper->start(
false, // runOnCallingThread
true, // canCallJava XXX
- PRIORITY_FOREGROUND);
+ PRIORITY_AUDIO);
if (res != OK) {
ALOGE("Failed to start the looper");
AMediaCodec_delete(mData);
@@ -183,6 +330,9 @@
mData->mRequestedActivityNotification = false;
mData->mCallback = NULL;
+ mData->mAsyncCallback = {};
+ mData->mAsyncCallbackUserData = NULL;
+
return mData;
}
@@ -222,6 +372,32 @@
}
EXPORT
+media_status_t AMediaCodec_getName(
+ AMediaCodec *mData,
+ char** out_name) {
+ if (out_name == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ AString compName;
+ status_t err = mData->mCodec->getName(&compName);
+ if (err != OK) {
+ return translate_error(err);
+ }
+ *out_name = strdup(compName.c_str());
+ return AMEDIA_OK;
+}
+
+EXPORT
+void AMediaCodec_releaseName(
+ AMediaCodec * /* mData */,
+ char* name) {
+ if (name != NULL) {
+ free(name);
+ }
+}
+
+EXPORT
media_status_t AMediaCodec_configure(
AMediaCodec *mData,
const AMediaFormat* format,
@@ -236,8 +412,40 @@
surface = (Surface*) window;
}
- return translate_error(mData->mCodec->configure(nativeFormat, surface,
- crypto ? crypto->mCrypto : NULL, flags));
+ status_t err = mData->mCodec->configure(nativeFormat, surface,
+ crypto ? crypto->mCrypto : NULL, flags);
+ if (err != OK) {
+ ALOGE("configure: err(%d), failed with format: %s",
+ err, nativeFormat->debugString(0).c_str());
+ }
+ return translate_error(err);
+}
+
+EXPORT
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec *mData,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata) {
+ if (mData->mAsyncNotify == NULL && userdata != NULL) {
+ mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
+ if (err != OK) {
+ ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
+ return translate_error(err);
+ }
+ }
+
+ Mutex::Autolock _l(mData->mAsyncCallbackLock);
+ mData->mAsyncCallback = callback;
+ mData->mAsyncCallbackUserData = userdata;
+
+ return AMEDIA_OK;
+}
+
+
+EXPORT
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec *mData) {
+ return translate_error(mData->mCodec->releaseCrypto());
}
EXPORT
@@ -282,6 +490,19 @@
EXPORT
uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getInputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getInputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -304,6 +525,19 @@
EXPORT
uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getOutputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -367,6 +601,13 @@
}
EXPORT
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec *mData) {
+ sp<AMessage> format;
+ mData->mCodec->getInputFormat(&format);
+ return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec *mData, size_t index) {
sp<AMessage> format;
mData->mCodec->getOutputFormat(index, &format);
@@ -542,6 +783,16 @@
return translate_error(err);
}
+EXPORT
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_RECOVERABLE);
+}
+
+EXPORT
+bool AMediaCodecActionCode_isTransient(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_TRANSIENT);
+}
+
EXPORT
void AMediaCodecCryptoInfo_setPattern(AMediaCodecCryptoInfo *info,
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ee27520..a9025c0 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -125,6 +125,14 @@
ret.appendFormat("double(%f)", val);
break;
}
+ case AMessage::kTypeRect:
+ {
+ int32_t left, top, right, bottom;
+ f->findRect(name, &left, &top, &right, &bottom);
+ ret.appendFormat("Rect(%" PRId32 ", %" PRId32 ", %" PRId32 ", %" PRId32 ")",
+ left, top, right, bottom);
+ break;
+ }
case AMessage::kTypeString:
{
AString val;
@@ -165,11 +173,22 @@
}
EXPORT
+bool AMediaFormat_getDouble(AMediaFormat* format, const char *name, double *out) {
+ return format->mFormat->findDouble(name, out);
+}
+
+EXPORT
bool AMediaFormat_getSize(AMediaFormat* format, const char *name, size_t *out) {
return format->mFormat->findSize(name, out);
}
EXPORT
+bool AMediaFormat_getRect(AMediaFormat* format, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) {
+ return format->mFormat->findRect(name, left, top, right, bottom);
+}
+
+EXPORT
bool AMediaFormat_getBuffer(AMediaFormat* format, const char *name, void** data, size_t *outsize) {
sp<ABuffer> buf;
if (format->mFormat->findBuffer(name, &buf)) {
@@ -216,6 +235,22 @@
}
EXPORT
+void AMediaFormat_setDouble(AMediaFormat* format, const char* name, double value) {
+ format->mFormat->setDouble(name, value);
+}
+
+EXPORT
+void AMediaFormat_setSize(AMediaFormat* format, const char* name, size_t value) {
+ format->mFormat->setSize(name, value);
+}
+
+EXPORT
+void AMediaFormat_setRect(AMediaFormat* format, const char *name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ format->mFormat->setRect(name, left, top, right, bottom);
+}
+
+EXPORT
void AMediaFormat_setString(AMediaFormat* format, const char* name, const char* value) {
// AMessage::setString() makes a copy of the string
format->mFormat->setString(name, value, strlen(value));
@@ -233,30 +268,61 @@
}
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR = "aac-drc-cut-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR = "aac-drc-boost-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION = "aac-drc-heavy-compression";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL = "aac-target-ref-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL = "aac-encoded-target-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT = "aac-max-output-channel_count";
EXPORT const char* AMEDIAFORMAT_KEY_AAC_PROFILE = "aac-profile";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE = "aac-sbr-mode";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
+EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
EXPORT const char* AMEDIAFORMAT_KEY_BIT_RATE = "bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_CAPTURE_RATE = "capture-rate";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT = "channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_MASK = "channel-mask";
EXPORT const char* AMEDIAFORMAT_KEY_COLOR_FORMAT = "color-format";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_RANGE = "color-range";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_STANDARD = "color-standard";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER = "color-transfer";
+EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_CROP = "crop";
EXPORT const char* AMEDIAFORMAT_KEY_DURATION = "durationUs";
EXPORT const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL = "flac-compression-level";
EXPORT const char* AMEDIAFORMAT_KEY_FRAME_RATE = "frame-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLS = "grid-cols";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_HEIGHT = "grid-height";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_WIDTH = "grid-width";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
+EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
EXPORT const char* AMEDIAFORMAT_KEY_IS_ADTS = "is-adts";
EXPORT const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT = "is-autoselect";
EXPORT const char* AMEDIAFORMAT_KEY_IS_DEFAULT = "is-default";
EXPORT const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE = "is-forced-subtitle";
EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
+EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
+EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
+EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
+EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
-EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
+EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
+EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING = "ts-schema";
+EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
+EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
} // extern "C"
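
The new rect accessors and the keys exported above compose as follows; a minimal sketch using the display-crop key (function name and values are illustrative only):

    #include <media/NdkMediaFormat.h>

    static void cropExample() {
        AMediaFormat *fmt = AMediaFormat_new();
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_WIDTH, 1920);
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_HEIGHT, 1080);
        AMediaFormat_setRect(fmt, AMEDIAFORMAT_KEY_DISPLAY_CROP, 0, 0, 1919, 1079);

        int32_t left, top, right, bottom;
        if (AMediaFormat_getRect(fmt, AMEDIAFORMAT_KEY_DISPLAY_CROP,
                                 &left, &top, &right, &bottom)) {
            // Android crop rects are inclusive: visible size is
            // (right - left + 1) x (bottom - top + 1).
        }
        AMediaFormat_delete(fmt);
    }
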
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b15de38..f4a51d0 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -53,11 +53,63 @@
typedef struct AMediaCodecCryptoInfo AMediaCodecCryptoInfo;
enum {
+ AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG = 2,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM = 4,
+ AMEDIACODEC_BUFFER_FLAG_PARTIAL_FRAME = 8,
+
AMEDIACODEC_CONFIGURE_FLAG_ENCODE = 1,
AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED = -3,
AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED = -2,
- AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1
+ AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1,
+};
+
+/**
+ * Called when an input buffer becomes available.
+ * The specified index is the index of the available input buffer.
+ */
+typedef void (*AMediaCodecOnAsyncInputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index);
+/**
+ * Called when an output buffer becomes available.
+ * The specified index is the index of the available output buffer.
+ * The specified bufferInfo contains information regarding the available output buffer.
+ */
+typedef void (*AMediaCodecOnAsyncOutputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index,
+ AMediaCodecBufferInfo *bufferInfo);
+/**
+ * Called when the output format has changed.
+ * The specified format contains the new output format.
+ */
+typedef void (*AMediaCodecOnAsyncFormatChanged)(
+ AMediaCodec *codec,
+ void *userdata,
+ AMediaFormat *format);
+/**
+ * Called when the MediaCodec encountered an error.
+ * The specified actionCode indicates the possible actions that client can take,
+ * and it can be checked by calling AMediaCodecActionCode_isRecoverable or
+ * AMediaCodecActionCode_isTransient. If both AMediaCodecActionCode_isRecoverable()
+ * and AMediaCodecActionCode_isTransient() return false, then the codec error is fatal
+ * and the codec must be deleted.
+ * The specified detail may contain more detailed messages about this error.
+ */
+typedef void (*AMediaCodecOnAsyncError)(
+ AMediaCodec *codec,
+ void *userdata,
+ media_status_t error,
+ int32_t actionCode,
+ const char *detail);
+
+struct AMediaCodecOnAsyncNotifyCallback {
+ AMediaCodecOnAsyncInputAvailable onAsyncInputAvailable;
+ AMediaCodecOnAsyncOutputAvailable onAsyncOutputAvailable;
+ AMediaCodecOnAsyncFormatChanged onAsyncFormatChanged;
+ AMediaCodecOnAsyncError onAsyncError;
};
#if __ANDROID_API__ >= 21
@@ -289,6 +341,71 @@
#endif /* __ANDROID_API__ >= 26 */
+#if __ANDROID_API__ >= 28
+
+/**
+ * Get the component name. If the codec was created by createDecoderByType
+ * or createEncoderByType, the component that is chosen is not known beforehand.
+ * The caller shall call AMediaCodec_releaseName to free the returned pointer.
+ */
+media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name);
+
+/**
+ * Free the memory pointed by name which is returned by AMediaCodec_getName.
+ */
+void AMediaCodec_releaseName(AMediaCodec*, char* name);
+
+/**
+ * Set an asynchronous callback for actionable AMediaCodec events.
+ * When asynchronous callback is enabled, the client should not call
+ * AMediaCodec_getInputBuffers(), AMediaCodec_getOutputBuffers(),
+ * AMediaCodec_dequeueInputBuffer() or AMediaCodec_dequeueOutputBuffer().
+ *
+ * Also, AMediaCodec_flush() behaves differently in asynchronous mode.
+ * After calling AMediaCodec_flush(), you must call AMediaCodec_start() to
+ * "resume" receiving input buffers, even if an input surface was created.
+ *
+ * Refer to the definition of AMediaCodecOnAsyncNotifyCallback on how each
+ * callback function is called and what are specified.
+ * The specified userdata is the pointer used when those callback functions are
+ * called.
+ *
+ * All callbacks are fired on one NDK internal thread.
+ * AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
+ * No heavy-duty task should be performed on the callback thread.
+ */
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec*,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata);
+
+/**
+ * Release the crypto if applicable.
+ */
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec*);
+
+/**
+ * Call this after AMediaCodec_configure() returns successfully to get the input
+ * format accepted by the codec. Do this to determine what optional configuration
+ * parameters were supported by the codec.
+ */
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*);
+
+/**
+ * Returns true if the codec cannot proceed further, but can be recovered by stopping,
+ * configuring, and starting again.
+ */
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode);
+
+/**
+ * Returns true if the codec error is a transient issue, perhaps due to
+ * resource constraints, and that the method (or encoding/decoding) may be
+ * retried at a later time.
+ */
+bool AMediaCodecActionCode_isTransient(int32_t actionCode);
+
+#endif /* __ANDROID_API__ >= 28 */
+
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
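
A minimal sketch of wiring up the asynchronous callbacks declared above; only skeleton handlers are shown (their names are illustrative), and the codec setup (create/configure/start) is assumed to happen elsewhere:

    #include <media/NdkMediaCodec.h>

    static void onInput(AMediaCodec *codec, void *userdata, int32_t index) {
        // Fill the input buffer at |index| and return it with AMediaCodec_queueInputBuffer().
        (void)codec; (void)userdata; (void)index;
    }

    static void onOutput(AMediaCodec *codec, void *userdata, int32_t index,
                         AMediaCodecBufferInfo *bufferInfo) {
        // Render or release the output buffer at |index|.
        (void)codec; (void)userdata; (void)index; (void)bufferInfo;
    }

    static void onFormat(AMediaCodec *codec, void *userdata, AMediaFormat *format) {
        (void)codec; (void)userdata; (void)format;
    }

    static void onError(AMediaCodec *codec, void *userdata, media_status_t error,
                        int32_t actionCode, const char *detail) {
        if (!AMediaCodecActionCode_isRecoverable(actionCode) &&
            !AMediaCodecActionCode_isTransient(actionCode)) {
            // Fatal error: the codec must be deleted.
        }
        (void)codec; (void)userdata; (void)error; (void)detail;
    }

    static media_status_t enableAsyncMode(AMediaCodec *codec, void *userdata) {
        AMediaCodecOnAsyncNotifyCallback cb = { onInput, onOutput, onFormat, onError };
        // All callbacks fire on one NDK internal thread; keep the handlers lightweight.
        return AMediaCodec_setAsyncNotifyCallback(codec, cb, userdata);
    }
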
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index da61b64..e48fcbe 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -35,6 +35,17 @@
typedef enum {
AMEDIA_OK = 0,
+ /**
+     * This indicates that the required resource could not be allocated.
+ */
+ AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE = 1100,
+
+ /**
+ * This indicates the resource manager reclaimed the media resource used by the codec.
+     * With this error, the codec must be released, as it has moved to a terminal state.
+ */
+ AMEDIACODEC_ERROR_RECLAIMED = 1101,
+
AMEDIA_ERROR_BASE = -10000,
AMEDIA_ERROR_UNKNOWN = AMEDIA_ERROR_BASE,
AMEDIA_ERROR_MALFORMED = AMEDIA_ERROR_BASE - 1,
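
A hedged sketch (not from this patch) of how a client might react to the two codec error codes documented above; handleCodecStatus is a hypothetical helper name.

#include <media/NdkMediaCodec.h>
#include <media/NdkMediaError.h>

// Returns true if the caller needs to retry, possibly with a new codec instance.
static bool handleCodecStatus(AMediaCodec **codec, media_status_t status) {
    switch (status) {
    case AMEDIA_OK:
        return false;
    case AMEDIACODEC_ERROR_RECLAIMED:
        // The resource manager reclaimed the codec's resources: the instance is in a
        // terminal state and must be released before a new attempt.
        AMediaCodec_delete(*codec);
        *codec = nullptr;
        return true;
    case AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE:
        // A required resource could not be allocated; keep the instance and retry later.
        return true;
    default:
        return true;   // other errors: up to the caller's policy
    }
}
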
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 018ab76..b6489c7 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -51,6 +51,7 @@
bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out);
bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out);
bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out);
+bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out);
/**
* The returned data is owned by the format and remains valid as long as the named entry
* is part of the format.
@@ -80,33 +81,75 @@
/**
* XXX should these be ints/enums that we look up in a table as needed?
*/
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
extern const char* AMEDIAFORMAT_KEY_DURATION;
extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
+extern const char* AMEDIAFORMAT_KEY_GRID_COLS;
+extern const char* AMEDIAFORMAT_KEY_GRID_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
+extern const char* AMEDIAFORMAT_KEY_GRID_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
extern const char* AMEDIAFORMAT_KEY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
+extern const char* AMEDIAFORMAT_KEY_LATENCY;
+extern const char* AMEDIAFORMAT_KEY_LEVEL;
extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
extern const char* AMEDIAFORMAT_KEY_MIME;
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
+extern const char* AMEDIAFORMAT_KEY_PRIORITY;
+extern const char* AMEDIAFORMAT_KEY_PROFILE;
extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
+extern const char* AMEDIAFORMAT_KEY_ROTATION;
extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_STRIDE;
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
+extern const char* AMEDIAFORMAT_KEY_WIDTH;
#endif /* __ANDROID_API__ >= 21 */
+#if __ANDROID_API__ >= 28
+bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out);
+bool AMediaFormat_getRect(AMediaFormat*, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
+
+void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value);
+void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value);
+void AMediaFormat_setRect(AMediaFormat*, const char* name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom);
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
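
A short, illustrative sketch (not part of this patch) of the API-28 AMediaFormat accessors declared above; the key strings used here are arbitrary examples, not constants defined by this header.

#include <media/NdkMediaFormat.h>

static AMediaFormat *buildExampleFormat() {
    AMediaFormat *fmt = AMediaFormat_new();

    AMediaFormat_setDouble(fmt, "capture-rate", 30.0);
    AMediaFormat_setSize(fmt, "example-buffer-size", 2 * 1024 * 1024);
    AMediaFormat_setRect(fmt, "example-crop", 0 /* left */, 0 /* top */,
                         1919 /* right */, 1079 /* bottom */);

    double rate = 0.0;
    size_t bufferSize = 0;
    int32_t left, top, right, bottom;
    AMediaFormat_getDouble(fmt, "capture-rate", &rate);
    AMediaFormat_getSize(fmt, "example-buffer-size", &bufferSize);
    AMediaFormat_getRect(fmt, "example-crop", &left, &top, &right, &bottom);

    return fmt;   // the caller releases it with AMediaFormat_delete()
}
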
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d7ad370..f2d97cd 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -26,30 +26,63 @@
AImage_getPlaneRowStride; # introduced=24
AImage_getTimestamp; # introduced=24
AImage_getWidth; # introduced=24
+ AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+ AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
+ AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
+ AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
AMEDIAFORMAT_KEY_BIT_RATE; # var
+ AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+ AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
+ AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+ AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
AMEDIAFORMAT_KEY_DURATION; # var
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
AMEDIAFORMAT_KEY_FRAME_RATE; # var
+ AMEDIAFORMAT_KEY_GRID_COLS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_WIDTH; # var introduced=28
+ AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var
+ AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
AMEDIAFORMAT_KEY_IS_ADTS; # var
AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
AMEDIAFORMAT_KEY_IS_DEFAULT; # var
AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
AMEDIAFORMAT_KEY_LANGUAGE; # var
+ AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
+ AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
AMEDIAFORMAT_KEY_MAX_WIDTH; # var
AMEDIAFORMAT_KEY_MIME; # var
+ AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
+ AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
+ AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
+ AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+ AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+ AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_STRIDE; # var
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+ AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
AMEDIAFORMAT_KEY_WIDTH; # var
+ AMediaCodecActionCode_isRecoverable; # introduced=28
+ AMediaCodecActionCode_isTransient; # introduced=28
AMediaCodecCryptoInfo_delete;
AMediaCodecCryptoInfo_getClearBytes;
AMediaCodecCryptoInfo_getEncryptedBytes;
@@ -68,12 +101,16 @@
AMediaCodec_dequeueOutputBuffer;
AMediaCodec_flush;
AMediaCodec_getInputBuffer;
+ AMediaCodec_getInputFormat; # introduced=28
+ AMediaCodec_getName; # introduced=28
AMediaCodec_getOutputBuffer;
AMediaCodec_getOutputFormat;
AMediaCodec_queueInputBuffer;
AMediaCodec_queueSecureInputBuffer;
+ AMediaCodec_releaseCrypto; # introduced=28
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
+ AMediaCodec_setAsyncNotifyCallback; # introduced=28
AMediaCodec_setOutputSurface; # introduced=24
AMediaCodec_setParameters; # introduced=26
AMediaCodec_setInputSurface; # introduced=26
@@ -127,16 +164,21 @@
AMediaExtractor_unselectTrack;
AMediaFormat_delete;
AMediaFormat_getBuffer;
+ AMediaFormat_getDouble; # introduced=28
AMediaFormat_getFloat;
AMediaFormat_getInt32;
AMediaFormat_getInt64;
+ AMediaFormat_getRect; # introduced=28
AMediaFormat_getSize;
AMediaFormat_getString;
AMediaFormat_new;
AMediaFormat_setBuffer;
+ AMediaFormat_setDouble; # introduced=28
AMediaFormat_setFloat;
AMediaFormat_setInt32;
AMediaFormat_setInt64;
+ AMediaFormat_setRect; # introduced=28
+ AMediaFormat_setSize; # introduced=28
AMediaFormat_setString;
AMediaFormat_toString;
AMediaMuxer_addTrack;
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 9cb0357..aeb32bb 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -674,7 +674,11 @@
audio_session_t sessionId = input.sessionId;
if (sessionId == AUDIO_SESSION_ALLOCATE) {
sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+ lStatus = BAD_VALUE;
+ goto Exit;
}
+
output.sessionId = sessionId;
output.outputId = AUDIO_IO_HANDLE_NONE;
output.selectedDeviceId = input.selectedDeviceId;
@@ -1568,120 +1572,144 @@
// ----------------------------------------------------------------------------
-sp<media::IAudioRecord> AudioFlinger::openRecord(
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& opPackageName,
- size_t *frameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid,
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers,
- status_t *status,
- audio_port_handle_t portId)
+sp<media::IAudioRecord> AudioFlinger::createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status)
{
sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
sp<Client> client;
status_t lStatus;
- audio_session_t lSessionId;
+ audio_session_t sessionId = input.sessionId;
+ audio_port_handle_t portId;
- cblk.clear();
- buffers.clear();
+ output.cblk.clear();
+ output.buffers.clear();
- bool updatePid = (pid == -1);
+ bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+ uid_t clientUid = input.clientInfo.clientUid;
if (!isTrustedCallingUid(callingUid)) {
- ALOGW_IF((uid_t)clientUid != callingUid,
- "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
+ ALOGW_IF(clientUid != callingUid,
+ "%s uid %d tried to pass itself off as %d",
+ __FUNCTION__, callingUid, clientUid);
clientUid = callingUid;
updatePid = true;
}
-
+ pid_t clientPid = input.clientInfo.clientPid;
if (updatePid) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
- ALOGW_IF(pid != -1 && pid != callingPid,
+ ALOGW_IF(clientPid != -1 && clientPid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
- __func__, callingUid, callingPid, pid);
- pid = callingPid;
+ __func__, callingUid, callingPid, clientPid);
+ clientPid = callingPid;
}
// check calling permissions
- if (!recordingAllowed(opPackageName, tid, clientUid)) {
- ALOGE("openRecord() permission denied: recording not allowed");
+ if (!recordingAllowed(input.opPackageName, input.clientInfo.clientTid, clientUid)) {
+ ALOGE("createRecord() permission denied: recording not allowed");
lStatus = PERMISSION_DENIED;
goto Exit;
}
-
- // further sample rate checks are performed by createRecordTrack_l()
- if (sampleRate == 0) {
- ALOGE("openRecord() invalid sample rate %u", sampleRate);
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
// we don't yet support anything other than linear PCM
- if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
- ALOGE("openRecord() invalid format %#x", format);
+ if (!audio_is_valid_format(input.config.format) || !audio_is_linear_pcm(input.config.format)) {
+ ALOGE("createRecord() invalid format %#x", input.config.format);
lStatus = BAD_VALUE;
goto Exit;
}
// further channel mask checks are performed by createRecordTrack_l()
- if (!audio_is_input_channel(channelMask)) {
- ALOGE("openRecord() invalid channel mask %#x", channelMask);
+ if (!audio_is_input_channel(input.config.channel_mask)) {
+ ALOGE("createRecord() invalid channel mask %#x", input.config.channel_mask);
lStatus = BAD_VALUE;
goto Exit;
}
+ if (sessionId == AUDIO_SESSION_ALLOCATE) {
+ sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
+ output.sessionId = sessionId;
+ output.inputId = AUDIO_IO_HANDLE_NONE;
+ output.selectedDeviceId = input.selectedDeviceId;
+ output.flags = input.flags;
+
+ client = registerPid(clientPid);
+
+ // Not a conventional loop, but a retry loop for at most two iterations total.
+ // Try first with the FAST flag if requested, then try again without it if that fails.
+ // Exits the loop via break on success, or via goto Exit on error.
+ // The sp<> references will be dropped when re-entering scope.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
+ for (;;) {
+ lStatus = AudioSystem::getInputForAttr(&input.attr, &output.inputId,
+ sessionId,
+ // FIXME compare to AudioTrack
+ clientPid,
+ clientUid,
+ &input.config,
+ output.flags, &output.selectedDeviceId, &portId);
+
{
Mutex::Autolock _l(mLock);
- RecordThread *thread = checkRecordThread_l(input);
+ RecordThread *thread = checkRecordThread_l(output.inputId);
if (thread == NULL) {
- ALOGE("openRecord() checkRecordThread_l failed");
+ ALOGE("createRecord() checkRecordThread_l failed");
lStatus = BAD_VALUE;
goto Exit;
}
- client = registerPid(pid);
+ ALOGV("createRecord() lSessionId: %d input %d", sessionId, output.inputId);
- if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
- if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
- lStatus = BAD_VALUE;
- goto Exit;
- }
- lSessionId = *sessionId;
- } else {
- // if no audio session id is provided, create one here
- lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- if (sessionId != NULL) {
- *sessionId = lSessionId;
- }
- }
- ALOGV("openRecord() lSessionId: %d input %d", lSessionId, input);
+ output.sampleRate = input.config.sample_rate;
+ output.frameCount = input.frameCount;
+ output.notificationFrameCount = input.notificationFrameCount;
- recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
- frameCount, lSessionId, notificationFrames,
- clientUid, flags, tid, &lStatus, portId);
+ recordTrack = thread->createRecordTrack_l(client, &output.sampleRate,
+ input.config.format, input.config.channel_mask,
+ &output.frameCount, sessionId,
+ &output.notificationFrameCount,
+ clientUid, &output.flags,
+ input.clientInfo.clientTid,
+ &lStatus, portId);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
- if (lStatus == NO_ERROR) {
- // Check if one effect chain was awaiting for an AudioRecord to be created on this
- // session and move it to this thread.
- sp<EffectChain> chain = getOrphanEffectChain_l(lSessionId);
- if (chain != 0) {
- Mutex::Autolock _l(thread->mLock);
- thread->addEffectChain_l(chain);
- }
+ // lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
+ // audio policy manager without FAST constraint
+ if (lStatus == BAD_TYPE) {
+ AudioSystem::releaseInput(output.inputId, sessionId);
+ recordTrack.clear();
+ continue;
}
+
+ if (lStatus != NO_ERROR) {
+ recordTrack.clear();
+ goto Exit;
+ }
+
+ // Check if an effect chain was waiting for an AudioRecord to be created on this
+ // session, and if so move it to this thread.
+ sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
+ if (chain != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->addEffectChain_l(chain);
+ }
+ break;
+ }
+ // End of retry loop.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
}
+ output.cblk = recordTrack->getCblk();
+ output.buffers = recordTrack->getBuffers();
+
+ // return handle to client
+ recordHandle = new RecordHandle(recordTrack);
+
+Exit:
if (lStatus != NO_ERROR) {
// remove local strong reference to Client before deleting the RecordTrack so that the
// Client destructor is called by the TrackBase destructor with mClientLock held
@@ -1691,17 +1719,8 @@
Mutex::Autolock _cl(mClientLock);
client.clear();
}
- recordTrack.clear();
- goto Exit;
}
- cblk = recordTrack->getCblk();
- buffers = recordTrack->getBuffers();
-
- // return handle to client
- recordHandle = new RecordHandle(recordTrack);
-
-Exit:
*status = lStatus;
return recordHandle;
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 506420c..bc73ffd 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -118,23 +118,9 @@
CreateTrackOutput& output,
status_t *status);
- virtual sp<media::IAudioRecord> openRecord(
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& opPackageName,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid,
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers,
- status_t *status /*non-NULL*/,
- audio_port_handle_t portId);
+ virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status);
virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const;
virtual audio_format_t format(audio_io_handle_t output) const;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index e0d0d7b..ef6e223 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -328,21 +328,21 @@
} else {
{ // convert input to int16_t as effect doesn't support float.
if (!auxType) {
- if (mInBuffer16.get() == nullptr) {
- ALOGW("%s: mInBuffer16 is null, bypassing", __func__);
+ if (mInConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mInConversionBuffer is null, bypassing", __func__);
goto data_bypass;
}
const float * const pIn = mInBuffer->audioBuffer()->f32;
- int16_t * const pIn16 = mInBuffer16->audioBuffer()->s16;
+ int16_t * const pIn16 = mInConversionBuffer->audioBuffer()->s16;
memcpy_to_i16_from_float(
pIn16, pIn, inChannelCount * mConfig.inputCfg.buffer.frameCount);
}
if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- if (mOutBuffer16.get() == nullptr) {
- ALOGW("%s: mOutBuffer16 is null, bypassing", __func__);
+ if (mOutConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mOutConversionBuffer is null, bypassing", __func__);
goto data_bypass;
}
- int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
+ int16_t * const pOut16 = mOutConversionBuffer->audioBuffer()->s16;
const float * const pOut = mOutBuffer->audioBuffer()->f32;
memcpy_to_i16_from_float(
pOut16,
@@ -354,7 +354,7 @@
ret = mEffectInterface->process();
{ // convert output back to float.
- const int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
+ const int16_t * const pOut16 = mOutConversionBuffer->audioBuffer()->s16;
float * const pOut = mOutBuffer->audioBuffer()->f32;
memcpy_to_float_from_i16(
pOut, pOut16, outChannelCount * mConfig.outputCfg.buffer.frameCount);
@@ -906,7 +906,7 @@
mEffectInterface->setInBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // aux effects do in place conversion to float - we don't allocate mInBuffer16 for them.
+ // aux effects do in-place conversion to float - we don't allocate mInConversionBuffer.
// Theoretically insert effects can also do in-place conversions (destroying
// the original buffer) when the output buffer is identical to the input buffer,
// but we don't optimize for it here.
@@ -920,17 +920,18 @@
ALOGV("%s: setInBuffer updating for inChannels:%d inFrameCount:%zu total size:%zu",
__func__, inChannels, inFrameCount, size);
- if (size > 0 && (mInBuffer16.get() == nullptr || size > mInBuffer16->getSize())) {
- mInBuffer16.clear();
- ALOGV("%s: allocating mInBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mInBuffer16);
+ if (size > 0 && (mInConversionBuffer.get() == nullptr
+ || size > mInConversionBuffer->getSize())) {
+ mInConversionBuffer.clear();
+ ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mInConversionBuffer);
}
- if (mInBuffer16.get() != nullptr) {
+ if (mInConversionBuffer.get() != nullptr) {
// FIXME: confirm buffer has enough size.
- mInBuffer16->setFrameCount(inFrameCount);
- mEffectInterface->setInBuffer(mInBuffer16);
+ mInConversionBuffer->setFrameCount(inFrameCount);
+ mEffectInterface->setInBuffer(mInConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mInBuffer16", __func__);
+ ALOGE("%s cannot create mInConversionBuffer", __func__);
}
}
#endif
@@ -948,7 +949,7 @@
mEffectInterface->setOutBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // Note: Any effect that does not accumulate does not need mOutBuffer16 and
+ // Note: Any effect that does not accumulate does not need mOutConversionBuffer and
// can do in-place conversion from int16_t to float. We don't optimize here.
if (!mSupportsFloat && mOutBuffer.get() != nullptr) {
const size_t outFrameCount = mConfig.outputCfg.buffer.frameCount;
@@ -958,16 +959,17 @@
ALOGV("%s: setOutBuffer updating for outChannels:%d outFrameCount:%zu total size:%zu",
__func__, outChannels, outFrameCount, size);
- if (size > 0 && (mOutBuffer16.get() == nullptr || size > mOutBuffer16->getSize())) {
- mOutBuffer16.clear();
- ALOGV("%s: allocating mOutBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mOutBuffer16);
+ if (size > 0 && (mOutConversionBuffer.get() == nullptr
+ || size > mOutConversionBuffer->getSize())) {
+ mOutConversionBuffer.clear();
+ ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mOutConversionBuffer);
}
- if (mOutBuffer16.get() != nullptr) {
- mOutBuffer16->setFrameCount(outFrameCount);
- mEffectInterface->setOutBuffer(mOutBuffer16);
+ if (mOutConversionBuffer.get() != nullptr) {
+ mOutConversionBuffer->setFrameCount(outFrameCount);
+ mEffectInterface->setOutBuffer(mOutConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mOutBuffer16", __func__);
+ ALOGE("%s cannot create mOutConversionBuffer", __func__);
}
}
#endif
@@ -1241,6 +1243,20 @@
return s;
}
+static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
+ std::stringstream ss;
+
+ if (buffer.get() == nullptr) {
+ return "nullptr"; // make different than below
+ } else if (buffer->externalData() != nullptr) {
+ ss << (isInput ? buffer->externalData() : buffer->audioBuffer()->raw)
+ << " -> "
+ << (isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+ } else {
+ ss << buffer->audioBuffer()->raw;
+ }
+ return ss.str();
+}
void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
{
@@ -1305,19 +1321,13 @@
result.append(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- if (!mSupportsFloat) {
- int16_t* pIn16 = mInBuffer16 != 0 ? mInBuffer16->audioBuffer()->s16 : NULL;
- int16_t* pOut16 = mOutBuffer16 != 0 ? mOutBuffer16->audioBuffer()->s16 : NULL;
- result.append("\t\t- Float and int16 buffers\n");
- result.append("\t\t\tIn_float In_int16 Out_float Out_int16\n");
- snprintf(buffer, SIZE,"\t\t\t%p %p %p %p\n",
- mConfig.inputCfg.buffer.raw,
- pIn16,
- pOut16,
- mConfig.outputCfg.buffer.raw);
- result.append(buffer);
- }
+ result.appendFormat("\t\t- HAL buffers:\n"
+ "\t\t\tIn(%s) InConversion(%s) Out(%s) OutConversion(%s)\n",
+ dumpInOutBuffer(true /* isInput */, mInBuffer).c_str(),
+ dumpInOutBuffer(true /* isInput */, mInConversionBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutConversionBuffer).c_str());
#endif
snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -2161,19 +2171,6 @@
}
}
-static void dumpInOutBuffer(
- char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
- if (buffer == nullptr) {
- snprintf(dump, dumpSize, "%p", buffer);
- } else if (buffer->externalData() != nullptr) {
- snprintf(dump, dumpSize, "%p -> %p",
- isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
- isInput ? buffer->audioBuffer()->raw : buffer->externalData());
- } else {
- snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
- }
-}
-
void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
@@ -2191,15 +2188,13 @@
result.append("\tCould not lock mutex:\n");
}
- char inBufferStr[64], outBufferStr[64];
- dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
- dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
- snprintf(buffer, SIZE, "\t%-*s%-*s Active tracks:\n",
- (int)strlen(inBufferStr), "In buffer ",
- (int)strlen(outBufferStr), "Out buffer ");
- result.append(buffer);
- snprintf(buffer, SIZE, "\t%s %s %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
- result.append(buffer);
+ const std::string inBufferStr = dumpInOutBuffer(true /* isInput */, mInBuffer);
+ const std::string outBufferStr = dumpInOutBuffer(false /* isInput */, mOutBuffer);
+ result.appendFormat("\t%-*s%-*s Active tracks:\n",
+ (int)inBufferStr.size(), "In buffer ",
+ (int)outBufferStr.size(), "Out buffer ");
+ result.appendFormat("\t%s %s %d\n",
+ inBufferStr.c_str(), outBufferStr.c_str(), mActiveTrackCnt);
write(fd, result.string(), result.size());
for (size_t i = 0; i < numEffects; ++i) {
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 1864e0f..eea3208 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -171,8 +171,8 @@
#ifdef FLOAT_EFFECT_CHAIN
bool mSupportsFloat; // effect supports float processing
- sp<EffectBufferHalInterface> mInBuffer16; // Buffers for interacting with HAL at 16 bits
- sp<EffectBufferHalInterface> mOutBuffer16;
+ sp<EffectBufferHalInterface> mInConversionBuffer; // Buffers for HAL conversion if needed.
+ sp<EffectBufferHalInterface> mOutConversionBuffer;
#endif
};
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b2a1e18..7636df6 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6708,12 +6708,12 @@
// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
- uint32_t sampleRate,
+ uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
audio_session_t sessionId,
- size_t *notificationFrames,
+ size_t *pNotificationFrameCount,
uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
@@ -6721,16 +6721,30 @@
audio_port_handle_t portId)
{
size_t frameCount = *pFrameCount;
+ size_t notificationFrameCount = *pNotificationFrameCount;
sp<RecordTrack> track;
status_t lStatus;
audio_input_flags_t inputFlags = mInput->flags;
+ audio_input_flags_t requestedFlags = *flags;
+ uint32_t sampleRate;
+
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
+ ALOGE("createRecordTrack_l() audio driver not initialized");
+ goto Exit;
+ }
+
+ if (*pSampleRate == 0) {
+ *pSampleRate = mSampleRate;
+ }
+ sampleRate = *pSampleRate;
// special case for FAST flag considered OK if fast capture is present
if (hasFastCapture()) {
inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
}
- // Check if requested flags are compatible with output stream flags
+ // Check if requested flags are compatible with input stream flags
if ((*flags & inputFlags) != *flags) {
ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
" input flags (%08x)",
@@ -6785,12 +6799,20 @@
}
}
+ // If the FAST or RAW flags were corrected, ask the caller to request a new input from audio policy
+ if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
+ (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
+ *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
+ lStatus = BAD_TYPE;
+ goto Exit;
+ }
+
// compute track buffer size in frames, and suggest the notification frame count
if (*flags & AUDIO_INPUT_FLAG_FAST) {
// fast track: frame count is exactly the pipe depth
frameCount = mPipeFramesP2;
// ignore requested notificationFrames, and always notify exactly once every HAL buffer
- *notificationFrames = mFrameCount;
+ notificationFrameCount = mFrameCount;
} else {
// not fast track: max notification period is resampled equivalent of one HAL buffer time
// or 20 ms if there is a fast capture
@@ -6809,17 +6831,12 @@
const size_t minFrameCount = maxNotificationFrames *
max(kMinNotifications, minNotificationsByMs);
frameCount = max(frameCount, minFrameCount);
- if (*notificationFrames == 0 || *notificationFrames > maxNotificationFrames) {
- *notificationFrames = maxNotificationFrames;
+ if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
+ notificationFrameCount = maxNotificationFrames;
}
}
*pFrameCount = frameCount;
-
- lStatus = initCheck();
- if (lStatus != NO_ERROR) {
- ALOGE("createRecordTrack_l() audio driver not initialized");
- goto Exit;
- }
+ *pNotificationFrameCount = notificationFrameCount;
{ // scope for mLock
Mutex::Autolock _l(mLock);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index c7b60d6..17f26c5 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1327,12 +1327,12 @@
sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
- uint32_t sampleRate,
+ uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
audio_session_t sessionId,
- size_t *notificationFrames,
+ size_t *pNotificationFrameCount,
uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index d4ce0b4..a3ea756 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -192,7 +192,7 @@
// where for AudioTrack (but not AudioRecord),
// 8-bit PCM samples are stored as 16-bit
const size_t mFrameCount;// size of track buffer given at createTrack() or
- // openRecord(), and then adjusted as needed
+ // createRecord(), and then adjusted as needed
const audio_session_t mSessionId;
uid_t mUid;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b169bac..d9cd121 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -34,8 +34,8 @@
class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
{
public:
- explicit AudioInputDescriptor(const sp<IOProfile>& profile);
- void setIoHandle(audio_io_handle_t ioHandle);
+ explicit AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface);
audio_port_handle_t getId() const;
audio_module_handle_t getModuleHandle() const;
uint32_t getOpenRefCount() const;
@@ -73,6 +73,14 @@
void setPatchHandle(audio_patch_handle_t handle);
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input);
+ void close();
+
private:
audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
@@ -85,6 +93,7 @@
// a particular input started and prevent preemption of this active input by this session.
// We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
SortedVector<audio_session_t> mPreemptedSessions;
+ AudioPolicyClientInterface *mClientInterface;
};
class AudioInputCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index c09cb5a..0be8fc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -101,8 +101,6 @@
status_t dump(int fd);
- void setIoHandle(audio_io_handle_t ioHandle);
-
virtual audio_devices_t device() const;
virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
virtual audio_devices_t supportedDevices();
@@ -122,6 +120,14 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port *port) const;
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output);
+ void close();
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index ec04ef7..118f0d2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -34,7 +34,11 @@
{
public:
IOProfile(const String8 &name, audio_port_role_t role)
- : AudioPort(name, AUDIO_PORT_TYPE_MIX, role) {}
+ : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
+ maxOpenCount((role == AUDIO_PORT_ROLE_SOURCE) ? 1 : 0),
+ curOpenCount(0),
+ maxActiveCount(1),
+ curActiveCount(0) {}
// For a Profile aka MixPort, tag name and name are equivalent.
virtual const String8 getTagName() const { return getName(); }
@@ -103,6 +107,34 @@
const DeviceVector &getSupportedDevices() const { return mSupportedDevices; }
+ bool canOpenNewIo() {
+ if (maxOpenCount == 0 || curOpenCount < maxOpenCount) {
+ return true;
+ }
+ return false;
+ }
+
+ bool canStartNewIo() {
+ if (maxActiveCount == 0 || curActiveCount < maxActiveCount) {
+ return true;
+ }
+ return false;
+ }
+
+ // Maximum number of input or output streams that can be simultaneously opened for this profile.
+ // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
+ // profiles and 0 for input profiles
+ uint32_t maxOpenCount;
+ // Number of streams currently opened for this profile.
+ uint32_t curOpenCount;
+ // Maximum number of input or output streams that can be simultaneously active for this profile.
+ // By convention 0 means no limit. To respect legacy behavior, the constructor above
+ // initializes this to 1 for both output and input profiles.
+ uint32_t maxActiveCount;
+ // Number of streams currently active for this profile. This is not the number of active clients
+ // (AudioTrack or AudioRecord) but the number of active HAL streams.
+ uint32_t curActiveCount;
+
private:
DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
};
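
Illustrative only (not part of this patch): the intended bookkeeping around the new counters, mirroring how the AudioInputDescriptor/SwAudioOutputDescriptor open()/close() and the startOutput()/stopOutput() changes later in this patch use them. openStreamForProfile and startStreamForProfile are hypothetical stand-ins for the real call sites.

status_t openStreamForProfile(const sp<IOProfile>& profile /* , ... */) {
    if (!profile->canOpenNewIo()) {          // maxOpenCount reached (0 means unlimited)
        return INVALID_OPERATION;
    }
    // ... open the HAL stream via AudioPolicyClientInterface here ...
    profile->curOpenCount++;                 // count the stream only once it is open
    return NO_ERROR;
}

status_t startStreamForProfile(const sp<IOProfile>& profile) {
    if (!profile->canStartNewIo()) {         // maxActiveCount reached (0 means unlimited)
        return INVALID_OPERATION;
    }
    profile->curActiveCount++;               // decremented again when the stream stops or closes
    return NO_ERROR;
}
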
diff --git a/services/audiopolicy/common/managerdefinitions/include/Serializer.h b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
index 078b582..3b0e209 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Serializer.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
@@ -92,6 +92,8 @@
static const char name[];
static const char role[];
static const char flags[];
+ static const char maxOpenCount[];
+ static const char maxActiveCount[];
};
typedef IOProfile Element;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 2492ed6..46168a4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "APM::AudioInputDescriptor"
//#define LOG_NDEBUG 0
+#include <AudioPolicyInterface.h>
#include "AudioInputDescriptor.h"
#include "IOProfile.h"
#include "AudioGain.h"
@@ -26,10 +27,12 @@
namespace android {
-AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
+AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface)
: mIoHandle(0),
mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
- mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
+ mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
+ mClientInterface(clientInterface)
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -39,12 +42,6 @@
}
}
-void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
{
if (mProfile == 0) {
@@ -192,6 +189,74 @@
return config;
}
+status_t AudioInputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ String8 lAddress = address;
+ if (lAddress == "") {
+ const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+ const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+ lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+ : String8("");
+ }
+
+ mDevice = device;
+
+ ALOGV("opening input for device %08x address %s profile %p name %s",
+ mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openInput(mProfile->getModuleHandle(),
+ input,
+ &lConfig,
+ &mDevice,
+ lAddress,
+ source,
+ flags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openInput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *input;
+ mProfile->curOpenCount++;
+ }
+
+ return status;
+}
+
+
+void AudioInputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ mClientInterface->closeInput(mIoHandle);
+ LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+ __FUNCTION__, mProfile->curOpenCount);
+ if (isActive()) {
+ mProfile->curActiveCount--;
+ }
+ mProfile->curOpenCount--;
+ mIoHandle = AUDIO_IO_HANDLE_NONE;
+ }
+}
+
status_t AudioInputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 4d3c3b5..f6ee1c3 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -23,6 +23,7 @@
#include "AudioGain.h"
#include "Volume.h"
#include "HwModule.h"
+#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
// A device mask for all audio output devices that are considered "remote" when evaluating
@@ -231,13 +232,6 @@
}
}
-void SwAudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
-
status_t SwAudioOutputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
@@ -387,6 +381,96 @@
return changed;
}
+status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ String8 lAddress = address;
+ if (lAddress == "") {
+ const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+ const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+ lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+ : String8("");
+ }
+
+ mDevice = device;
+ // if the selected profile is offloaded and no offload info was specified,
+ // create a default one
+ if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
+ lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ lConfig.offload_info = AUDIO_INFO_INITIALIZER;
+ lConfig.offload_info.sample_rate = lConfig.sample_rate;
+ lConfig.offload_info.channel_mask = lConfig.channel_mask;
+ lConfig.offload_info.format = lConfig.format;
+ lConfig.offload_info.stream_type = stream;
+ lConfig.offload_info.duration_us = -1;
+ lConfig.offload_info.has_video = true; // conservative
+ lConfig.offload_info.is_streaming = true; // likely
+ }
+
+ mFlags = (audio_output_flags_t)(mFlags | flags);
+
+ ALOGV("opening output for device %08x address %s profile %p name %s",
+ mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
+ output,
+ &lConfig,
+ &mDevice,
+ lAddress,
+ &mLatency,
+ mFlags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openOutput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *output;
+ mProfile->curOpenCount++;
+ }
+
+ return status;
+}
+
+
+void SwAudioOutputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ AudioParameter param;
+ param.add(String8("closing"), String8("true"));
+ mClientInterface->setParameters(mIoHandle, param.toString());
+
+ mClientInterface->closeOutput(mIoHandle);
+
+ LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+ __FUNCTION__, mProfile->curOpenCount);
+ if (isActive()) {
+ mProfile->curActiveCount--;
+ }
+ mProfile->curOpenCount--;
+ mIoHandle = AUDIO_IO_HANDLE_NONE;
+ }
+}
+
+
// HwAudioOutputDescriptor implementation
HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 74ef4ec..fc89672 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -122,6 +122,16 @@
result.append("\n");
write(fd, result.string(), result.size());
mSupportedDevices.dump(fd, String8("Supported"), 4, false);
+
+ result.clear();
+ snprintf(buffer, SIZE, "\n - maxOpenCount: %u - curOpenCount: %u\n",
+ maxOpenCount, curOpenCount);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " - maxActiveCount: %u - curActiveCount: %u\n",
+ maxActiveCount, curActiveCount);
+ result.append(buffer);
+
+ write(fd, result.string(), result.size());
}
void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 0908ffc..aa589f4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -217,6 +217,8 @@
const char MixPortTraits::Attributes::name[] = "name";
const char MixPortTraits::Attributes::role[] = "role";
const char MixPortTraits::Attributes::flags[] = "flags";
+const char MixPortTraits::Attributes::maxOpenCount[] = "maxOpenCount";
+const char MixPortTraits::Attributes::maxActiveCount[] = "maxActiveCount";
status_t MixPortTraits::deserialize(_xmlDoc *doc, const _xmlNode *child, PtrElement &mixPort,
PtrSerializingCtx /*serializingContext*/)
@@ -259,6 +261,14 @@
mixPort->setFlags(InputFlagConverter::maskFromString(flags));
}
}
+ string maxOpenCount = getXmlAttribute(child, Attributes::maxOpenCount);
+ if (!maxOpenCount.empty()) {
+ convertTo(maxOpenCount, mixPort->maxOpenCount);
+ }
+ string maxActiveCount = getXmlAttribute(child, Attributes::maxActiveCount);
+ if (!maxActiveCount.empty()) {
+ convertTo(maxActiveCount, mixPort->maxActiveCount);
+ }
// Deserialize children
AudioGainTraits::Collection gains;
deserializeCollection<AudioGainTraits>(doc, child, gains, NULL);
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 62cbfc1..b363779 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -843,12 +843,10 @@
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
- ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
+ ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %x, channel mask %x, flags %x",
device, config->sample_rate, config->format, config->channel_mask, flags);
- *output = getOutputForDevice(device, session, *stream,
- config->sample_rate, config->format, config->channel_mask,
- flags, &config->offload_info);
+ *output = getOutputForDevice(device, session, *stream, config, flags);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
@@ -867,11 +865,8 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+ const audio_config_t *config,
+ audio_output_flags_t flags)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
@@ -898,7 +893,7 @@
if (stream == AUDIO_STREAM_TTS) {
flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
- audio_is_linear_pcm(format)) {
+ audio_is_linear_pcm(config->format)) {
flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
ALOGV("Set VoIP and Direct output flags for PCM format");
@@ -909,8 +904,8 @@
// skip direct output selection if the request can obviously be attached to a mixed output
// and not explicitly requested
if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
- audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
- audio_channel_count_from_out_mask(channelMask) <= 2) {
+ audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
+ audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
goto non_direct_output;
}
@@ -924,102 +919,58 @@
if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
profile = getProfileForDirectOutput(device,
- samplingRate,
- format,
- channelMask,
+ config->sample_rate,
+ config->format,
+ config->channel_mask,
(audio_output_flags_t)flags);
}
if (profile != 0) {
- sp<SwAudioOutputDescriptor> outputDesc = NULL;
-
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
- outputDesc = desc;
// reuse direct output if currently open by the same client
// and configured with same parameters
- if ((samplingRate == outputDesc->mSamplingRate) &&
- audio_formats_match(format, outputDesc->mFormat) &&
- (channelMask == outputDesc->mChannelMask)) {
- if (session == outputDesc->mDirectClientSession) {
- outputDesc->mDirectOpenCount++;
- ALOGV("getOutputForDevice() reusing direct output %d for session %d",
- mOutputs.keyAt(i), session);
- return mOutputs.keyAt(i);
- } else {
- ALOGV("getOutputForDevice() do not reuse direct output because"
- "current client (%d) is not the same as requesting client (%d)",
- outputDesc->mDirectClientSession, session);
- goto non_direct_output;
- }
+ if ((config->sample_rate == desc->mSamplingRate) &&
+ audio_formats_match(config->format, desc->mFormat) &&
+ (config->channel_mask == desc->mChannelMask) &&
+ (session == desc->mDirectClientSession)) {
+ desc->mDirectOpenCount++;
+ ALOGV("getOutputForDevice() reusing direct output %d for session %d",
+ mOutputs.keyAt(i), session);
+ return mOutputs.keyAt(i);
}
}
}
- // close direct output if currently open and configured with different parameters
- if (outputDesc != NULL) {
- closeOutput(outputDesc->mIoHandle);
+
+ if (!profile->canOpenNewIo()) {
+ goto non_direct_output;
}
- // if the selected profile is offloaded and no offload info was specified,
- // create a default one
- audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
- if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
- flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- defaultOffloadInfo.sample_rate = samplingRate;
- defaultOffloadInfo.channel_mask = channelMask;
- defaultOffloadInfo.format = format;
- defaultOffloadInfo.stream_type = stream;
- defaultOffloadInfo.bit_rate = 0;
- defaultOffloadInfo.duration_us = -1;
- defaultOffloadInfo.has_video = true; // conservative
- defaultOffloadInfo.is_streaming = true; // likely
- offloadInfo = &defaultOffloadInfo;
- }
-
- outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- outputDesc->mDevice = device;
- outputDesc->mLatency = 0;
- outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = samplingRate;
- config.channel_mask = channelMask;
- config.format = format;
- if (offloadInfo != NULL) {
- config.offload_info = *offloadInfo;
- }
- DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
- String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
- : String8("");
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+ sp<SwAudioOutputDescriptor> outputDesc =
+ new SwAudioOutputDescriptor(profile, mpClientInterface);
+ status = outputDesc->open(config, device, String8(""), stream, flags, &output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
- (samplingRate != 0 && samplingRate != config.sample_rate) ||
- (format != AUDIO_FORMAT_DEFAULT && !audio_formats_match(format, config.format)) ||
- (channelMask != 0 && channelMask != config.channel_mask)) {
- ALOGV("getOutputForDevice() failed opening direct output: output %d samplingRate %d %d,"
- "format %d %d, channelMask %04x %04x", output, samplingRate,
- outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
- outputDesc->mChannelMask);
+ (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
+ (config->format != AUDIO_FORMAT_DEFAULT &&
+ !audio_formats_match(config->format, outputDesc->mFormat)) ||
+ (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
+ ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
+ "format %d %d, channel mask %04x %04x", output, config->sample_rate,
+ outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
+ config->channel_mask, outputDesc->mChannelMask);
if (output != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeOutput(output);
+ outputDesc->close();
}
// fall back to mixer output if possible when the direct output could not be open
- if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
+ if (audio_is_linear_pcm(config->format) &&
+ config->sample_rate <= SAMPLE_RATE_HZ_MAX) {
goto non_direct_output;
}
return AUDIO_IO_HANDLE_NONE;
}
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
@@ -1045,18 +996,18 @@
// open a non direct output
// for non direct outputs, only PCM is supported
- if (audio_is_linear_pcm(format)) {
+ if (audio_is_linear_pcm(config->format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
- output = selectOutput(outputs, flags, format);
+ output = selectOutput(outputs, flags, config->format);
}
ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
- "samplingRate %d, format %d, channels %x, flags %x",
- stream, samplingRate, format, channelMask, flags);
+ "sampling rate %d, format %d, channels %x, flags %x",
+ stream, config->sample_rate, config->format, config->channel_mask, flags);
return output;
}
@@ -1155,6 +1106,13 @@
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+ if (!outputDesc->isActive()) {
+ if (!outputDesc->mProfile->canStartNewIo()) {
+ return INVALID_OPERATION;
+ }
+ outputDesc->mProfile->curActiveCount++;
+ }
+
// Routing?
mOutputRoutes.incRouteActivity(session);
@@ -1182,6 +1140,12 @@
if (status != NO_ERROR) {
mOutputRoutes.decRouteActivity(session);
+ if (!outputDesc->isActive()) {
+ LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __FUNCTION__, outputDesc->mProfile->curActiveCount);
+ outputDesc->mProfile->curActiveCount--;
+ }
return status;
}
// Automatically enable the remote submix input when output is started on a re routing mix
@@ -1370,7 +1334,15 @@
}
}
- return stopSource(outputDesc, stream, forceDeviceUpdate);
+ status_t status = stopSource(outputDesc, stream, forceDeviceUpdate);
+
+ if (status == NO_ERROR && !outputDesc->isActive()) {
+ LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __FUNCTION__, outputDesc->mProfile->curActiveCount);
+ outputDesc->mProfile->curActiveCount--;
+ }
+ return status;
}
status_t AudioPolicyManager::stopSource(const sp<AudioOutputDescriptor>& outputDesc,
@@ -1473,7 +1445,7 @@
input_type_t *inputType,
audio_port_handle_t *portId)
{
- ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
+ ALOGV("getInputForAttr() source %d, sampling rate %d, format %d, channel mask %x,"
"session %d, flags %#x",
attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
@@ -1485,6 +1457,10 @@
AudioMix *policyMix = NULL;
DeviceVector inputDevices;
+ if (inputSource == AUDIO_SOURCE_DEFAULT) {
+ inputSource = AUDIO_SOURCE_MIC;
+ }
+
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
@@ -1541,9 +1517,6 @@
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
- if (inputSource == AUDIO_SOURCE_DEFAULT) {
- inputSource = AUDIO_SOURCE_MIC;
- }
halInputSource = inputSource;
// TODO: check for existing client for this port ID
@@ -1593,7 +1566,7 @@
}
*input = getInputForDevice(device, address, session, uid, inputSource,
- config->sample_rate, config->format, config->channel_mask, flags,
+ config, flags,
policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
status = INVALID_OPERATION;
@@ -1620,9 +1593,7 @@
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix)
{
@@ -1641,16 +1612,17 @@
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
} else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
- audio_is_linear_pcm(format)) {
+ audio_is_linear_pcm(config->format)) {
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
}
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
- // samplingRate and flags may be updated by getInputProfile
- uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
- audio_format_t profileFormat = format;
- audio_channel_mask_t profileChannelMask = channelMask;
+ // sampling rate and flags may be updated by getInputProfile
+ uint32_t profileSamplingRate = (config->sample_rate == 0) ?
+ SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
+ audio_format_t profileFormat = config->format;
+ audio_channel_mask_t profileChannelMask = config->channel_mask;
audio_input_flags_t profileFlags = flags;
for (;;) {
profile = getInputProfile(device, address,
@@ -1664,12 +1636,13 @@
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
ALOGW("getInputForDevice() could not find profile for device 0x%X, "
- "samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
- device, samplingRate, format, channelMask, flags);
+ "sampling rate %u, format %#x, channel mask 0x%X, flags %#x",
+ device, config->sample_rate, config->format, config->channel_mask, flags);
return input;
}
}
// Pick input sampling rate if not specified by client
+ uint32_t samplingRate = config->sample_rate;
if (samplingRate == 0) {
samplingRate = profileSamplingRate;
}
@@ -1680,14 +1653,14 @@
}
sp<AudioSession> audioSession = new AudioSession(session,
- inputSource,
- format,
- samplingRate,
- channelMask,
- flags,
- uid,
- isSoundTrigger,
- policyMix, mpClientInterface);
+ inputSource,
+ config->format,
+ samplingRate,
+ config->channel_mask,
+ flags,
+ uid,
+ isSoundTrigger,
+ policyMix, mpClientInterface);
// FIXME: disable concurrent capture until UI is ready
#if 0
@@ -1731,8 +1704,8 @@
// can be selected.
if (!isConcurrentSource(inputSource) &&
((desc->mSamplingRate != samplingRate ||
- desc->mChannelMask != channelMask ||
- !audio_formats_match(desc->mFormat, format)) &&
+ desc->mChannelMask != config->channel_mask ||
+ !audio_formats_match(desc->mFormat, config->format)) &&
(source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
source_priority(inputSource)))) {
reusedInputDesc = desc;
@@ -1755,44 +1728,34 @@
}
#endif
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = profileSamplingRate;
- config.channel_mask = profileChannelMask;
- config.format = profileFormat;
-
- if (address == "") {
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
- // the inputs vector must be of size 1, but we don't want to crash here
- address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
+ if (!profile->canOpenNewIo()) {
+ return AUDIO_IO_HANDLE_NONE;
}
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &device,
- address,
- halInputSource,
- profileFlags);
+ sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
+
+ audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = profileSamplingRate;
+ lConfig.channel_mask = profileChannelMask;
+ lConfig.format = profileFormat;
+
+ status_t status = inputDesc->open(&lConfig, device, address,
+ halInputSource, profileFlags, &input);
// only accept input with the exact requested set of parameters
if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
- (profileSamplingRate != config.sample_rate) ||
- !audio_formats_match(profileFormat, config.format) ||
- (profileChannelMask != config.channel_mask)) {
- ALOGW("getInputForAttr() failed opening input: samplingRate %d"
- ", format %d, channelMask %x",
- samplingRate, format, channelMask);
+ (profileSamplingRate != lConfig.sample_rate) ||
+ !audio_formats_match(profileFormat, lConfig.format) ||
+ (profileChannelMask != lConfig.channel_mask)) {
+ ALOGW("getInputForAttr() failed opening input: sampling rate %d"
+ ", format %d, channel mask %x",
+ profileSamplingRate, profileFormat, profileChannelMask);
if (input != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeInput(input);
+ inputDesc->close();
}
return AUDIO_IO_HANDLE_NONE;
}
- sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
- inputDesc->mSamplingRate = profileSamplingRate;
- inputDesc->mFormat = profileFormat;
- inputDesc->mChannelMask = profileChannelMask;
- inputDesc->mDevice = device;
inputDesc->mPolicyMix = policyMix;
inputDesc->addAudioSession(session, audioSession);
@@ -2006,6 +1969,13 @@
setInputDevice(input, device, true /* force */);
if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
+ if (!inputDesc->mProfile->canStartNewIo()) {
+ mInputRoutes.decRouteActivity(session);
+ audioSession->changeActiveCount(-1);
+ return INVALID_OPERATION;
+ }
+ inputDesc->mProfile->curActiveCount++;
+
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2075,6 +2045,11 @@
if (inputDesc->isActive()) {
setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
} else {
+ LOG_ALWAYS_FATAL_IF(inputDesc->mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __FUNCTION__, inputDesc->mProfile->curActiveCount);
+ inputDesc->mProfile->curActiveCount--;
+
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2169,7 +2144,7 @@
mAudioPatches.removeItemsAt(patch_index);
patchRemoved = true;
}
- mpClientInterface->closeInput(mInputs.keyAt(input_index));
+ inputDesc->close();
}
mInputs.clear();
SoundTrigger::setCaptureState(false);
@@ -3632,6 +3607,12 @@
{
const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
+ if (!outProfile->canOpenNewIo()) {
+ ALOGE("Invalid Output profile max open count %u for profile %s",
+ outProfile->maxOpenCount, outProfile->getTagName().c_str());
+ continue;
+ }
+
if (!outProfile->hasSupportedDevices()) {
ALOGW("Output profile contains no device on module %s", mHwModules[i]->getName());
continue;
@@ -3660,30 +3641,15 @@
const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
: String8("");
-
- outputDesc->mDevice = profileType;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = outputDesc->mSamplingRate;
- config.channel_mask = outputDesc->mChannelMask;
- config.format = outputDesc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(outProfile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+ status_t status = outputDesc->open(nullptr, profileType, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
ALOGW("Cannot open output stream for device %08x on hw module %s",
outputDesc->mDevice,
mHwModules[i]->getName());
} else {
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
-
for (size_t k = 0; k < supportedDevices.size(); k++) {
ssize_t index = mAvailableOutputDevices.indexOf(supportedDevices[k]);
// give a valid ID to an attached device once confirmed it is reachable
@@ -3697,11 +3663,11 @@
}
addOutput(output, outputDesc);
setOutputDevice(outputDesc,
- outputDesc->mDevice,
+ profileType,
true,
0,
NULL,
- address.string());
+ address);
}
}
// open input streams needed to access attached devices to validate
@@ -3710,6 +3676,12 @@
{
const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
+ if (!inProfile->canOpenNewIo()) {
+ ALOGE("Invalid Input profile max open count %u for profile %s",
+ inProfile->maxOpenCount, inProfile->getTagName().c_str());
+ continue;
+ }
+
if (!inProfile->hasSupportedDevices()) {
ALOGW("Input profile contains no device on module %s", mHwModules[i]->getName());
continue;
@@ -3722,30 +3694,15 @@
continue;
}
sp<AudioInputDescriptor> inputDesc =
- new AudioInputDescriptor(inProfile);
+ new AudioInputDescriptor(inProfile, mpClientInterface);
- inputDesc->mDevice = profileType;
-
- // find the address
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
- // the inputs vector must be of size 1, but we don't want to crash here
- String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
- : String8("");
- ALOGV(" for input device 0x%x using address %s", profileType, address.string());
- ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
-
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = inputDesc->mSamplingRate;
- config.channel_mask = inputDesc->mChannelMask;
- config.format = inputDesc->mFormat;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openInput(inProfile->getModuleHandle(),
- &input,
- &config,
- &inputDesc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE);
+ status_t status = inputDesc->open(nullptr,
+ profileType,
+ String8(""),
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
@@ -3760,10 +3717,10 @@
}
}
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
} else {
ALOGW("Cannot open input stream for device %08x on hw module %s",
- inputDesc->mDevice,
+ profileType,
mHwModules[i]->getName());
}
}
@@ -3804,10 +3761,10 @@
AudioPolicyManager::~AudioPolicyManager()
{
for (size_t i = 0; i < mOutputs.size(); i++) {
- mpClientInterface->closeOutput(mOutputs.keyAt(i));
+ mOutputs.valueAt(i)->close();
}
for (size_t i = 0; i < mInputs.size(); i++) {
- mpClientInterface->closeInput(mInputs.keyAt(i));
+ mInputs.valueAt(i)->close();
}
mAvailableOutputDevices.clear();
mAvailableInputDevices.clear();
@@ -3825,7 +3782,6 @@
void AudioPolicyManager::addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc)
{
- outputDesc->setIoHandle(output);
mOutputs.add(output, outputDesc);
updateMono(output); // update mono status when adding to output list
selectOutputForMusicEffects();
@@ -3840,7 +3796,6 @@
void AudioPolicyManager::addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc)
{
- inputDesc->setIoHandle(input);
mInputs.add(input, inputDesc);
nextAudioPortGeneration();
}
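
The hunks above gate both opening and starting audio I/O on per-profile counters (canOpenNewIo(), canStartNewIo(), maxOpenCount, curActiveCount). A minimal sketch of the assumed bookkeeping behind those checks follows; apart from the member names that actually appear in this patch, the fields and the "0 means unlimited" convention are assumptions, not a copy of the IOProfile implementation.

    // Sketch only: assumed shape of the per-profile I/O limits consulted by this patch.
    struct IOProfileLimits {
        uint32_t maxOpenCount = 1;    // how many streams may be open on this profile
        uint32_t maxActiveCount = 1;  // how many of them may be started at once
        uint32_t curOpenCount = 0;
        uint32_t curActiveCount = 0;

        bool canOpenNewIo() const {
            return maxOpenCount == 0 || curOpenCount < maxOpenCount;
        }
        bool canStartNewIo() const {
            return maxActiveCount == 0 || curActiveCount < maxActiveCount;
        }
    };
    // Usage mirrors the patch: curActiveCount++ when the first session on a descriptor
    // becomes active, curActiveCount-- (guarded against underflow) when the last stops.
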
@@ -3934,30 +3889,20 @@
continue;
}
+ if (!profile->canOpenNewIo()) {
+ ALOGW("Max Output number %u already opened for this profile %s",
+ profile->maxOpenCount, profile->getTagName().c_str());
+ continue;
+ }
+
ALOGV("opening output for device %08x with params %s profile %p name %s",
device, address.string(), profile.get(), profile->getName().string());
desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
- config.offload_info.sample_rate = desc->mSamplingRate;
- config.offload_info.channel_mask = desc->mChannelMask;
- config.offload_info.format = desc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
+ status_t status = desc->open(nullptr, device, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status == NO_ERROR) {
// Here is where the out_set_parameters() for card & device gets called
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
@@ -3967,27 +3912,21 @@
updateAudioProfiles(device, output, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkOutputsForDevice() missing param");
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
} else if (profile->hasDynamicAudioProfile()) {
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
- profile->pickAudioProfile(config.sample_rate, config.channel_mask, config.format);
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ profile->pickAudioProfile(
+ config.sample_rate, config.channel_mask, config.format);
config.offload_info.sample_rate = config.sample_rate;
config.offload_info.channel_mask = config.channel_mask;
config.offload_info.format = config.format;
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
- } else {
+
+ status_t status = desc->open(&config, device, address, AUDIO_STREAM_DEFAULT,
+ AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status != NO_ERROR) {
output = AUDIO_IO_HANDLE_NONE;
}
}
@@ -4033,7 +3972,7 @@
} else {
ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
mPrimaryOutput->mIoHandle, output);
- mpClientInterface->closeOutput(output);
+ desc->close();
removeOutput(output);
nextAudioPortGeneration();
output = AUDIO_IO_HANDLE_NONE;
@@ -4161,6 +4100,7 @@
for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
sp<IOProfile> profile = profiles[profile_index];
+
// nothing to do if one input is already opened for this profile
size_t input_index;
for (input_index = 0; input_index < mInputs.size(); input_index++) {
@@ -4176,31 +4116,22 @@
continue;
}
- ALOGV("opening input for device 0x%X with params %s", device, address.string());
- desc = new AudioInputDescriptor(profile);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
+ if (!profile->canOpenNewIo()) {
+ ALOGW("Max Input number %u already opened for this profile %s",
+ profile->maxOpenCount, profile->getTagName().c_str());
+ continue;
+ }
+
+ desc = new AudioInputDescriptor(profile, mpClientInterface);
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-
- ALOGV("opening inputput for device %08x with params %s profile %p name %s",
- desc->mDevice, address.string(), profile.get(), profile->getName().string());
-
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &desc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE /*FIXME*/);
+ status_t status = desc->open(nullptr,
+ device,
+ address,
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
-
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
mpClientInterface->setParameters(input, String8(param));
@@ -4209,7 +4140,7 @@
updateAudioProfiles(device, input, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkInputsForDevice() direct input missing param");
- mpClientInterface->closeInput(input);
+ desc->close();
input = AUDIO_IO_HANDLE_NONE;
}
@@ -4317,11 +4248,8 @@
mpClientInterface->onAudioPatchListUpdate();
}
- AudioParameter param;
- param.add(String8("closing"), String8("true"));
- mpClientInterface->setParameters(output, param.toString());
+ outputDesc->close();
- mpClientInterface->closeOutput(output);
removeOutput(output);
mPreviousOutputs = mOutputs;
}
@@ -4346,7 +4274,7 @@
mpClientInterface->onAudioPatchListUpdate();
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
mInputs.removeItem(input);
}
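
Throughout this file the raw mpClientInterface->openOutput()/openInput() and closeOutput()/closeInput() calls are funneled through open()/close() on the I/O descriptors. The sketch below is an assumed simplification of what such a descriptor-side open() does; member names beyond those visible in the diff (for example mClientInterface, mIoHandle) are hypothetical, and offload/stream-type handling is omitted.

    // Assumed simplification of a descriptor-managed open: derive the HAL config from
    // the descriptor defaults when the caller passes nullptr, forward the request to
    // the client interface, and cache what the HAL accepted.
    status_t SwAudioOutputDescriptor::open(const audio_config_t *halConfig,
                                           audio_devices_t device,
                                           const String8 &address,
                                           audio_stream_type_t /*stream*/,
                                           audio_output_flags_t flags,
                                           audio_io_handle_t *output)
    {
        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
        if (halConfig != nullptr) {
            config = *halConfig;
        } else {
            config.sample_rate  = mSamplingRate;
            config.channel_mask = mChannelMask;
            config.format       = mFormat;
        }
        mDevice = device;
        mFlags = (audio_output_flags_t)(mFlags | flags);
        status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
                                                       output, &config, &mDevice,
                                                       address, &mLatency, mFlags);
        if (status == NO_ERROR) {
            mSamplingRate = config.sample_rate;   // keep the negotiated values
            mChannelMask  = config.channel_mask;
            mFormat       = config.format;
            mIoHandle     = *output;              // replaces the removed setIoHandle()
            mProfile->curOpenCount++;             // undone by close()
        }
        return status;
    }
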
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 11894dc..2d41bd1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -601,20 +601,15 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo);
+ const audio_config_t *config,
+ audio_output_flags_t flags);
// internal method to return the input handle for the given device and format
audio_io_handle_t getInputForDevice(audio_devices_t device,
String8 address,
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix);
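
With these signature changes, callers hand the internal helpers a single config struct instead of a sample rate/format/channel mask triplet. A hypothetical call site would pack the values as below; the local variable names and values are illustrative only, and device/address/session/uid are assumed to be in scope from the surrounding policy code.

    // Illustration only: packing the legacy triplet into the structs the refactored
    // helpers now take.
    audio_config_base_t inCfg = AUDIO_CONFIG_BASE_INITIALIZER;
    inCfg.sample_rate  = 48000;
    inCfg.format       = AUDIO_FORMAT_PCM_16_BIT;
    inCfg.channel_mask = AUDIO_CHANNEL_IN_STEREO;
    audio_io_handle_t input = getInputForDevice(device, address, session, uid,
                                                AUDIO_SOURCE_MIC, &inCfg,
                                                AUDIO_INPUT_FLAG_NONE,
                                                nullptr /*policyMix*/);

    audio_config_t outCfg = AUDIO_CONFIG_INITIALIZER;
    outCfg.sample_rate  = 48000;
    outCfg.format       = AUDIO_FORMAT_PCM_16_BIT;
    outCfg.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    audio_io_handle_t output = getOutputForDevice(device, session, AUDIO_STREAM_MUSIC,
                                                  &outCfg, AUDIO_OUTPUT_FLAG_NONE);
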
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index bd94e3e..1ee5ccf 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -278,8 +278,8 @@
return NO_INIT;
}
// already checked by client, but double-check in case the client wrapper is bypassed
- if (attr->source >= AUDIO_SOURCE_CNT && attr->source != AUDIO_SOURCE_HOTWORD &&
- attr->source != AUDIO_SOURCE_FM_TUNER) {
+ if (attr->source < AUDIO_SOURCE_DEFAULT || (attr->source >= AUDIO_SOURCE_CNT &&
+ attr->source != AUDIO_SOURCE_HOTWORD && attr->source != AUDIO_SOURCE_FM_TUNER)) {
return BAD_VALUE;
}
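
For clarity, the validation above reads as a small predicate: a source is accepted when it is non-negative and either inside the normal range or one of the two explicitly allowed out-of-range values. A standalone sketch, using the constants from system/audio.h (the helper name is hypothetical):

    // Hypothetical helper equivalent to the check above.
    static bool isRecordSourceAllowed(audio_source_t source) {
        return source >= AUDIO_SOURCE_DEFAULT &&
               (source < AUDIO_SOURCE_CNT ||
                source == AUDIO_SOURCE_HOTWORD ||
                source == AUDIO_SOURCE_FM_TUNER);
    }
    // getInputForAttr() then returns BAD_VALUE when !isRecordSourceAllowed(attr->source).
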
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 7ec3ccb..1fbba58 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -91,9 +91,6 @@
LOCAL_CFLAGS += -Wall -Wextra -Werror
-# Workaround for invalid unused-lambda-capture warning http://b/38349491
-LOCAL_CLANG_CFLAGS += -Wno-error=unused-lambda-capture
-
LOCAL_MODULE:= libcameraservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 2cf648f..585d2eb 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -859,6 +859,12 @@
outputStreams.push(getPreviewStreamId());
+ if (params.isDeviceZslSupported) {
+ // If device ZSL is supported, resume preview buffers that may be paused
+ // during last takePicture().
+ mDevice->dropStreamBuffers(false, getPreviewStreamId());
+ }
+
if (!params.recordingHint) {
if (!restart) {
res = mStreamingProcessor->updatePreviewRequest(params);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index a407d0b..910dd78 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -136,7 +136,12 @@
const char *enddump = "\n\n";
write(fd, enddump, strlen(enddump));
- return mHardware->dump(fd, args);
+ sp<CameraHardwareInterface> hardware = mHardware;
+ if (hardware != nullptr) {
+ return hardware->dump(fd, args);
+ }
+ ALOGI("%s: camera device closed already, skip dumping", __FUNCTION__);
+ return OK;
}
// ----------------------------------------------------------------------------
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index b65f1c7..1ee216f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -553,6 +553,12 @@
return DONE;
}
+ if (l.mParameters.isDeviceZslSupported) {
+ // If device ZSL is supported, drop all pending preview buffers to reduce the chance of
+ // rendering preview frames newer than the still frame.
+ client->getCameraDevice()->dropStreamBuffers(true, client->getPreviewStreamId());
+ }
+
/**
* Clear the streaming request for still-capture pictures
* (as opposed to i.e. video snapshots)
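
Together with the Camera2Client hunk earlier, this forms a drop/resume pair around a still capture. A condensed, hypothetical illustration of that pairing follows; the free-function names are invented for the sketch, while dropStreamBuffers() is the method added by this patch.

    // Condensed illustration, not verbatim framework code: drop preview buffers while
    // a device-ZSL still capture is in flight, resume when preview restarts.
    void onStillCaptureStarted(const sp<CameraDeviceBase>& device,
                               int previewStreamId, bool isDeviceZslSupported) {
        if (isDeviceZslSupported) {
            // Avoid rendering preview frames newer than the still frame.
            device->dropStreamBuffers(true, previewStreamId);
        }
    }

    void onPreviewStarted(const sp<CameraDeviceBase>& device,
                          int previewStreamId, bool isDeviceZslSupported) {
        if (isDeviceZslSupported) {
            // Resume normal preview buffer delivery.
            device->dropStreamBuffers(false, previewStreamId);
        }
    }
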
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 68384b0..f1f96c3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -359,6 +359,12 @@
const std::vector<android::camera3::OutputStreamInfo> &outputInfo,
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * Toggle buffer dropping for the stream identified by streamId: when dropping is true,
+ * output buffers returned for that stream are dropped instead of being delivered; when
+ * false, normal delivery resumes.
+ */
+ virtual status_t dropStreamBuffers(bool /*dropping*/, int /*streamId*/) = 0;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index e0a2dd4..c0db8e7 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2039,6 +2039,20 @@
return res;
}
+status_t Camera3Device::dropStreamBuffers(bool dropping, int streamId) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ int idx = mOutputStreams.indexOfKey(streamId);
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Stream %d is not found.", __FUNCTION__, streamId);
+ return BAD_VALUE;
+ }
+
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.editValueAt(idx);
+ return stream->dropBuffers(dropping);
+}
+
/**
* Camera3Device private methods
*/
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 357b893..e9466ab 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -192,6 +192,12 @@
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
+ /**
+ * Toggle buffer dropping for the stream identified by streamId: when dropping is true,
+ * output buffers returned for that stream are dropped instead of being delivered; when
+ * false, normal delivery resumes.
+ */
+ status_t dropStreamBuffers(bool dropping, int streamId) override;
+
private:
// internal typedefs
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 4b36ea2..0a245c4 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -108,6 +108,10 @@
return false;
}
+status_t Camera3DummyStream::dropBuffers(bool /*dropping*/) {
+ return OK;
+}
+
status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
__FUNCTION__, mId);
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 3212031..684f4b0 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -57,6 +57,12 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
/**
+ * Toggle buffer dropping for the stream identified by streamId: when dropping is true,
+ * output buffers returned for that stream are dropped instead of being delivered; when
+ * false, normal delivery resumes.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) override;
+
+ /**
* Return if this output stream is for video encoding.
*/
bool isVideoStream() const;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 329172a..e79eecc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -44,6 +44,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (mConsumer == NULL) {
@@ -70,6 +71,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -100,6 +102,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
// Deferred consumer only support preview surface format now.
if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
@@ -139,6 +142,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
@@ -227,9 +231,14 @@
/**
* Return buffer back to ANativeWindow
*/
- if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR || mDropBuffers) {
// Cancel buffer
- ALOGW("A frame is dropped for stream %d", mId);
+ if (mDropBuffers) {
+ ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
+ } else {
+ ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
+ }
+
res = currentConsumer->cancelBuffer(currentConsumer.get(),
anwBuffer,
anwReleaseFence);
@@ -785,6 +794,12 @@
return res;
}
+status_t Camera3OutputStream::dropBuffers(bool dropping) {
+ Mutex::Autolock l(mLock);
+ mDropBuffers = dropping;
+ return OK;
+}
+
status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
return OK;
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index fbb14fe..18b1901 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -166,6 +166,11 @@
virtual status_t notifyBufferReleased(ANativeWindowBuffer *anwBuffer);
/**
+ * Toggle buffer dropping for this stream: when dropping is true, returned buffers are
+ * dropped instead of being delivered; when false, normal delivery resumes.
+ */
+ virtual status_t dropBuffers(bool dropping) override;
+
+ /**
* Set the graphic buffer manager to get/return the stream buffers.
*
* It is only legal to call this method when stream is in STATE_CONSTRUCTED state.
@@ -260,6 +265,9 @@
*/
uint64_t mConsumerUsage;
+ // Whether to drop valid buffers.
+ bool mDropBuffers;
+
/**
* Internal Camera3Stream interface
*/
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index edfbab1..08fcf38 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -73,6 +73,11 @@
const std::vector<OutputStreamInfo> &outputInfo,
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * Toggle buffer dropping for this stream: when dropping is true, returned buffers are
+ * dropped instead of being delivered; when false, normal delivery resumes.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) = 0;
};
} // namespace camera3
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 8444444..7f42b1b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -272,7 +272,7 @@
}
ALOGV("given uid %d; sanitized uid: %d sanitized pkg: %s "
- "sanitized pkg version: %d",
+ "sanitized pkg version: %" PRId64,
uid_given, item->getUid(),
item->getPkgName().c_str(),
item->getPkgVersionCode());
@@ -856,7 +856,7 @@
} else {
AString pkg;
std::string installer = "";
- int32_t versionCode = 0;
+ int64_t versionCode = 0;
struct passwd *pw = getpwuid(uid);
if (pw) {
@@ -926,7 +926,7 @@
}
- ALOGV("package '%s' installed by '%s' versioncode %d / %08x",
+ ALOGV("package '%s' installed by '%s' versioncode %" PRId64 " / %" PRIx64,
pkg.c_str(), installer.c_str(), versionCode, versionCode);
if (strncmp(installer.c_str(), "com.android.", 12) == 0) {
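
Widening the version code to 64 bits also forces the format strings onto the <inttypes.h> macros, as in the two ALOGV changes above. A standalone example with an illustrative value:

    // Standalone illustration: "%d" is wrong for int64_t on 32-bit builds, so the
    // PRId64/PRIx64 macros are used instead.
    #include <cinttypes>
    #include <cstdio>

    int main() {
        const int64_t versionCode = 123456789012345LL;  // was int32_t before this change
        std::printf("versioncode %" PRId64 " / %" PRIx64 "\n",
                    versionCode, static_cast<uint64_t>(versionCode));
        return 0;
    }
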
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
index 3b34f44..fce7d08 100644
--- a/services/mediaanalytics/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -138,7 +138,7 @@
uid_t uid;
AString pkg;
AString installer;
- int32_t versionCode;
+ int64_t versionCode;
nsecs_t expiration;
};
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 9348ecd..caa0703 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -26,10 +26,6 @@
LOCAL_32_BIT_ONLY := true
LOCAL_INIT_RC := android.hardware.media.omx@1.0-service.rc
-ifeq ($(PRODUCT_FULL_TREBLE),true)
-LOCAL_CFLAGS += -DUSE_VNDBINDER
-endif
-
include $(BUILD_EXECUTABLE)
# service seccomp policy
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 6f14a42..701ca6e 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -40,10 +40,8 @@
signal(SIGPIPE, SIG_IGN);
SetUpMinijail(kSystemSeccompPolicyPath, kVendorSeccompPolicyPath);
-#ifdef USE_VNDBINDER
android::ProcessState::initWithDriver("/dev/vndbinder");
android::ProcessState::self()->startThreadPool();
-#endif // USE_VNDBINDER
::android::hardware::configureRpcThreadpool(64, false);
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 51ae665..ac3202b 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -142,7 +142,31 @@
}
}
+// If the last service reference has been released and a close request is pending, close the stream
+bool AAudioService::releaseStream(const sp<AAudioServiceStreamBase> &serviceStream) {
+ bool closed = false;
+ if ((serviceStream->decrementServiceReferenceCount() == 0) && serviceStream->isCloseNeeded()) {
+ // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
+ // then only one will get the pointer and do the close.
+ sp<AAudioServiceStreamBase> foundStream =
+ mStreamTracker.removeStreamByHandle(serviceStream->getHandle());
+ if (foundStream.get() != nullptr) {
+ foundStream->close();
+ pid_t pid = foundStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
+ }
+ closed = true;
+ }
+ return closed;
+}
+
+aaudio_result_t AAudioService::checkForPendingClose(
+ const sp<AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult) {
+ return releaseStream(serviceStream) ? AAUDIO_ERROR_INVALID_STATE : defaultResult;
+}
+
aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+ ALOGD("closeStream(0x%08X)", streamHandle);
// Check permission and ownership first.
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
@@ -150,22 +174,13 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- ALOGD("closeStream(0x%08X)", streamHandle);
- // Remove handle from tracker so that we cannot look up the raw address any more.
- // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
- // then only one will get the pointer and do the close.
- serviceStream = mStreamTracker.removeStreamByHandle(streamHandle);
- if (serviceStream.get() != nullptr) {
- serviceStream->close();
- pid_t pid = serviceStream->getOwnerProcessId();
- AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
- return AAUDIO_OK;
- } else {
- ALOGW("closeStream(0x%0x) being handled by another thread", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
-}
+ pid_t pid = serviceStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
+ serviceStream->setCloseNeeded(true);
+ (void) releaseStream(serviceStream);
+ return AAUDIO_OK;
+}
sp<AAudioServiceStreamBase> AAudioService::convertHandleToServiceStream(
aaudio_handle_t streamHandle) {
@@ -181,7 +196,9 @@
if (!allowed) {
ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
callingUserId, streamHandle, ownerUserId);
- serviceStream = nullptr;
+ serviceStream.clear();
+ } else {
+ serviceStream->incrementServiceReferenceCount();
}
}
return serviceStream;
@@ -198,7 +215,7 @@
aaudio_result_t result = serviceStream->getDescription(parcelable);
// parcelable.dump();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
@@ -208,7 +225,8 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->start();
+ aaudio_result_t result = serviceStream->start();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
@@ -218,7 +236,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->pause();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
@@ -228,7 +246,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->stop();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
@@ -237,48 +255,51 @@
ALOGE("flushStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->flush();
+ aaudio_result_t result = serviceStream->flush();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId,
int64_t periodNanoseconds) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
- ALOGE("registerAudioThread(), thread already registered");
- return AAUDIO_ERROR_INVALID_STATE;
- }
-
- const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
- serviceStream->setRegisteredThread(clientThreadId);
- int err = android::requestPriority(ownerPid, clientThreadId,
- DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
- if (err != 0){
- ALOGE("registerAudioThread(%d) failed, errno = %d, priority = %d",
- clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
- return AAUDIO_ERROR_INTERNAL;
+ ALOGE("AAudioService::registerAudioThread(), thread already registered");
+ result = AAUDIO_ERROR_INVALID_STATE;
} else {
- return AAUDIO_OK;
+ const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+ serviceStream->setRegisteredThread(clientThreadId);
+ int err = android::requestPriority(ownerPid, clientThreadId,
+ DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+ if (err != 0) {
+ ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
+ clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
+ result = AAUDIO_ERROR_INTERNAL;
+ }
}
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("unregisterAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != clientThreadId) {
- ALOGE("unregisterAudioThread(), wrong thread");
- return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else {
+ serviceStream->setRegisteredThread(0);
}
- serviceStream->setRegisteredThread(0);
- return AAUDIO_OK;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
@@ -289,7 +310,8 @@
ALOGE("startClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->startClient(client, clientHandle);
+ aaudio_result_t result = serviceStream->startClient(client, clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
@@ -299,5 +321,6 @@
ALOGE("stopClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->stopClient(clientHandle);
+ aaudio_result_t result = serviceStream->stopClient(clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
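
Every Binder entry point in this file now follows the same shape: the handle lookup takes a service reference, the operation runs, and checkForPendingClose() releases the reference and performs any deferred close. A sketch of that shape with a hypothetical operation (exampleCall/exampleOperation are invented; convertHandleToServiceStream() and checkForPendingClose() are the functions from this patch):

    aaudio_result_t AAudioService::exampleCall(aaudio_handle_t streamHandle) {
        sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
        if (serviceStream.get() == nullptr) {
            return AAUDIO_ERROR_INVALID_HANDLE;  // lookup failed, no reference was taken
        }
        aaudio_result_t result = serviceStream->exampleOperation();
        // Drops the service reference; closes the stream if closeStream() ran meanwhile.
        return checkForPendingClose(serviceStream, result);
    }
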
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index eef0824..bdd9e0b 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -94,9 +94,15 @@
aaudio::aaudio_handle_t streamHandle);
- android::AudioClient mAudioClient;
- aaudio::AAudioStreamTracker mStreamTracker;
+ bool releaseStream(const sp<aaudio::AAudioServiceStreamBase> &serviceStream);
+
+ aaudio_result_t checkForPendingClose(const sp<aaudio::AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult);
+
+ android::AudioClient mAudioClient;
+
+ aaudio::AAudioStreamTracker mStreamTracker;
enum constants {
DEFAULT_AUDIO_PRIORITY = 2
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 635b45c..53d2860 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -402,3 +402,13 @@
void AAudioServiceStreamBase::onVolumeChanged(float volume) {
sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
}
+
+int32_t AAudioServiceStreamBase::incrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return ++mCallingCount;
+}
+
+int32_t AAudioServiceStreamBase::decrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return --mCallingCount;
+}
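
The calling count and the close-needed flag implement a small deferred-close protocol: closeStream() only marks the stream, and the actual teardown happens when the last in-flight service call releases its reference. A minimal standalone model of that protocol (plain C++ with invented types, not AAudio code):

    // Minimal model of the deferred-close protocol; only the thread that drops the
    // last reference performs the close, so it happens exactly once.
    #include <atomic>
    #include <cstdio>

    struct StreamModel {
        std::atomic<int32_t> refs{0};
        std::atomic<bool> closeNeeded{false};

        void enterCall()    { ++refs; }              // convertHandleToServiceStream()
        void requestClose() { closeNeeded = true; }  // closeStream() marks the stream
        void leaveCall() {                           // checkForPendingClose()
            if (--refs == 0 && closeNeeded) {
                std::puts("closing stream now");     // real code closes and untracks it
            }
        }
    };

    int main() {
        StreamModel s;
        s.enterCall();     // thread A is inside startStream()
        s.requestClose();  // thread B asks for close; nothing is torn down yet
        s.leaveCall();     // A returns; last reference gone, stream closed once
        return 0;
    }
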
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 29987f6..5f5bb98 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -199,6 +199,26 @@
return mFlowing;
}
+ /**
+ * Atomically increment the number of active references to the stream by AAudioService.
+ * @return value after the increment
+ */
+ int32_t incrementServiceReferenceCount();
+
+ /**
+ * Atomically decrement the number of active references to the stream by AAudioService.
+ * @return value after the decrement
+ */
+ int32_t decrementServiceReferenceCount();
+
+ bool isCloseNeeded() const {
+ return mCloseNeeded.load();
+ }
+
+ void setCloseNeeded(bool needed) {
+ mCloseNeeded.store(needed);
+ }
+
protected:
/**
@@ -256,8 +276,11 @@
private:
aaudio_handle_t mHandle = -1;
-
bool mFlowing = false;
+
+ std::mutex mCallingCountLock;
+ std::atomic<int32_t> mCallingCount{0};
+ std::atomic<bool> mCloseNeeded{false};
};
} /* namespace aaudio */