Merge "Use -Werror in frameworks/av/media/libaaudio/examples."
diff --git a/camera/Android.bp b/camera/Android.bp
index c76ae50..24b3918 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -29,12 +29,7 @@
// AIDL files for camera interfaces
// The headers for these interfaces will be available to any modules that
// include libcamera_client, at the path "aidl/package/path/BnFoo.h"
- "aidl/android/hardware/ICameraService.aidl",
- "aidl/android/hardware/ICameraServiceListener.aidl",
- "aidl/android/hardware/ICameraServiceProxy.aidl",
- "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
- "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
-
+ ":libcamera_client_aidl",
// Source for camera interface parcelables, and manually-written interfaces
"Camera.cpp",
@@ -81,3 +76,25 @@
],
}
+
+// AIDL interface between camera clients and the camera service.
+filegroup {
+ name: "libcamera_client_aidl",
+ srcs: [
+ "aidl/android/hardware/ICameraService.aidl",
+ "aidl/android/hardware/ICameraServiceListener.aidl",
+ "aidl/android/hardware/ICameraServiceProxy.aidl",
+ "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
+ "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
+ ],
+}
+
+// Extra AIDL files that are used by framework.jar but not libcamera_client
+// because they have hand-written native implementations.
+filegroup {
+ name: "libcamera_client_framework_aidl",
+ srcs: [
+ "aidl/android/hardware/ICamera.aidl",
+ "aidl/android/hardware/ICameraClient.aidl",
+ ],
+}
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index ddc4b16..44ed034 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -80,6 +80,7 @@
static bool gDisplayHistogram;
static bool showProgress = true;
static String8 gWriteMP4Filename;
+static String8 gComponentNameOverride;
static sp<ANativeWindow> gSurface;
@@ -193,7 +194,10 @@
CHECK(!gPreferSoftwareCodec);
flags |= MediaCodecList::kHardwareCodecsOnly;
}
- rawSource = SimpleDecodingSource::Create(source, flags, gSurface);
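+    // Pass the component name given with -O (if any) so a specific decoder can be requested
+    // instead of letting the framework choose one.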
+ rawSource = SimpleDecodingSource::Create(
+ source, flags, gSurface,
+ gComponentNameOverride.isEmpty() ? nullptr : gComponentNameOverride.c_str(),
+ !gComponentNameOverride.isEmpty());
if (rawSource == NULL) {
return;
}
@@ -618,6 +622,7 @@
fprintf(stderr, " -o playback audio\n");
fprintf(stderr, " -w(rite) filename (write to .mp4 file)\n");
fprintf(stderr, " -k seek test\n");
+ fprintf(stderr, " -O(verride) name of the component\n");
fprintf(stderr, " -x display a histogram of decoding times/fps "
"(video only)\n");
fprintf(stderr, " -q don't show progress indicator\n");
@@ -703,7 +708,7 @@
sp<ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kxSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kO:xSTd:D:")) >= 0) {
switch (res) {
case 'a':
{
@@ -732,6 +737,12 @@
break;
}
+ case 'O':
+ {
+ gComponentNameOverride.setTo(optarg);
+ break;
+ }
+
case 'l':
{
listComponents = true;
@@ -1073,7 +1084,7 @@
i, MediaExtractor::kIncludeExtensiveMetaData);
if (meta == NULL) {
- break;
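+            // Skip tracks with no metadata instead of aborting the whole scan.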
+ continue;
}
const char *mime;
meta->findCString(kKeyMIMEType, &mime);
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 2450920..fc5830a 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -25,6 +25,7 @@
#include "aaudio/AAudio.h"
#include <aaudio/AAudioTesting.h>
+#include <math.h>
#include "utility/AAudioUtilities.h"
@@ -50,44 +51,10 @@
return size;
}
-
// TODO expose and call clamp16_from_float function in primitives.h
static inline int16_t clamp16_from_float(float f) {
- /* Offset is used to expand the valid range of [-1.0, 1.0) into the 16 lsbs of the
- * floating point significand. The normal shift is 3<<22, but the -15 offset
- * is used to multiply by 32768.
- */
- static const float offset = (float)(3 << (22 - 15));
- /* zero = (0x10f << 22) = 0x43c00000 (not directly used) */
- static const int32_t limneg = (0x10f << 22) /*zero*/ - 32768; /* 0x43bf8000 */
- static const int32_t limpos = (0x10f << 22) /*zero*/ + 32767; /* 0x43c07fff */
-
- union {
- float f;
- int32_t i;
- } u;
-
- u.f = f + offset; /* recenter valid range */
- /* Now the valid range is represented as integers between [limneg, limpos].
- * Clamp using the fact that float representation (as an integer) is an ordered set.
- */
- if (u.i < limneg)
- u.i = -32768;
- else if (u.i > limpos)
- u.i = 32767;
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
-}
-
-// Same but without clipping.
-// Convert -1.0f to +1.0f to -32768 to +32767
-static inline int16_t floatToInt16(float f) {
- static const float offset = (float)(3 << (22 - 15));
- union {
- float f;
- int32_t i;
- } u;
- u.f = f + offset; /* recenter valid range */
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
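+    // Scale from [-1.0, 1.0) to [-32768, 32767]: multiply by 32768, clamp, then round to the nearest integer.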
+ static const float scale = 1 << 15;
+ return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
}
static float clipAndClampFloatToPcm16(float sample, float scaler) {
@@ -188,13 +155,14 @@
int32_t samplesPerFrame,
float amplitude1,
float amplitude2) {
- float scaler = amplitude1 / SHORT_SCALE;
- float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
+ float scaler = amplitude1;
+ float delta = (amplitude2 - amplitude1) / numFrames;
for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
// No need to clip because int16_t range is inherently limited.
float sample = *source++ * scaler;
- *destination++ = floatToInt16(sample);
+ *destination++ = (int16_t) roundf(sample);
}
scaler += delta;
}
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 884a2b3..9f80695 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -111,3 +111,16 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_aaudio_monkey",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_aaudio_monkey.cpp"],
+ header_libs: ["libaaudio_example_utils"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_aaudio_monkey.cpp b/media/libaaudio/tests/test_aaudio_monkey.cpp
new file mode 100644
index 0000000..be54835
--- /dev/null
+++ b/media/libaaudio/tests/test_aaudio_monkey.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Try to trigger bugs by playing randomly on multiple streams.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include <aaudio/AAudio.h>
+#include "AAudioArgsParser.h"
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
+#include "SineGenerator.h"
+
+#define DEFAULT_TIMEOUT_NANOS (1 * NANOS_PER_SECOND)
+
+#define NUM_LOOPS 1000
+#define MAX_MICROS_DELAY (2 * 1000 * 1000)
+
+// TODO Consider adding an input stream.
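+// Cumulative probability thresholds; invoke() picks the first action whose threshold exceeds the random draw.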
+#define PROB_START (0.20)
+#define PROB_PAUSE (PROB_START + 0.10)
+#define PROB_FLUSH (PROB_PAUSE + 0.10)
+#define PROB_STOP (PROB_FLUSH + 0.10)
+#define PROB_CLOSE (PROB_STOP + 0.10)
+static_assert(PROB_CLOSE < 0.9, "Probability sum too high.");
+
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames);
+
+void AAudioMonkeyErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error) {
+    printf("Error Callback, error: %d\n", (int)error);
+}
+
+// This function is not thread safe. Only use this from a single thread.
+double nextRandomDouble() {
+ return drand48();
+}
+
+class AAudioMonkey : public AAudioSimplePlayer {
+public:
+
+ AAudioMonkey(int index, AAudioArgsParser *argParser)
+ : mArgParser(argParser)
+ , mIndex(index) {}
+
+ aaudio_result_t open() {
+ printf("Monkey # %d ---------------------------------------------- OPEN\n", mIndex);
+ double offset = mIndex * 50;
+ mSine1.setup(440.0, 48000);
+ mSine1.setSweep(300.0 + offset, 600.0 + offset, 5.0);
+ mSine2.setup(660.0, 48000);
+ mSine2.setSweep(350.0 + offset, 900.0 + offset, 7.0);
+
+ aaudio_result_t result = AAudioSimplePlayer::open(*mArgParser,
+ AAudioMonkeyDataCallback,
+ AAudioMonkeyErrorCallbackProc,
+ this);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - player.open() returned %d\n", result);
+ }
+
+ mArgParser->compareWithStream(getStream());
+ return result;
+ }
+
+ bool isOpen() {
+ return (getStream() != nullptr);
+    }
+
+    /**
+ *
+ * @return true if stream passes tests
+ */
+ bool validate() {
+ if (!isOpen()) return true; // closed is OK
+
+ // update and query stream state
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ aaudio_result_t result = AAudioStream_waitForStateChange(getStream(),
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_waitForStateChange returned %d\n", result);
+ return false;
+ }
+
+ int64_t framesRead = AAudioStream_getFramesRead(getStream());
+ int64_t framesWritten = AAudioStream_getFramesWritten(getStream());
+ int32_t xRuns = AAudioStream_getXRunCount(getStream());
+ // Print status
+ printf("%30s, framesWritten = %8lld, framesRead = %8lld, xRuns = %d\n",
+ AAudio_convertStreamStateToText(state),
+ (unsigned long long) framesWritten,
+ (unsigned long long) framesRead,
+ xRuns);
+
+ if (framesWritten < framesRead) {
+ printf("WARNING - UNDERFLOW - diff = %d !!!!!!!!!!!!\n",
+ (int) (framesWritten - framesRead));
+ }
+ return true;
+ }
+
+ aaudio_result_t invoke() {
+ aaudio_result_t result = AAUDIO_OK;
+ if (!isOpen()) {
+ result = open();
+ if (result != AAUDIO_OK) return result;
+ }
+
+ if (!validate()) {
+ return -1;
+ }
+
+ double dice = nextRandomDouble();
+ // Select an action based on a weighted probability.
+ if (dice < PROB_START) {
+ printf("start\n");
+ result = AAudioStream_requestStart(getStream());
+ } else if (dice < PROB_PAUSE) {
+ printf("pause\n");
+ result = AAudioStream_requestPause(getStream());
+ } else if (dice < PROB_FLUSH) {
+ printf("flush\n");
+ result = AAudioStream_requestFlush(getStream());
+ } else if (dice < PROB_STOP) {
+ printf("stop\n");
+ result = AAudioStream_requestStop(getStream());
+ } else if (dice < PROB_CLOSE) {
+ printf("close\n");
+ result = close();
+ } else {
+ printf("do nothing\n");
+ }
+
+ if (result == AAUDIO_ERROR_INVALID_STATE) {
+ printf(" got AAUDIO_ERROR_INVALID_STATE - expected from a monkey\n");
+ result = AAUDIO_OK;
+ }
+ if (result == AAUDIO_OK && isOpen()) {
+ if (!validate()) {
+ result = -1;
+ }
+ }
+ return result;
+ }
+
+ aaudio_data_callback_result_t renderAudio(
+ AAudioStream *stream,
+ void *audioData,
+ int32_t numFrames) {
+
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
+ // This code only plays on the first one or two channels.
+ // TODO Support arbitrary number of channels.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Render sine waves as shorts to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Render sine waves as floats to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+ }
+
+private:
+ const AAudioArgsParser *mArgParser;
+ const int mIndex;
+ SineGenerator mSine1;
+ SineGenerator mSine2;
+};
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ printf("ERROR - AAudioMonkeyDataCallback needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ AAudioMonkey *monkey = (AAudioMonkey *) userData;
+ return monkey->renderAudio(stream, audioData, numFrames);
+}
+
+
+static void usage() {
+ AAudioArgsParser::usage();
+ printf(" -i{seed} Initial random seed\n");
+ printf(" -t{count} number of monkeys in the Troop\n");
+}
+
+int main(int argc, const char **argv) {
+ AAudioArgsParser argParser;
+ std::vector<AAudioMonkey> monkeys;
+ aaudio_result_t result;
+ int numMonkeys = 1;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Monkeys\n", argv[0]);
+
+ long int seed = (long int)getNanoseconds(); // different every time by default
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (argParser.parseArg(arg)) {
+ // Handle options that are not handled by the ArgParser
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'i':
+ seed = atol(&arg[2]);
+ break;
+ case 't':
+ numMonkeys = atoi(&arg[2]);
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ }
+ }
+
+ srand48(seed);
+ printf("seed = %ld, nextRandomDouble() = %f\n", seed, nextRandomDouble());
+
+ for (int m = 0; m < numMonkeys; m++) {
+ monkeys.emplace_back(m, &argParser);
+ }
+
+ for (int i = 0; i < NUM_LOOPS; i++) {
+ // pick a random monkey and invoke it
+ double dice = nextRandomDouble();
+ int monkeyIndex = floor(dice * numMonkeys);
+ printf("----------- Monkey #%d\n", monkeyIndex);
+ result = monkeys[monkeyIndex].invoke();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ // sleep some random time
+ dice = nextRandomDouble();
+ dice = dice * dice * dice; // skew towards smaller delays
+ int micros = (int) (dice * MAX_MICROS_DELAY);
+ usleep(micros);
+
+ // TODO consider making this multi-threaded, one thread per monkey, to catch more bugs
+ }
+
+ printf("PASS\n");
+ return EXIT_SUCCESS;
+
+error:
+ printf("FAIL - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ usleep(1000 * 1000); // give me time to stop the logcat
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/tests/test_linear_ramp.cpp b/media/libaaudio/tests/test_linear_ramp.cpp
index 5c53982..93226ba 100644
--- a/media/libaaudio/tests/test_linear_ramp.cpp
+++ b/media/libaaudio/tests/test_linear_ramp.cpp
@@ -15,13 +15,13 @@
*/
#include <iostream>
+#include <math.h>
#include <gtest/gtest.h>
#include "utility/AAudioUtilities.h"
#include "utility/LinearRamp.h"
-
TEST(test_linear_ramp, linear_ramp_segments) {
LinearRamp ramp;
const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
@@ -32,40 +32,40 @@
ramp.setLengthInFrames(8);
ramp.setTarget(8.0f);
- ASSERT_EQ(8, ramp.getLengthInFrames());
+ EXPECT_EQ(8, ramp.getLengthInFrames());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(0.0f, levelFrom);
- ASSERT_EQ(4.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(0.0f, levelFrom);
+ EXPECT_EQ(4.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(0.0f, destination[0]);
- ASSERT_EQ(1.0f, destination[1]);
- ASSERT_EQ(2.0f, destination[2]);
- ASSERT_EQ(3.0f, destination[3]);
+ EXPECT_EQ(0.0f, destination[0]);
+ EXPECT_EQ(1.0f, destination[1]);
+ EXPECT_EQ(2.0f, destination[2]);
+ EXPECT_EQ(3.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
@@ -80,29 +80,101 @@
ramp.setLengthInFrames(4);
ramp.setTarget(8.0f);
ramp.forceCurrent(4.0f);
- ASSERT_EQ(4.0f, ramp.getCurrent());
+ EXPECT_EQ(4.0f, ramp.getCurrent());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
+constexpr int16_t kMaxI16 = INT16_MAX;
+constexpr int16_t kMinI16 = INT16_MIN;
+constexpr int16_t kHalfI16 = 16384;
+constexpr int16_t kTenthI16 = 3277;
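+// kHalfI16 is 0.5f * 32768; kTenthI16 is 0.1f * 32768 = 3276.8, rounded to the nearest integer.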
+
+//void AAudioConvert_floatToPcm16(const float *source,
+// int16_t *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, float_to_i16) {
+ const float source[] = {12345.6f, 1.0f, 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f, -12345.6f};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ const int16_t expected[count] = {kMaxI16, kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16, kMinI16};
+
+ AAudioConvert_floatToPcm16(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
+
+//void AAudioConvert_pcm16ToFloat(const int16_t *source,
+// float *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, i16_to_float) {
+ const int16_t source[] = {kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ float destination[count];
+ const float expected[count] = {(32767.0f / 32768.0f), 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f};
+
+ AAudioConvert_pcm16ToFloat(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_NEAR(expected[i], destination[i], 0.0001f);
+ }
+
+}
+
+//void AAudio_linearRamp(const int16_t *source,
+// int16_t *destination,
+// int32_t numFrames,
+// int32_t samplesPerFrame,
+// float amplitude1,
+// float amplitude2);
+TEST(test_linear_ramp, ramp_i16_to_i16) {
+ const int16_t source[] = {1, 1, 1, 1, 1, 1, 1, 1};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ // Ramp will sweep from -1 to almost +1
+ const int16_t expected[count] = {
+ -1, // from -1.00
+ -1, // from -0.75
+ -1, // from -0.55, round away from zero
+ 0, // from -0.25, round up to zero
+ 0, // from 0.00
+ 0, // from 0.25, round down to zero
+ 1, // from 0.50, round away from zero
+ 1 // from 0.75
+ };
+
+ // sweep across zero to test symmetry
+ constexpr float amplitude1 = -1.0;
+ constexpr float amplitude2 = 1.0;
+ AAudio_linearRamp(source, destination, count, 1, amplitude1, amplitude2);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
index 9e505d5..de386da 100644
--- a/media/libaaudio/tests/test_various.cpp
+++ b/media/libaaudio/tests/test_various.cpp
@@ -41,10 +41,76 @@
// Test AAudioStream_setBufferSizeInFrames()
+constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
+
+//int foo() { // To fix Android Studio formatting when editing.
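+// Stopping a stream that has only been opened should succeed and leave it in the OPEN state.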
+TEST(test_various, aaudio_stop_when_open) {
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+// Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+// Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, MyDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+// Create an AAudioStream using the Builder.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
+//int boo() { // To fix Android Studio formatting when editing.
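+// Flushing a started stream should fail with AAUDIO_ERROR_INVALID_STATE and leave it STARTED.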
+TEST(test_various, aaudio_flush_when_started) {
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+// Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+// Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, MyDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+// Create an AAudioStream using the Builder.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_STARTING, &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, state);
+
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, AAudioStream_requestFlush(aaudioStream));
+
+ state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, state);
+
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
//int main() { // To fix Android Studio formatting when editing.
TEST(test_various, aaudio_set_buffer_size) {
-
- aaudio_result_t result = AAUDIO_OK;
int32_t bufferCapacity;
int32_t framesPerBurst = 0;
int32_t actualSize = 0;
@@ -103,5 +169,4 @@
AAudioStream_close(aaudioStream);
AAudioStreamBuilder_delete(aaudioBuilder);
- printf(" result = %d = %s\n", result, AAudio_convertResultToText(result));
}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 98e8d95..bedde43 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -20,7 +20,7 @@
// The headers for these interfaces will be available to any modules that
// include libaudioclient, at the path "aidl/package/path/BnFoo.h"
"aidl/android/media/IAudioRecord.aidl",
- "aidl/android/media/IPlayer.aidl",
+ ":libaudioclient_aidl",
"AudioEffect.cpp",
"AudioPolicy.cpp",
@@ -70,3 +70,11 @@
],
},
}
+
+// AIDL interface between libaudioclient and framework.jar
+filegroup {
+ name: "libaudioclient_aidl",
+ srcs: [
+ "aidl/android/media/IPlayer.aidl",
+ ],
+}
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 30f97ac..c8fa618 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -197,7 +197,7 @@
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
@@ -228,7 +228,7 @@
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
@@ -284,6 +284,11 @@
float maxRequiredSpeed,
audio_port_handle_t selectedDeviceId)
{
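+    // Locals used after the "exit" label are declared up front so the "goto exit" error paths
+    // below do not jump over initializations.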
+ status_t status;
+ uint32_t channelCount;
+ pid_t callingPid;
+ pid_t myPid;
+
ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
@@ -306,25 +311,29 @@
case TRANSFER_CALLBACK:
if (cbf == NULL || sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_OBTAIN:
case TRANSFER_SYNC:
if (sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_SHARED:
if (sharedBuffer == 0) {
ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
default:
ALOGE("Invalid transfer type %d", transferType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSharedBuffer = sharedBuffer;
mTransfer = transferType;
@@ -338,7 +347,8 @@
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
ALOGE("Track already in use");
- return INVALID_OPERATION;
+ status = INVALID_OPERATION;
+ goto exit;
}
// handle default values first.
@@ -348,7 +358,8 @@
if (pAttributes == NULL) {
if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("Invalid stream type %d", streamType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mStreamType = streamType;
@@ -380,16 +391,18 @@
// validate parameters
if (!audio_is_valid_format(format)) {
ALOGE("Invalid format %#x", format);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mFormat = format;
if (!audio_is_output_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mChannelMask = channelMask;
- uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+ channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
// force direct flag if format is not linear PCM
@@ -424,7 +437,8 @@
// sampling rate must be specified for direct outputs
if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSampleRate = sampleRate;
mOriginalSampleRate = sampleRate;
@@ -455,12 +469,14 @@
if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
ALOGE("notificationFrames=%d not permitted for non-fast track",
notificationFrames);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
if (frameCount > 0) {
ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
notificationFrames, frameCount);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mNotificationFramesReq = 0;
const uint32_t minNotificationsPerBuffer = 1;
@@ -472,15 +488,15 @@
notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
}
mNotificationFramesAct = 0;
- int callingpid = IPCThreadState::self()->getCallingPid();
- int mypid = getpid();
- if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+ callingPid = IPCThreadState::self()->getCallingPid();
+ myPid = getpid();
+ if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
- if (pid == -1 || (callingpid != mypid)) {
- mClientPid = callingpid;
+ if (pid == -1 || (callingPid != myPid)) {
+ mClientPid = callingPid;
} else {
mClientPid = pid;
}
@@ -495,7 +511,7 @@
}
// create the IAudioTrack
- status_t status = createTrack_l();
+ status = createTrack_l();
if (status != NO_ERROR) {
if (mAudioTrackThread != 0) {
@@ -503,10 +519,9 @@
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
- return status;
+ goto exit;
}
- mStatus = NO_ERROR;
mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
@@ -534,7 +549,10 @@
mFramesWrittenServerOffset = 0;
mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
mVolumeHandler = new media::VolumeHandler();
- return NO_ERROR;
+
+exit:
+ mStatus = status;
+ return status;
}
// -------------------------------------------------------------------------
@@ -1278,15 +1296,16 @@
status_t AudioTrack::createTrack_l()
{
+ status_t status;
+ bool callbackAdded = false;
+
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- status_t status;
- bool callbackAdded = false;
-
{
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioTrack is re-created.
@@ -1355,7 +1374,10 @@
if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
- goto error;
+ if (status == NO_ERROR) {
+ status = NO_INIT;
+ }
+ goto exit;
}
ALOG_ASSERT(track != 0);
@@ -1383,13 +1405,13 @@
if (iMem == 0) {
ALOGE("Could not get control block");
status = NO_INIT;
- goto error;
+ goto exit;
}
void *iMemPointer = iMem->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
status = NO_INIT;
- goto error;
+ goto exit;
}
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
@@ -1443,7 +1465,7 @@
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
status = NO_INIT;
- goto error;
+ goto exit;
}
}
@@ -1486,17 +1508,15 @@
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
- return NO_ERROR;
}
-error:
- if (callbackAdded) {
+exit:
+ if (status != NO_ERROR && callbackAdded) {
// note: mOutput is always valid if callbackAdded is true
AudioSystem::removeAudioDeviceCallback(this, mOutput);
}
- if (status == NO_ERROR) {
- status = NO_INIT;
- }
+
+ mStatus = status;
// sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
return status;
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 0397eec..970ae90 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -928,6 +928,7 @@
bool hasAttributes = data.readInt32() != 0;
if (hasAttributes) {
data.read(&attr, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attr);
}
audio_session_t session = (audio_session_t)data.readInt32();
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
@@ -993,6 +994,7 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_attributes_t attr;
data.read(&attr, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attr);
audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
audio_session_t session = (audio_session_t)data.readInt32();
pid_t pid = (pid_t)data.readInt32();
@@ -1368,6 +1370,7 @@
data.read(&source, sizeof(struct audio_port_config));
audio_attributes_t attributes;
data.read(&attributes, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attributes);
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
status_t status = startAudioSource(&source, &attributes, &handle);
reply->writeInt32(status);
@@ -1418,6 +1421,15 @@
}
}
+void BnAudioPolicyService::sanetizeAudioAttributes(audio_attributes_t* attr)
+{
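+    // A parcel may carry a tags field that is not NUL-terminated; log a SafetyNet event
+    // if so and always force termination so later reads cannot run past the buffer.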
+ const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
+ if (strnlen(attr->tags, tagsMaxSize) >= tagsMaxSize) {
+ android_errorWriteLog(0x534e4554, "68953950"); // SafetyNet logging
+ }
+ attr->tags[tagsMaxSize - 1] = '\0';
+}
+
// ----------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 7c88e57..5558b77 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -178,6 +178,8 @@
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
+private:
+ void sanetizeAudioAttributes(audio_attributes_t* attr);
};
// ----------------------------------------------------------------------------
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
new file mode 100644
index 0000000..e9c1606
--- /dev/null
+++ b/media/libaudioclient/tests/Android.bp
@@ -0,0 +1,20 @@
+cc_defaults {
+ name: "libaudioclient_tests_defaults",
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+}
+
+cc_test {
+ name: "test_create_audiotrack",
+ defaults: ["libaudioclient_tests_defaults"],
+ srcs: ["test_create_audiotrack.cpp"],
+ shared_libs: [
+ "libaudioclient",
+ "libcutils",
+ "libutils",
+ "libbinder",
+ ],
+ data: ["track_test_input_*.txt"],
+}
diff --git a/media/libaudioclient/tests/test_create_audiotrack.cpp b/media/libaudioclient/tests/test_create_audiotrack.cpp
new file mode 100644
index 0000000..b0351b2
--- /dev/null
+++ b/media/libaudioclient/tests/test_create_audiotrack.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test program that creates AudioTrack instances from parameters read
+ * from an input file and dumps the resulting track state, so that
+ * AudioTrack creation can be checked against reference output.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <binder/MemoryBase.h>
+#include <binder/MemoryDealer.h>
+#include <binder/MemoryHeapBase.h>
+#include <media/AudioTrack.h>
+
+#define MAX_INPUT_FILE_LINE_LENGTH 512
+#define MAX_OUTPUT_FILE_LINE_LENGTH 512
+
+#define NUM_ARGUMENTS 10
+#define VERSION_KEY "version"
+#define VERSION_VALUE "1.0"
+
+namespace android {
+
+int readLine(FILE *inputFile, char *line, int size) {
+ int ret = 0;
+ while (true) {
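+        // Keep reading until a non-comment line is found; lines starting with '#' are skipped.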
+ char *str = fgets(line, size, inputFile);
+ if (str == nullptr) {
+ ret = -1;
+ break;
+ }
+ if (feof(inputFile) != 0 || ferror(inputFile) != 0) {
+ ret = -1;
+ break;
+ }
+ if (strlen(str) != 0 && str[0] != '#') {
+ break;
+ }
+ }
+ return ret;
+}
+
+bool checkVersion(FILE *inputFile)
+{
+ char line[MAX_INPUT_FILE_LINE_LENGTH];
+ char versionKey[MAX_INPUT_FILE_LINE_LENGTH];
+ char versionValue[MAX_INPUT_FILE_LINE_LENGTH];
+
+ if (readLine(inputFile, line, MAX_INPUT_FILE_LINE_LENGTH) != 0) {
+ fprintf(stderr, "Missing version in input file\n");
+ return false;
+ }
+
+ if (sscanf(line, " %s %s", versionKey, versionValue) != 2) {
+ fprintf(stderr, "Malformed version in input file\n");
+ return false;
+ }
+ if (strcmp(versionKey, VERSION_KEY) != 0) {
+ fprintf(stderr, "Malformed version in input file\n");
+ return false;
+ }
+ if (strcmp(versionValue, VERSION_VALUE) != 0) {
+ fprintf(stderr, "Wrong input file version %s expecting %s\n", versionValue, VERSION_VALUE);
+ return false;
+ }
+ return true;
+}
+
+void callback(int event __unused, void* user __unused, void *info __unused)
+{
+}
+
+void testTrack(FILE *inputFile, int outputFileFd)
+{
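+    // With no input file, create a single AudioTrack with default parameters and dump it.
+    // Otherwise run one AudioTrack creation test per line of the input file.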
+ char line[MAX_INPUT_FILE_LINE_LENGTH];
+ uint32_t testCount = 0;
+ Vector<String16> args;
+
+ if (inputFile == nullptr) {
+ sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_DEFAULT,
+ 0 /* sampleRate */,
+ AUDIO_FORMAT_DEFAULT,
+ AUDIO_CHANNEL_OUT_STEREO);
+ if (track == 0 || track->initCheck() != NO_ERROR) {
+ write(outputFileFd, "Error creating AudioTrack\n",
+                    sizeof("Error creating AudioTrack\n") - 1);
+ } else {
+ track->dump(outputFileFd, args);
+ }
+ return;
+ }
+
+ // check version
+ if (!checkVersion(inputFile)) {
+ return;
+ }
+
+ while (readLine(inputFile, line, MAX_INPUT_FILE_LINE_LENGTH) == 0) {
+ uint32_t sampleRate;
+ audio_format_t format;
+ audio_channel_mask_t channelMask;
+ size_t frameCount;
+ int32_t notificationFrames;
+ uint32_t useSharedBuffer;
+ audio_output_flags_t flags;
+ audio_session_t sessionId;
+ audio_usage_t usage;
+ audio_content_type_t contentType;
+ audio_attributes_t attributes;
+ sp<IMemory> sharedBuffer;
+ sp<MemoryDealer> heap;
+ audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+ status_t status;
+ char statusStr[MAX_OUTPUT_FILE_LINE_LENGTH];
+ bool offload = false;
+ bool fast = false;
+
+ if (sscanf(line, " %u %x %x %zu %d %u %x %u %u %u",
+ &sampleRate, &format, &channelMask,
+ &frameCount, ¬ificationFrames, &useSharedBuffer,
+ &flags, &sessionId, &usage, &contentType) != NUM_ARGUMENTS) {
+ fprintf(stderr, "Malformed line for test #%u in input file\n", testCount+1);
+ continue;
+ }
+ testCount++;
+
+ if (useSharedBuffer != 0) {
+ size_t heapSize = audio_channel_count_from_out_mask(channelMask) *
+ audio_bytes_per_sample(format) * frameCount;
+ heap = new MemoryDealer(heapSize, "AudioTrack Heap Base");
+ sharedBuffer = heap->allocate(heapSize);
+ frameCount = 0;
+ notificationFrames = 0;
+ }
+ if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ offloadInfo.sample_rate = sampleRate;
+ offloadInfo.channel_mask = channelMask;
+ offloadInfo.format = format;
+ offload = true;
+ }
+ if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
+ fast = true;
+ }
+
+ memset(&attributes, 0, sizeof(attributes));
+ attributes.content_type = contentType;
+ attributes.usage = usage;
+
+ sp<AudioTrack> track = new AudioTrack();
+
+ track->set(AUDIO_STREAM_DEFAULT,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ (fast || offload) ? callback : nullptr,
+ nullptr,
+ notificationFrames,
+ sharedBuffer,
+ false,
+ sessionId,
+ ((fast && sharedBuffer == 0) || offload) ?
+ AudioTrack::TRANSFER_CALLBACK : AudioTrack::TRANSFER_DEFAULT,
+ offload ? &offloadInfo : nullptr,
+ getuid(),
+ getpid(),
+ &attributes,
+ false,
+ 1.0f,
+ AUDIO_PORT_HANDLE_NONE);
+ status = track->initCheck();
+ sprintf(statusStr, "\n#### Test %u status %d\n", testCount, status);
+ write(outputFileFd, statusStr, strlen(statusStr));
+ if (status != NO_ERROR) {
+ continue;
+ }
+ track->dump(outputFileFd, args);
+ }
+}
+
+}; // namespace android
+
+
+int main(int argc, char **argv)
+{
+ FILE *inputFile = nullptr;
+ int outputFileFd = STDOUT_FILENO;
+ mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ int ret = 0;
+
+ if (argc > 5) {
+ fprintf(stderr, "Usage: %s [-i input_params.txt] [-o output_params.txt]\n", argv[0]);
+ return 1;
+ }
+
+ argv++;
+ while (*argv) {
+ if (strcmp(*argv, "-i") == 0) {
+ argv++;
+ if (*argv) {
+ inputFile = fopen(*argv, "r");
+ if (inputFile == nullptr) {
+ ret = 1;
+ }
+ } else {
+ ret = 1;
+ }
+ }
+ if (strcmp(*argv, "-o") == 0) {
+ argv++;
+ if (*argv) {
+ outputFileFd = open(*argv, O_WRONLY|O_CREAT, mode);
+ if (outputFileFd < 0) {
+ ret = 1;
+ }
+ } else {
+ ret = 1;
+ }
+ argv++;
+ }
+ if (*argv) {
+ argv++;
+ }
+ }
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ android::testTrack(inputFile, outputFileFd);
+
+ if (inputFile) {
+ fclose(inputFile);
+ }
+ if (outputFileFd >= 0 && outputFileFd != STDOUT_FILENO) {
+ close(outputFileFd);
+ }
+
+ return ret;
+}
+
diff --git a/media/libaudioclient/tests/track_test_input_v1.0_ref.txt b/media/libaudioclient/tests/track_test_input_v1.0_ref.txt
new file mode 100644
index 0000000..b923ff3
--- /dev/null
+++ b/media/libaudioclient/tests/track_test_input_v1.0_ref.txt
@@ -0,0 +1,40 @@
+version 1.0
+# Input file for test_create_audiotrack
+# Add one line for each tested AudioTrack constructor with the following arguments:
+# sampleRate format channelMask frameCount notificationFrames sharedBuffer flags sessionId usage contentType
+# sample rate tests
+ 48000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 24000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 16000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 8000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 44100 0x1 0x3 4410 2205 0 0x0 0 1 2
+ 22050 0x1 0x3 4410 2205 0 0x0 0 1 2
+ 11025 0x1 0x3 4410 2205 0 0x0 0 1 2
+# format tests
+ 48000 0x2 0x3 4800 2400 0 0x0 0 1 2
+ 48000 0x3 0x3 4800 2400 0 0x0 0 1 2
+ 48000 0x5 0x3 4800 2400 0 0x0 0 1 2
+# channel mask tests
+ 48000 0x1 0x1 4800 2400 0 0x0 0 1 2
+ 48000 0x1 0x3F 4800 2400 0 0x0 0 1 2
+ 48000 0x1 0x63F 4800 2400 0 0x0 0 1 2
+# framecount tests
+ 48000 0x1 0x3 0 0 0 0x0 0 1 2
+ 48000 0x1 0x3 48000 0 0 0x0 0 1 2
+ 48000 0x1 0x3 0 -2 0 0x4 0 1 2
+# shared memory tests
+ 48000 0x1 0x3 4800 2400 1 0x0 0 1 2
+ 48000 0x1 0x3 4800 2400 1 0x4 0 1 2
+# flags test
+ 48000 0x1 0x3 4800 2400 0 0x4 0 1 2
+ 48000 0x1 0x3 4800 2400 0 0x8 0 1 2
+ 44100 0x1000000 0x3 4800 2400 0 0x11 0 1 2
+# session tests
+ 48000 0x1 0x3 4800 2400 0 0x0 1001 1 2
+# attributes tests
+ 48000 0x1 0x3 4800 2400 0 0x0 0 0 0
+ 48000 0x1 0x3 4800 2400 0 0x0 0 2 1
+ 48000 0x1 0x3 4800 2400 0 0x0 0 4 2
+ 48000 0x1 0x3 4800 2400 0 0x0 0 5 2
+ 48000 0x1 0x3 4800 2400 0 0x0 0 11 1
+ 48000 0x1 0x3 4800 2400 0 0x0 0 12 1
diff --git a/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt b/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt
new file mode 100644
index 0000000..5fe433c
--- /dev/null
+++ b/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt
@@ -0,0 +1,308 @@
+
+#### Test 1 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(49), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 2 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(57), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(24000), original sample rate(24000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (250), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 3 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(65), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(16000), original sample rate(16000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (350), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 4 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(73), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(8000), original sample rate(8000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (650), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 5 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(81), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(44100), original sample rate(44100), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 6 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(89), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(22050), original sample rate(22050), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (250), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 7 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(97), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(11025), original sample rate(11025), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (450), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 8 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(105), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(2), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 9 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(113), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(3), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 10 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(121), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(5), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 11 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(129), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(1), channel count(1)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 12 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(137), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3f), channel count(6)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 13 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(145), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(63f), channel count(8)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 14 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(153), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(1924), req. frame count(1924)
+ notif. frame count(962), req. notif. frame count(0), req. notif. per buff(0)
+ latency (90), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 15 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(161), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(48000), req. frame count(48000)
+ notif. frame count(24000), req. notif. frame count(0), req. notif. per buff(0)
+ latency (1050), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 16 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(169), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(480), req. frame count(480)
+ notif. frame count(240), req. notif. frame count(0), req. notif. per buff(2)
+ latency (60), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 17 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(177), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(0), req. notif. frame count(0), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 18 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(185), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(0), req. notif. frame count(0), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 19 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(193), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(240), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 20 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(201), flags(8)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 21 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(209), flags(11)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1000000), channel mask(3), channel count(2)
+ sample rate(44100), original sample rate(44100), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(4800), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (204), selected device Id(0), routed device Id(2)
+ output(53) AF latency (96) AF frame count(262144) AF SampleRate(44100)
+
+#### Test 22 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(1001), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 23 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(217), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 24 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(225), flags(0)
+ stream type(0), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (140), selected device Id(0), routed device Id(1)
+ output(45) AF latency (40) AF frame count(960) AF SampleRate(48000)
+
+#### Test 25 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(233), flags(0)
+ stream type(4), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(3)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 26 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(241), flags(0)
+ stream type(5), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(3)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 27 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(249), flags(0)
+ stream type(10), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 28 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(257), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
diff --git a/media/libaudiohal/EffectHalHidl.cpp b/media/libaudiohal/EffectHalHidl.cpp
index 61fb6bab..f4d1958 100644
--- a/media/libaudiohal/EffectHalHidl.cpp
+++ b/media/libaudiohal/EffectHalHidl.cpp
@@ -121,16 +121,24 @@
}
status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (mInBuffer == 0 || buffer->audioBuffer() != mInBuffer->audioBuffer()) {
- mBuffersChanged = true;
+ if (!mBuffersChanged) {
+ if (buffer.get() == nullptr || mInBuffer.get() == nullptr) {
+ mBuffersChanged = buffer.get() != mInBuffer.get();
+ } else {
+ mBuffersChanged = buffer->audioBuffer() != mInBuffer->audioBuffer();
+ }
}
mInBuffer = buffer;
return OK;
}
status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (mOutBuffer == 0 || buffer->audioBuffer() != mOutBuffer->audioBuffer()) {
- mBuffersChanged = true;
+ if (!mBuffersChanged) {
+ if (buffer.get() == nullptr || mOutBuffer.get() == nullptr) {
+ mBuffersChanged = buffer.get() != mOutBuffer.get();
+ } else {
+ mBuffersChanged = buffer->audioBuffer() != mOutBuffer->audioBuffer();
+ }
}
mOutBuffer = buffer;
return OK;
diff --git a/media/libeffects/config/Android.bp b/media/libeffects/config/Android.bp
index 4398a91..3e88c7c 100644
--- a/media/libeffects/config/Android.bp
+++ b/media/libeffects/config/Android.bp
@@ -5,6 +5,11 @@
srcs: ["src/EffectsConfig.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
shared_libs: [
"liblog",
"libtinyxml2",
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 0c71487..a618676 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -37,6 +37,8 @@
"AudioPlayer.cpp",
"AudioSource.cpp",
"BufferImpl.cpp",
+ "CCodec.cpp",
+ "CCodecBufferChannel.cpp",
"CodecBase.cpp",
"CallbackDataSource.cpp",
"CallbackMediaSource.cpp",
@@ -89,6 +91,7 @@
"libdl",
"libdrmframework",
"libgui",
+ "libion",
"liblog",
"libmedia",
"libmedia_omx",
@@ -100,6 +103,7 @@
"libui",
"libutils",
"libmedia_helper",
+ "libstagefright_codec2",
"libstagefright_foundation",
"libstagefright_omx",
"libstagefright_omx_utils",
@@ -111,6 +115,11 @@
"android.hidl.allocator@1.0",
"android.hardware.cas.native@1.0",
"android.hardware.media.omx@1.0",
+ "android.hardware.graphics.allocator@2.0",
+ "android.hardware.graphics.mapper@2.0",
+
+ // XXX: hack
+ "libstagefright_soft_c2avcdec",
],
static_libs: [
@@ -125,6 +134,9 @@
"libstagefright_esds",
"libstagefright_id3",
"libFLAC",
+
+ // XXX: hack
+ "libstagefright_codec2_vndk",
],
export_shared_lib_headers: [
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index fee3739..9fb6d34 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -24,11 +24,14 @@
#include <media/ICrypto.h>
#include <utils/NativeHandle.h>
+#include "include/Codec2Buffer.h"
#include "include/SecureBuffer.h"
#include "include/SharedMemoryBuffer.h"
namespace android {
+// SharedMemoryBuffer
+
SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem)
: MediaCodecBuffer(format, new ABuffer(mem->pointer(), mem->size())),
mMemory(mem) {
@@ -39,6 +42,8 @@
mTMemory(mem) {
}
+// SecureBuffer
+
SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
: MediaCodecBuffer(format, new ABuffer(nullptr, size)),
mPointer(ptr) {
@@ -59,4 +64,28 @@
return ICrypto::kDestinationTypeNativeHandle;
}
+// Codec2Buffer
+
+// static
+sp<Codec2Buffer> Codec2Buffer::allocate(
+ const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block) {
+ C2WriteView writeView(block->map().get());
+ if (writeView.error() != C2_OK) {
+ return nullptr;
+ }
+ return new Codec2Buffer(format, new ABuffer(writeView.base(), writeView.capacity()), block);
+}
+
+C2ConstLinearBlock Codec2Buffer::share() {
+ return mBlock->share(offset(), size(), C2Fence());
+}
+
+Codec2Buffer::Codec2Buffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &buffer,
+ const std::shared_ptr<C2LinearBlock> &block)
+ : MediaCodecBuffer(format, buffer),
+ mBlock(block) {
+}
+
} // namespace android
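
The Codec2Buffer additions above wrap a writable C2LinearBlock as a MediaCodecBuffer so client code can fill it, and share() hands the filled range back to Codec2 as a read-only C2ConstLinearBlock. The sketch below is not part of the patch; the block pool, format message, and 64 KiB capacity are illustrative assumptions (CCodecBufferChannel later in this change does the equivalent work internally).

    std::shared_ptr<C2BlockPool> pool;      // assumed: obtained via GetCodec2BlockPool()
    sp<AMessage> format = new AMessage;     // assumed: the buffer format
    std::shared_ptr<C2LinearBlock> block;
    status_t err = pool->fetchLinearBlock(
            65536, { 0, C2MemoryUsage::kSoftwareWrite }, &block);
    if (err == OK) {
        sp<Codec2Buffer> buffer = Codec2Buffer::allocate(format, block);
        if (buffer != nullptr) {
            // write the input into buffer->data(), then mark the valid range
            buffer->setRange(0, 0 /* bytes written */);
            // hand the filled range back to Codec2 as a read-only block
            C2ConstLinearBlock shared = buffer->share();
            (void)shared;
        }
    }
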
diff --git a/media/libstagefright/CCodec.cpp b/media/libstagefright/CCodec.cpp
new file mode 100644
index 0000000..080d00f
--- /dev/null
+++ b/media/libstagefright/CCodec.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CCodec"
+#include <utils/Log.h>
+
+// XXX: HACK
+#include "codecs/avcdec/C2SoftAvcDec.h"
+
+#include <thread>
+
+#include <gui/Surface.h>
+#include <media/stagefright/CCodec.h>
+
+#include "include/CCodecBufferChannel.h"
+
+using namespace std::chrono_literals;
+
+namespace android {
+
+namespace {
+
+class CCodecWatchdog : public AHandler {
+private:
+ enum {
+ kWhatRegister,
+ kWhatWatch,
+ };
+ constexpr static int64_t kWatchIntervalUs = 3000000; // 3 secs
+
+public:
+ static sp<CCodecWatchdog> getInstance() {
+ Mutexed<sp<CCodecWatchdog>>::Locked instance(sInstance);
+ if (*instance == nullptr) {
+ *instance = new CCodecWatchdog;
+ (*instance)->init();
+ }
+ return *instance;
+ }
+
+ ~CCodecWatchdog() = default;
+
+ void registerCodec(CCodec *codec) {
+ sp<AMessage> msg = new AMessage(kWhatRegister, this);
+ msg->setPointer("codec", codec);
+ msg->post();
+ }
+
+protected:
+ void onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatRegister: {
+ void *ptr = nullptr;
+ CHECK(msg->findPointer("codec", &ptr));
+ Mutexed<std::list<wp<CCodec>>>::Locked codecs(mCodecs);
+ codecs->emplace_back((CCodec *)ptr);
+ break;
+ }
+
+ case kWhatWatch: {
+ Mutexed<std::list<wp<CCodec>>>::Locked codecs(mCodecs);
+ for (auto it = codecs->begin(); it != codecs->end(); ) {
+ sp<CCodec> codec = it->promote();
+ if (codec == nullptr) {
+ it = codecs->erase(it);
+ continue;
+ }
+ codec->initiateReleaseIfStuck();
+ ++it;
+ }
+ msg->post(kWatchIntervalUs);
+ break;
+ }
+
+ default: {
+ TRESPASS("CCodecWatchdog: unrecognized message");
+ }
+ }
+ }
+
+private:
+ CCodecWatchdog() : mLooper(new ALooper) {}
+
+ void init() {
+ mLooper->setName("CCodecWatchdog");
+ mLooper->registerHandler(this);
+ mLooper->start();
+ (new AMessage(kWhatWatch, this))->post(kWatchIntervalUs);
+ }
+
+ static Mutexed<sp<CCodecWatchdog>> sInstance;
+
+ sp<ALooper> mLooper;
+ Mutexed<std::list<wp<CCodec>>> mCodecs;
+};
+
+Mutexed<sp<CCodecWatchdog>> CCodecWatchdog::sInstance;
+
+class CCodecListener : public C2Component::Listener {
+public:
+ CCodecListener(const std::shared_ptr<CCodecBufferChannel> &channel)
+ : mChannel(channel) {
+ }
+
+ virtual void onWorkDone_nb(
+ std::weak_ptr<C2Component> component,
+ std::vector<std::unique_ptr<C2Work>> workItems) override {
+ (void) component;
+ mChannel->onWorkDone(std::move(workItems));
+ }
+
+ virtual void onTripped_nb(
+ std::weak_ptr<C2Component> component,
+ std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
+ // TODO
+ (void) component;
+ (void) settingResult;
+ }
+
+ virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
+ // TODO
+ (void) component;
+ (void) errorCode;
+ }
+
+private:
+ std::shared_ptr<CCodecBufferChannel> mChannel;
+};
+
+} // namespace
+
+CCodec::CCodec()
+ : mChannel(new CCodecBufferChannel([this] (status_t err, enum ActionCode actionCode) {
+ mCallback->onError(err, actionCode);
+ })) {
+ CCodecWatchdog::getInstance()->registerCodec(this);
+}
+
+CCodec::~CCodec() {
+}
+
+std::shared_ptr<BufferChannelBase> CCodec::getBufferChannel() {
+ return mChannel;
+}
+
+void CCodec::initiateAllocateComponent(const sp<AMessage> &msg) {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != RELEASED) {
+ mCallback->onError(INVALID_OPERATION, ACTION_CODE_FATAL);
+ return;
+ }
+ state->mState = ALLOCATING;
+ }
+
+ AString componentName;
+ if (!msg->findString("componentName", &componentName)) {
+ // TODO: find a componentName appropriate for the media type
+ }
+
+ sp<AMessage> allocMsg(new AMessage(kWhatAllocate, this));
+ allocMsg->setString("componentName", componentName);
+ allocMsg->post();
+}
+
+void CCodec::allocate(const AString &componentName) {
+ // TODO: use C2ComponentStore to create component
+ mListener.reset(new CCodecListener(mChannel));
+
+ std::shared_ptr<C2Component> comp(new C2SoftAvcDec(componentName.c_str(), 0));
+ comp->setListener_sm(mListener);
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != ALLOCATING) {
+ state->mState = RELEASED;
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = ALLOCATED;
+ state->mComp = comp;
+ }
+ mChannel->setComponent(comp);
+ mCallback->onComponentAllocated(comp->intf()->getName().c_str());
+}
+
+void CCodec::initiateConfigureComponent(const sp<AMessage> &format) {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != ALLOCATED) {
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
+ }
+
+ sp<AMessage> msg(new AMessage(kWhatConfigure, this));
+ msg->setMessage("format", format);
+ msg->post();
+}
+
+void CCodec::configure(const sp<AMessage> &msg) {
+ sp<AMessage> inputFormat(new AMessage);
+ sp<AMessage> outputFormat(new AMessage);
+ status_t err = [=]() -> status_t {
+ AString mime;
+ if (!msg->findString("mime", &mime)) {
+ return BAD_VALUE;
+ }
+
+ int32_t encoder;
+ if (!msg->findInt32("encoder", &encoder)) {
+ encoder = false;
+ }
+
+ sp<RefBase> obj;
+ if (msg->findObject("native-window", &obj)) {
+ sp<Surface> surface = static_cast<Surface *>(obj.get());
+ setSurface(surface);
+ }
+
+ // TODO
+
+ return OK;
+ }();
+ if (err != OK) {
+ mCallback->onError(err, ACTION_CODE_FATAL);
+ return;
+ }
+
+ {
+ Mutexed<Formats>::Locked formats(mFormats);
+ formats->mInputFormat = inputFormat;
+ formats->mOutputFormat = outputFormat;
+ }
+ mCallback->onComponentConfigured(inputFormat, outputFormat);
+}
+
+
+void CCodec::initiateCreateInputSurface() {
+ // TODO
+}
+
+void CCodec::initiateSetInputSurface(const sp<PersistentSurface> &surface) {
+ // TODO
+ (void) surface;
+}
+
+void CCodec::initiateStart() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != ALLOCATED) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = STARTING;
+ }
+
+ (new AMessage(kWhatStart, this))->post();
+}
+
+void CCodec::start() {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != STARTING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp = state->mComp;
+ }
+ c2_status_t err = comp->start();
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
+ sp<AMessage> inputFormat;
+ sp<AMessage> outputFormat;
+ {
+ Mutexed<Formats>::Locked formats(mFormats);
+ inputFormat = formats->mInputFormat;
+ outputFormat = formats->mOutputFormat;
+ }
+ mChannel->start(inputFormat, outputFormat);
+
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != STARTING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = RUNNING;
+ }
+ mCallback->onStartCompleted();
+}
+
+void CCodec::initiateShutdown(bool keepComponentAllocated) {
+ if (keepComponentAllocated) {
+ initiateStop();
+ } else {
+ initiateRelease();
+ }
+}
+
+void CCodec::initiateStop() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == ALLOCATED
+ || state->mState == RELEASED
+ || state->mState == STOPPING
+ || state->mState == RELEASING) {
+ // We're already stopped, released, or doing it right now.
+ state.unlock();
+ mCallback->onStopCompleted();
+ state.lock();
+ return;
+ }
+ state->mState = STOPPING;
+ }
+
+ (new AMessage(kWhatStop, this))->post();
+}
+
+void CCodec::stop() {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == RELEASING) {
+ state.unlock();
+ // We're already stopped or release is in progress.
+ mCallback->onStopCompleted();
+ state.lock();
+ return;
+ } else if (state->mState != STOPPING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp = state->mComp;
+ }
+ mChannel->stop();
+ status_t err = comp->stop();
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ }
+
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == STOPPING) {
+ state->mState = ALLOCATED;
+ }
+ }
+ mCallback->onStopCompleted();
+}
+
+void CCodec::initiateRelease(bool sendCallback /* = true */) {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == RELEASED || state->mState == RELEASING) {
+ // We're already released or doing it right now.
+ if (sendCallback) {
+ state.unlock();
+ mCallback->onReleaseCompleted();
+ state.lock();
+ }
+ return;
+ }
+ if (state->mState == ALLOCATING) {
+ state->mState = RELEASING;
+ // With the altered state, allocate() will fail and clean up.
+ if (sendCallback) {
+ state.unlock();
+ mCallback->onReleaseCompleted();
+ state.lock();
+ }
+ return;
+ }
+ state->mState = RELEASING;
+ }
+
+ std::thread([this, sendCallback] { release(sendCallback); }).detach();
+}
+
+void CCodec::release(bool sendCallback) {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState == RELEASED) {
+ if (sendCallback) {
+ state.unlock();
+ mCallback->onReleaseCompleted();
+ state.lock();
+ }
+ return;
+ }
+ comp = state->mComp;
+ }
+ mChannel->stop();
+ comp->release();
+
+ {
+ Mutexed<State>::Locked state(mState);
+ state->mState = RELEASED;
+ state->mComp.reset();
+ }
+ if (sendCallback) {
+ mCallback->onReleaseCompleted();
+ }
+}
+
+status_t CCodec::setSurface(const sp<Surface> &surface) {
+ return mChannel->setSurface(surface);
+}
+
+void CCodec::signalFlush() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != RUNNING) {
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
+ state->mState = FLUSHING;
+ }
+
+ (new AMessage(kWhatFlush, this))->post();
+}
+
+void CCodec::flush() {
+ std::shared_ptr<C2Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != FLUSHING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp = state->mComp;
+ }
+
+ mChannel->stop();
+
+ std::list<std::unique_ptr<C2Work>> flushedWork;
+ c2_status_t err = comp->flush_sm(C2Component::FLUSH_COMPONENT, &flushedWork);
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ }
+
+ mChannel->flush(flushedWork);
+
+ {
+ Mutexed<State>::Locked state(mState);
+ state->mState = FLUSHED;
+ }
+ mCallback->onFlushCompleted();
+}
+
+void CCodec::signalResume() {
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != FLUSHED) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = RESUMING;
+ }
+
+ mChannel->start(nullptr, nullptr);
+
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->mState != RESUMING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ state->mState = RUNNING;
+ }
+}
+
+void CCodec::signalSetParameters(const sp<AMessage> &msg) {
+ // TODO
+ (void) msg;
+}
+
+void CCodec::signalEndOfInputStream() {
+}
+
+void CCodec::signalRequestIDRFrame() {
+ // TODO
+}
+
+void CCodec::onMessageReceived(const sp<AMessage> &msg) {
+ TimePoint now = std::chrono::steady_clock::now();
+ switch (msg->what()) {
+ case kWhatAllocate: {
+ // C2ComponentStore::createComponent() should return within 100ms.
+ setDeadline(now + 150ms);
+ AString componentName;
+ CHECK(msg->findString("componentName", &componentName));
+ allocate(componentName);
+ break;
+ }
+ case kWhatConfigure: {
+ // C2Component::commit_sm() should return within 5ms.
+ setDeadline(now + 50ms);
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+ configure(format);
+ break;
+ }
+ case kWhatStart: {
+ // C2Component::start() should return within 500ms.
+ setDeadline(now + 550ms);
+ start();
+ break;
+ }
+ case kWhatStop: {
+ // C2Component::stop() should return within 500ms.
+ setDeadline(now + 550ms);
+ stop();
+ break;
+ }
+ case kWhatFlush: {
+ // C2Component::flush_sm() should return within 5ms.
+ setDeadline(now + 50ms);
+ flush();
+ break;
+ }
+ default: {
+ ALOGE("unrecognized message");
+ break;
+ }
+ }
+ setDeadline(TimePoint::max());
+}
+
+void CCodec::setDeadline(const TimePoint &newDeadline) {
+ Mutexed<TimePoint>::Locked deadline(mDeadline);
+ *deadline = newDeadline;
+}
+
+void CCodec::initiateReleaseIfStuck() {
+ {
+ Mutexed<TimePoint>::Locked deadline(mDeadline);
+ if (*deadline >= std::chrono::steady_clock::now()) {
+ // We're not stuck.
+ return;
+ }
+ }
+
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ initiateRelease();
+}
+
+} // namespace android
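
For orientation, CCodec drives the Codec2 component through AMessage handlers: each initiate*/signal* entry point posts a message, and completion is reported through the CodecBase callback. A rough summary of the call flow implemented above (a sketch, not an exhaustive state table):

    // initiateAllocateComponent()  -> allocate()  -> onComponentAllocated()
    // initiateConfigureComponent() -> configure() -> onComponentConfigured()
    // initiateStart()              -> start()     -> onStartCompleted()
    // signalFlush()                -> flush()     -> onFlushCompleted()
    // signalResume()               -> restarts the buffer channel (no callback)
    // initiateShutdown(true)       -> stop()      -> onStopCompleted()
    // initiateShutdown(false)      -> release()   -> onReleaseCompleted()
    // CCodecWatchdog polls every 3 seconds and calls initiateReleaseIfStuck()
    // on any codec whose pending operation has outlived its deadline.
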
diff --git a/media/libstagefright/CCodecBufferChannel.cpp b/media/libstagefright/CCodecBufferChannel.cpp
new file mode 100644
index 0000000..9868cd4
--- /dev/null
+++ b/media/libstagefright/CCodecBufferChannel.cpp
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "CCodecBufferChannel"
+#include <utils/Log.h>
+
+#include <numeric>
+#include <thread>
+
+#include <C2PlatformSupport.h>
+
+#include <android/hardware/cas/native/1.0/IDescrambler.h>
+#include <binder/MemoryDealer.h>
+#include <gui/Surface.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/MediaCodecBuffer.h>
+#include <system/window.h>
+
+#include "include/CCodecBufferChannel.h"
+#include "include/Codec2Buffer.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
+
+namespace android {
+
+using hardware::hidl_handle;
+using hardware::hidl_string;
+using hardware::hidl_vec;
+using namespace hardware::cas::V1_0;
+using namespace hardware::cas::native::V1_0;
+
+// TODO: get this info from component
+const static size_t kMinBufferArraySize = 16;
+
+void CCodecBufferChannel::OutputBuffers::flush(
+ const std::list<std::unique_ptr<C2Work>> &flushedWork) {
+ (void) flushedWork;
+ // This is a no-op by default unless we're in array mode, where we need to
+ // keep track of the flushed work.
+}
+
+namespace {
+
+template <class T>
+ssize_t findBufferSlot(
+ std::vector<T> *buffers,
+ size_t maxSize,
+ std::function<bool(const T&)> pred) {
+ auto it = std::find_if(buffers->begin(), buffers->end(), pred);
+ if (it == buffers->end()) {
+ if (buffers->size() < maxSize) {
+ buffers->emplace_back();
+ return buffers->size() - 1;
+ } else {
+ return -1;
+ }
+ }
+ return std::distance(buffers->begin(), it);
+}
+
+class LinearBuffer : public C2Buffer {
+public:
+ explicit LinearBuffer(C2ConstLinearBlock block) : C2Buffer({ block }) {}
+};
+
+class LinearInputBuffers : public CCodecBufferChannel::InputBuffers {
+public:
+ using CCodecBufferChannel::InputBuffers::InputBuffers;
+
+ virtual bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ *buffer = nullptr;
+ ssize_t ret = findBufferSlot<wp<Codec2Buffer>>(
+ &mBuffers, kMinBufferArraySize,
+ [] (const auto &elem) { return elem.promote() == nullptr; });
+ if (ret < 0) {
+ return false;
+ }
+ std::shared_ptr<C2LinearBlock> block;
+
+ status_t err = mAlloc->fetchLinearBlock(
+ // TODO: proper max input size
+ 65536,
+ { 0, C2MemoryUsage::kSoftwareWrite },
+ &block);
+ if (err != OK) {
+ return false;
+ }
+
+ sp<Codec2Buffer> newBuffer = Codec2Buffer::allocate(mFormat, block);
+ mBuffers[ret] = newBuffer;
+ *index = ret;
+ *buffer = newBuffer;
+ return true;
+ }
+
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ auto it = std::find(mBuffers.begin(), mBuffers.end(), buffer);
+ if (it == mBuffers.end()) {
+ return nullptr;
+ }
+ sp<Codec2Buffer> codecBuffer = it->promote();
+ // We got an sp<> reference from the caller, so this should never happen.
+ CHECK(codecBuffer != nullptr);
+ return std::make_shared<LinearBuffer>(codecBuffer->share());
+ }
+
+ virtual void flush() override {
+ }
+
+private:
+ // Buffers we passed to the client. The index of a buffer matches what
+ // was passed in BufferCallback::onInputBufferAvailable().
+ std::vector<wp<Codec2Buffer>> mBuffers;
+
+ // Buffer array we passed to the client. This only gets initialized at
+ // getInput/OutputBufferArray(), and once it is set we can't add more
+ // buffers.
+ std::vector<sp<Codec2Buffer>> mBufferArray;
+};
+
+class GraphicOutputBuffers : public CCodecBufferChannel::OutputBuffers {
+public:
+ using CCodecBufferChannel::OutputBuffers::OutputBuffers;
+
+ virtual bool registerBuffer(
+ const std::shared_ptr<C2Buffer> &buffer,
+ size_t *index,
+ sp<MediaCodecBuffer> *codecBuffer) override {
+ *codecBuffer = nullptr;
+ ssize_t ret = findBufferSlot<BufferInfo>(
+ &mBuffers,
+ kMinBufferArraySize,
+ [] (const auto &elem) { return elem.mClientBuffer.promote() == nullptr; });
+ if (ret < 0) {
+ return false;
+ }
+ sp<MediaCodecBuffer> newBuffer = new MediaCodecBuffer(
+ mFormat,
+ buffer == nullptr ? kEmptyBuffer : kDummyBuffer);
+ mBuffers[ret] = { newBuffer, buffer };
+ *index = ret;
+ *codecBuffer = newBuffer;
+ return true;
+ }
+
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ auto it = std::find_if(
+ mBuffers.begin(), mBuffers.end(),
+ [buffer] (const auto &elem) {
+ return elem.mClientBuffer.promote() == buffer;
+ });
+ if (it == mBuffers.end()) {
+ return nullptr;
+ }
+ return it->mBufferRef;
+ }
+
+private:
+ static const sp<ABuffer> kEmptyBuffer;
+ static const sp<ABuffer> kDummyBuffer;
+
+ struct BufferInfo {
+ // wp<> of MediaCodecBuffer for MediaCodec.
+ wp<MediaCodecBuffer> mClientBuffer;
+ // Buffer reference to hold until mClientBuffer is valid.
+ std::shared_ptr<C2Buffer> mBufferRef;
+ };
+ // Buffers we passed to the client. The index of a buffer matches what
+ // was passed in BufferCallback::onOutputBufferAvailable().
+ std::vector<BufferInfo> mBuffers;
+};
+
+const sp<ABuffer> GraphicOutputBuffers::kEmptyBuffer = new ABuffer(nullptr, 0);
+const sp<ABuffer> GraphicOutputBuffers::kDummyBuffer = new ABuffer(nullptr, 1);
+
+} // namespace
+
+CCodecBufferChannel::QueueGuard::QueueGuard(
+ CCodecBufferChannel::QueueSync &sync) : mSync(sync) {
+ std::unique_lock<std::mutex> l(mSync.mMutex);
+ if (mSync.mCount == -1) {
+ mRunning = false;
+ } else {
+ ++mSync.mCount;
+ mRunning = true;
+ }
+}
+
+CCodecBufferChannel::QueueGuard::~QueueGuard() {
+ if (mRunning) {
+ --mSync.mCount;
+ }
+}
+
+void CCodecBufferChannel::QueueSync::start() {
+ std::unique_lock<std::mutex> l(mMutex);
+ // If stopped, it goes to running state; otherwise no-op.
+ int32_t expected = -1;
+ mCount.compare_exchange_strong(expected, 0);
+}
+
+void CCodecBufferChannel::QueueSync::stop() {
+ std::unique_lock<std::mutex> l(mMutex);
+ if (mCount == -1) {
+ // no-op
+ return;
+ }
+ int32_t expected = 0;
+ while (!mCount.compare_exchange_weak(expected, -1)) {
+ std::this_thread::yield();
+ }
+}
+
+CCodecBufferChannel::CCodecBufferChannel(
+ const std::function<void(status_t, enum ActionCode)> &onError)
+ : mOnError(onError),
+ mInputBuffers(new LinearInputBuffers),
+ mOutputBuffers(new GraphicOutputBuffers),
+ mFrameIndex(0u),
+ mFirstValidFrameIndex(0u) {
+}
+
+CCodecBufferChannel::~CCodecBufferChannel() {
+ if (mCrypto != nullptr && mDealer != nullptr && mHeapSeqNum >= 0) {
+ mCrypto->unsetHeap(mHeapSeqNum);
+ }
+}
+
+void CCodecBufferChannel::setComponent(const std::shared_ptr<C2Component> &component) {
+ mComponent = component;
+ // TODO: get pool ID from params
+ std::shared_ptr<C2BlockPool> pool;
+ c2_status_t err = GetCodec2BlockPool(C2BlockPool::BASIC_LINEAR, component, &pool);
+ if (err == C2_OK) {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (*buffers)->setAlloc(pool);
+ }
+}
+
+status_t CCodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
+ QueueGuard guard(mSync);
+ if (!guard.isRunning()) {
+ ALOGW("No more buffers should be queued in the current state.");
+ return -ENOSYS;
+ }
+
+ int64_t timeUs;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+ int32_t flags = 0;
+ int32_t tmp = 0;
+ if (buffer->meta()->findInt32("eos", &tmp) && tmp) {
+ flags |= C2BufferPack::FLAG_END_OF_STREAM;
+ ALOGV("input EOS");
+ }
+ if (buffer->meta()->findInt32("csd", &tmp) && tmp) {
+ flags |= C2BufferPack::FLAG_CODEC_CONFIG;
+ }
+ std::unique_ptr<C2Work> work(new C2Work);
+ work->input.flags = (C2BufferPack::flags_t)flags;
+ work->input.ordinal.timestamp = timeUs;
+ work->input.ordinal.frame_index = mFrameIndex++;
+ work->input.buffers.clear();
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ work->input.buffers.push_back((*buffers)->releaseBuffer(buffer));
+ }
+ // TODO: fill info's
+
+ work->worklets.clear();
+ work->worklets.emplace_back(new C2Worklet);
+
+ std::list<std::unique_ptr<C2Work>> items;
+ items.push_back(std::move(work));
+ return mComponent->queue_nb(&items);
+}
+
+status_t CCodecBufferChannel::queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer, bool secure, const uint8_t *key,
+ const uint8_t *iv, CryptoPlugin::Mode mode, CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ AString *errorDetailMsg) {
+ // TODO
+ (void) buffer;
+ (void) secure;
+ (void) key;
+ (void) iv;
+ (void) mode;
+ (void) pattern;
+ (void) subSamples;
+ (void) numSubSamples;
+ (void) errorDetailMsg;
+ return -ENOSYS;
+}
+
+status_t CCodecBufferChannel::renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
+ ALOGV("renderOutputBuffer");
+ sp<MediaCodecBuffer> inBuffer;
+ size_t index;
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ if (!(*buffers)->requestNewBuffer(&index, &inBuffer)) {
+ inBuffer = nullptr;
+ }
+ }
+ if (inBuffer != nullptr) {
+ mCallback->onInputBufferAvailable(index, inBuffer);
+ }
+
+ std::shared_ptr<C2Buffer> c2Buffer;
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ c2Buffer = (*buffers)->releaseBuffer(buffer);
+ }
+
+ Mutexed<sp<Surface>>::Locked surface(mSurface);
+ if (*surface == nullptr) {
+ ALOGE("no surface");
+ return OK;
+ }
+
+ std::list<C2ConstGraphicBlock> blocks = c2Buffer->data().graphicBlocks();
+ if (blocks.size() != 1u) {
+ ALOGE("expected 1 graphic block, but got %zu", blocks.size());
+ return UNKNOWN_ERROR;
+ }
+
+ sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(
+ blocks.front().handle(),
+ GraphicBuffer::CLONE_HANDLE,
+ blocks.front().width(),
+ blocks.front().height(),
+ HAL_PIXEL_FORMAT_YV12,
+ // TODO
+ 1,
+ (uint64_t)GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ // TODO
+ blocks.front().width()));
+
+ status_t result = (*surface)->attachBuffer(graphicBuffer.get());
+ if (result != OK) {
+ ALOGE("attachBuffer failed: %d", result);
+ return result;
+ }
+
+ // TODO: read and set crop
+
+ result = native_window_set_buffers_timestamp((*surface).get(), timestampNs);
+ ALOGW_IF(result != OK, "failed to set buffer timestamp: %d", result);
+
+ // TODO: fix after C2Fence implementation
+#if 0
+ const C2Fence &fence = blocks.front().fence();
+ result = ((ANativeWindow *)(*surface).get())->queueBuffer(
+ (*surface).get(), graphicBuffer.get(), fence.valid() ? fence.fd() : -1);
+#else
+ result = ((ANativeWindow *)(*surface).get())->queueBuffer(
+ (*surface).get(), graphicBuffer.get(), -1);
+#endif
+ if (result != OK) {
+ ALOGE("queueBuffer failed: %d", result);
+ return result;
+ }
+
+ return OK;
+}
+
+status_t CCodecBufferChannel::discardBuffer(const sp<MediaCodecBuffer> &buffer) {
+ ALOGV("discardBuffer");
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (void) (*buffers)->releaseBuffer(buffer);
+ }
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ (void) (*buffers)->releaseBuffer(buffer);
+ }
+ return OK;
+}
+
+#if 0
+void fillBufferArray_l(Mutexed<Buffers>::Locked &buffers) {
+ for (size_t i = 0; i < buffers->mClientBuffer.size(); ++i) {
+ sp<Codec2Buffer> buffer(buffers->mClientBuffer.get(i).promote());
+ if (buffer == nullptr) {
+ buffer = allocateBuffer_l(buffers->mAlloc);
+ }
+ buffers->mBufferArray.push_back(buffer);
+ }
+ while (buffers->mBufferArray.size() < kMinBufferArraySize) {
+ sp<Codec2Buffer> buffer = allocateBuffer_l(buffers->mAlloc);
+ // allocate buffer
+ buffers->mBufferArray.push_back(buffer);
+ }
+}
+#endif
+
+void CCodecBufferChannel::getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+ (void) array;
+ // TODO
+#if 0
+ array->clear();
+ Mutexed<Buffers>::Locked buffers(mInputBuffers);
+
+ if (!buffers->isArrayMode()) {
+ // mBufferArray is empty.
+ fillBufferArray_l(buffers);
+ }
+
+ for (const auto &buffer : buffers->mBufferArray) {
+ array->push_back(buffer);
+ }
+#endif
+}
+
+void CCodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+ (void) array;
+ // TODO
+#if 0
+ array->clear();
+ Mutexed<Buffers>::Locked buffers(mOutputBuffers);
+
+ if (!buffers->isArrayMode()) {
+ if (linear) {
+ // mBufferArray is empty.
+ fillBufferArray_l(buffers);
+
+ // We need to replace the allocator so that the component only returns
+ // buffer from the array.
+ ArrayModeAllocator::Builder builder(buffers->mBufferArray);
+ for (size_t i = 0; i < buffers->mClientBuffer.size(); ++i) {
+ if (buffers->mClientBuffer.get(i).promote() != nullptr) {
+ builder.markUsing(i);
+ }
+ }
+ buffers->mAlloc.reset(builder.build());
+ } else {
+ for (int i = 0; i < X; ++i) {
+ buffers->mBufferArray.push_back(dummy buffer);
+ }
+ }
+ }
+
+ for (const auto &buffer : buffers->mBufferArray) {
+ array->push_back(buffer);
+ }
+#endif
+}
+
+void CCodecBufferChannel::start(const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) {
+ if (inputFormat != nullptr) {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (*buffers)->setFormat(inputFormat);
+ }
+ if (outputFormat != nullptr) {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ (*buffers)->setFormat(outputFormat);
+ }
+
+ mSync.start();
+ // TODO: use proper buffer depth instead of this random value
+ for (size_t i = 0; i < kMinBufferArraySize; ++i) {
+ size_t index;
+ sp<MediaCodecBuffer> buffer;
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ if (!(*buffers)->requestNewBuffer(&index, &buffer)) {
+ buffers.unlock();
+ ALOGE("start: cannot allocate memory");
+ mOnError(NO_MEMORY, ACTION_CODE_FATAL);
+ buffers.lock();
+ return;
+ }
+ }
+ mCallback->onInputBufferAvailable(index, buffer);
+ }
+}
+
+void CCodecBufferChannel::stop() {
+ mSync.stop();
+ mFirstValidFrameIndex = mFrameIndex.load();
+}
+
+void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ (*buffers)->flush();
+ }
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ (*buffers)->flush(flushedWork);
+ }
+}
+
+void CCodecBufferChannel::onWorkDone(std::vector<std::unique_ptr<C2Work>> workItems) {
+ for (const auto &work : workItems) {
+ if (work->result != OK) {
+ ALOGE("work failed to complete: %d", work->result);
+ mOnError(work->result, ACTION_CODE_FATAL);
+ return;
+ }
+
+ // NOTE: MediaCodec usage is expected to have only one worklet.
+ if (work->worklets.size() != 1u) {
+ ALOGE("incorrect number of worklets: %zu", work->worklets.size());
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ continue;
+ }
+
+ const std::unique_ptr<C2Worklet> &worklet = work->worklets.front();
+ if (worklet->output.ordinal.frame_index < mFirstValidFrameIndex) {
+ // Discard frames from previous generation.
+ continue;
+ }
+ // NOTE: MediaCodec usage is expected to have only one output stream.
+ if (worklet->output.buffers.size() != 1u) {
+ ALOGE("incorrect number of output buffers: %zu", worklet->output.buffers.size());
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ continue;
+ }
+
+ const std::shared_ptr<C2Buffer> &buffer = worklet->output.buffers[0];
+ // TODO: transfer infos() into buffer metadata
+
+ int32_t flags = 0;
+ if (worklet->output.flags & C2BufferPack::FLAG_END_OF_STREAM) {
+ flags |= MediaCodec::BUFFER_FLAG_EOS;
+ ALOGV("output EOS");
+ }
+
+ size_t index;
+ sp<MediaCodecBuffer> outBuffer;
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ if (!(*buffers)->registerBuffer(buffer, &index, &outBuffer)) {
+ ALOGE("unable to register output buffer");
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ continue;
+ }
+
+ outBuffer->meta()->setInt64("timeUs", worklet->output.ordinal.timestamp);
+ outBuffer->meta()->setInt32("flags", flags);
+ ALOGV("index = %zu", index);
+ mCallback->onOutputBufferAvailable(index, outBuffer);
+ }
+}
+
+status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface) {
+ if (newSurface != nullptr) {
+ newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+ }
+
+ Mutexed<sp<Surface>>::Locked surface(mSurface);
+// if (newSurface == nullptr) {
+// if (*surface != nullptr) {
+// ALOGW("cannot unset a surface");
+// return INVALID_OPERATION;
+// }
+// return OK;
+// }
+//
+// if (*surface == nullptr) {
+// ALOGW("component was not configured with a surface");
+// return INVALID_OPERATION;
+// }
+
+ *surface = newSurface;
+ return OK;
+}
+
+} // namespace android
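
queueInputBuffer() above reads its per-buffer parameters from the buffer's meta() message: "timeUs" is required, while "eos" and "csd" translate into C2BufferPack flags. A hedged sketch of a caller-side helper follows; the name and signature are illustrative, not part of the patch.

    static status_t queueOneFrame(
            const std::shared_ptr<CCodecBufferChannel> &channel,
            const sp<MediaCodecBuffer> &buffer,   // handed out via onInputBufferAvailable()
            int64_t timeUs, bool eos, bool csd) {
        buffer->meta()->setInt64("timeUs", timeUs);    // required
        if (eos) buffer->meta()->setInt32("eos", 1);   // optional: end of stream
        if (csd) buffer->meta()->setInt32("csd", 1);   // optional: codec config data
        return channel->queueInputBuffer(buffer);
    }
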
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 4fedab6..677d25a 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -28,6 +28,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryDealer.h>
+#include <cutils/properties.h>
#include <gui/BufferQueue.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
@@ -44,6 +45,7 @@
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/ACodec.h>
#include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/CCodec.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
@@ -549,8 +551,11 @@
//static
sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, bool nameIsType) {
- // at this time only ACodec specifies a mime type.
- if (nameIsType || name.startsWithIgnoreCase("omx.")) {
+ static bool ccodecEnabled = property_get_bool("debug.stagefright.ccodec", false);
+ if (ccodecEnabled && !nameIsType && name.startsWithIgnoreCase("codec2.")) {
+ return new CCodec;
+ } else if (nameIsType || name.startsWithIgnoreCase("omx.")) {
+ // at this time only ACodec specifies a mime type.
return new ACodec;
} else if (name.startsWithIgnoreCase("android.filter.")) {
return new MediaFilter;
@@ -1849,7 +1854,6 @@
}
}
}
-
if (mFlags & kFlagIsAsync) {
onOutputFormatChanged();
} else {
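
The GetCodecBase() change above routes component names that start with "codec2." to the new CCodec, gated on the debug.stagefright.ccodec system property (read once via property_get_bool). A hedged client-side sketch; the component name is hypothetical and only illustrates the prefix check:

    // assumes debug.stagefright.ccodec=true on the device
    sp<ALooper> looper = new ALooper;
    looper->setName("codec_looper");
    looper->start();
    sp<MediaCodec> codec =
            MediaCodec::CreateByComponentName(looper, "codec2.example.decoder" /* hypothetical */);
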
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 67e6748..9b2fb4f 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimpleDecodingSource"
+#include <utils/Log.h>
+
#include <gui/Surface.h>
#include <media/ICrypto.h>
@@ -43,7 +47,7 @@
//static
sp<SimpleDecodingSource> SimpleDecodingSource::Create(
const sp<MediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
- const char *desiredCodec) {
+ const char *desiredCodec, bool skipMediaCodecList) {
sp<Surface> surface = static_cast<Surface*>(nativeWindow.get());
const char *mime = NULL;
sp<MetaData> meta = source->getFormat();
@@ -63,6 +67,33 @@
looper->start();
sp<MediaCodec> codec;
+ auto configure = [=](const sp<MediaCodec> &codec, const AString &componentName)
+ -> sp<SimpleDecodingSource> {
+ if (codec != NULL) {
+ ALOGI("Successfully allocated codec '%s'", componentName.c_str());
+
+ status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
+ sp<AMessage> outFormat;
+ if (err == OK) {
+ err = codec->getOutputFormat(&outFormat);
+ }
+ if (err == OK) {
+ return new SimpleDecodingSource(codec, source, looper,
+ surface != NULL,
+ strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
+ outFormat);
+ }
+
+ ALOGD("Failed to configure codec '%s'", componentName.c_str());
+ codec->release();
+ }
+ return NULL;
+ };
+
+ if (skipMediaCodecList) {
+ codec = MediaCodec::CreateByComponentName(looper, desiredCodec);
+ return configure(codec, desiredCodec);
+ }
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
@@ -73,22 +104,10 @@
ALOGV("Attempting to allocate codec '%s'", componentName.c_str());
codec = MediaCodec::CreateByComponentName(looper, componentName);
- if (codec != NULL) {
- ALOGI("Successfully allocated codec '%s'", componentName.c_str());
-
- status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
- if (err == OK) {
- err = codec->getOutputFormat(&format);
- }
- if (err == OK) {
- return new SimpleDecodingSource(codec, source, looper,
- surface != NULL,
- strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
- format);
- }
-
- ALOGD("Failed to configure codec '%s'", componentName.c_str());
- codec->release();
+ sp<SimpleDecodingSource> res = configure(codec, componentName);
+ if (res != NULL) {
+ return res;
+ } else {
codec = NULL;
}
}
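
SimpleDecodingSource::Create() now takes a desiredCodec name and a skipMediaCodecList flag, so a caller can force a specific component and bypass MediaCodecList matching entirely. A hedged sketch; the component name is hypothetical and the source is assumed to be an existing media track:

    sp<MediaSource> source;   // assumed: a demuxed track from an extractor
    sp<SimpleDecodingSource> decoder = SimpleDecodingSource::Create(
            source, 0 /* flags */, nullptr /* nativeWindow */,
            "codec2.example.decoder" /* hypothetical */, true /* skipMediaCodecList */);
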
diff --git a/media/libstagefright/codec2/Android.bp b/media/libstagefright/codec2/Android.bp
index 311a20b..696a062 100644
--- a/media/libstagefright/codec2/Android.bp
+++ b/media/libstagefright/codec2/Android.bp
@@ -7,11 +7,20 @@
srcs: ["C2.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
include_dirs: [
"frameworks/av/media/libstagefright/codec2/include",
"frameworks/native/include/media/hardware",
],
+ export_include_dirs: [
+ "include",
+ ],
+
sanitize: {
misc_undefined: [
"unsigned-integer-overflow",
diff --git a/media/libstagefright/codec2/include/C2Component.h b/media/libstagefright/codec2/include/C2Component.h
index 2dbf7ea..a555b35 100644
--- a/media/libstagefright/codec2/include/C2Component.h
+++ b/media/libstagefright/codec2/include/C2Component.h
@@ -360,6 +360,7 @@
C2DomainKind domain; ///< component domain (e.g. audio or video)
C2ComponentKind type; ///< component type (e.g. encoder, decoder or filter)
C2StringLiteral mediaType; ///< media type supported by the component
+ C2ComponentPriority priority; ///< priority used to determine component ordering
/**
* name alias(es) for backward compatibility.
@@ -569,7 +570,6 @@
*/
virtual std::shared_ptr<C2ComponentInterface> intf() = 0;
-protected:
virtual ~C2Component() = default;
};
@@ -724,11 +724,11 @@
/**
* Returns the list of components supported by this component store.
*
- * This method may be momentarily blocking, but MUST return within 5ms.
+ * This method MUST return within 500ms.
*
* \retval vector of component information.
*/
- virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents_sm() const = 0;
+ virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() = 0;
// -------------------------------------- UTILITY METHODS --------------------------------------
diff --git a/media/libstagefright/codec2/vndk/Android.bp b/media/libstagefright/codec2/vndk/Android.bp
index 64ce5e6..d2cfebb 100644
--- a/media/libstagefright/codec2/vndk/Android.bp
+++ b/media/libstagefright/codec2/vndk/Android.bp
@@ -9,6 +9,10 @@
"C2Store.cpp",
],
+ export_include_dirs: [
+ "include",
+ ],
+
include_dirs: [
"frameworks/av/media/libstagefright/codec2/include",
"frameworks/av/media/libstagefright/codec2/vndk/include",
diff --git a/media/libstagefright/codec2/vndk/C2Store.cpp b/media/libstagefright/codec2/vndk/C2Store.cpp
index 73ffaea..460cc60 100644
--- a/media/libstagefright/codec2/vndk/C2Store.cpp
+++ b/media/libstagefright/codec2/vndk/C2Store.cpp
@@ -20,12 +20,26 @@
#include <C2Component.h>
#include <C2PlatformSupport.h>
+#define LOG_TAG "C2Store"
+#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <dlfcn.h>
+
#include <map>
#include <memory>
#include <mutex>
namespace android {
+/**
+ * The platform allocator store provides basic allocator-types for the framework based on ion and
+ * gralloc. Allocators are not meant to be updatable.
+ *
+ * \todo Provide allocator based on ashmem
+ * \todo Move ion allocation into its HIDL or provide some mapping from memory usage to ion flags
+ * \todo Make this allocator store extendable
+ */
class C2PlatformAllocatorStore : public C2AllocatorStore {
public:
enum : id_t {
@@ -37,9 +51,11 @@
/* ionmapper */
);
- virtual c2_status_t fetchAllocator(id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
+ virtual c2_status_t fetchAllocator(
+ id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
- virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb() const override {
+ virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb()
+ const override {
return std::vector<std::shared_ptr<const C2Allocator::Traits>>(); /// \todo
}
@@ -48,10 +64,10 @@
}
private:
- // returns a shared-singleton ion allocator
+ /// returns a shared-singleton ion allocator
std::shared_ptr<C2Allocator> fetchIonAllocator();
- // returns a shared-singleton gralloc allocator
+ /// returns a shared-singleton gralloc allocator
std::shared_ptr<C2Allocator> fetchGrallocAllocator();
};
@@ -141,4 +157,385 @@
return res;
}
-} // namespace android
\ No newline at end of file
+class C2PlatformComponentStore : public C2ComponentStore {
+public:
+ virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() override;
+ virtual std::shared_ptr<C2ParamReflector> getParamReflector() const override;
+ virtual C2String getName() const override;
+ virtual c2_status_t querySupportedValues_nb(
+ std::vector<C2FieldSupportedValuesQuery> &fields) const override;
+ virtual c2_status_t querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const override;
+ virtual c2_status_t query_sm(
+ const std::vector<C2Param *const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>> *const heapParams) const override;
+ virtual c2_status_t createInterface(
+ C2String name, std::shared_ptr<C2ComponentInterface> *const interface) override;
+ virtual c2_status_t createComponent(
+ C2String name, std::shared_ptr<C2Component> *const component) override;
+ virtual c2_status_t copyBuffer(
+ std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) override;
+ virtual c2_status_t config_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+ virtual c2_status_t commit_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+
+ C2PlatformComponentStore();
+
+ virtual ~C2PlatformComponentStore() override = default;
+
+private:
+
+ /**
+ * An object encapsulating a loaded component module.
+ *
+ * \todo provide a way to add traits to known components here to avoid loading the .so files
+ * for listComponents
+ */
+ struct ComponentModule : public C2ComponentFactory,
+ public std::enable_shared_from_this<ComponentModule> {
+ virtual c2_status_t createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component> *component,
+ ComponentDeleter deleter = std::default_delete<C2Component>()) override;
+ virtual c2_status_t createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+ InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) override;
+
+ /**
+ * \returns the traits of the component in this module.
+ */
+ std::shared_ptr<const C2Component::Traits> getTraits();
+
+ /**
+ * Creates an uninitialized component module.
+ *
+ * \note Only used by ComponentLoader.
+ */
+ ComponentModule() : mInit(C2_NO_INIT) {}
+
+ /**
+ * Initializes a component module with a given library path. Must be called exactly once.
+ *
+ * \note Only used by ComponentLoader.
+ *
+ * \param libPath[in] library path (or name)
+ *
+ * \retval C2_OK the component module has been successfully loaded
+ * \retval C2_NO_MEMORY not enough memory to load the component module
+ * \retval C2_NOT_FOUND could not locate the component module
+ * \retval C2_CORRUPTED the component module could not be loaded (unexpected)
+ * \retval C2_REFUSED permission denied to load the component module (unexpected)
+ * \retval C2_TIMED_OUT could not load the module within the time limit (unexpected)
+ */
+ c2_status_t init(std::string libPath);
+
+ virtual ~ComponentModule() override;
+
+ protected:
+ std::recursive_mutex mLock; ///< lock protecting mTraits
+ std::shared_ptr<C2Component::Traits> mTraits; ///< cached component traits
+
+ c2_status_t mInit; ///< initialization result
+
+ void *mLibHandle; ///< loaded library handle
+ C2ComponentFactory::CreateCodec2FactoryFunc createFactory; ///< loaded create function
+ C2ComponentFactory::DestroyCodec2FactoryFunc destroyFactory; ///< loaded destroy function
+ C2ComponentFactory *mComponentFactory; ///< loaded/created component factory
+ };
+
+ /**
+ * An object encapsulating a loadable component module.
+ *
+ * \todo make this also work for enumerations
+ */
+ struct ComponentLoader {
+ /**
+ * Load the component module.
+ *
+ * This method simply returns the component module if it is currently loaded, or
+ * attempts to load it if it is not.
+ *
+ * \param module[out] pointer to the shared pointer where the loaded module shall be stored.
+ * This will be nullptr on error.
+ *
+ * \retval C2_OK the component module has been successfully loaded
+ * \retval C2_NO_MEMORY not enough memory to load the component module
+ * \retval C2_NOT_FOUND could not locate the component module
+ * \retval C2_CORRUPTED the component module could not be loaded
+ * \retval C2_REFUSED permission denied to load the component module
+ */
+ c2_status_t fetchModule(std::shared_ptr<ComponentModule> *module) {
+ c2_status_t res = C2_OK;
+ std::lock_guard<std::mutex> lock(mMutex);
+ std::shared_ptr<ComponentModule> localModule = mModule.lock();
+ if (localModule == nullptr) {
+ localModule = std::make_shared<ComponentModule>();
+ res = localModule->init(mLibPath);
+ if (res == C2_OK) {
+ mModule = localModule;
+ }
+ }
+ *module = localModule;
+ return res;
+ }
+
+ /**
+ * Creates a component loader for a specific library path (or name).
+ */
+ ComponentLoader(std::string libPath)
+ : mLibPath(libPath) {}
+
+ private:
+ std::mutex mMutex; ///< mutex guarding the module
+ std::weak_ptr<ComponentModule> mModule; ///< weak reference to the loaded module
+ std::string mLibPath; ///< library path (or name)
+ };
+
+ /**
+ * Retrieves the component loader for a component.
+ *
+ * \return a non-ref-holding pointer to the component loader.
+ *
+ * \retval C2_OK the component loader has been successfully retrieved
+ * \retval C2_NO_MEMORY not enough memory to locate the component loader
+ * \retval C2_NOT_FOUND could not locate the component to be loaded
+ * \retval C2_CORRUPTED the component loader could not be identified due to some modules being
+ * corrupted (this can happen if the name does not refer to an already
+ * identified component but some components could not be loaded due to
+ * a bad library)
+ * \retval C2_REFUSED permission denied to find the component loader for the named component
+ * (this can happen if the name does not refer to an already identified
+ * component but some components could not be loaded due to lack of
+ * permissions)
+ */
+ c2_status_t findComponent(C2String name, ComponentLoader **loader);
+
+ std::map<C2String, ComponentLoader> mComponents; ///< list of components
+};
+
+c2_status_t C2PlatformComponentStore::ComponentModule::init(std::string libPath) {
+ ALOGV("in %s", __func__);
+ ALOGV("loading dll");
+ mLibHandle = dlopen(libPath.c_str(), RTLD_NOW|RTLD_NODELETE);
+ if (mLibHandle == nullptr) {
+ // could be an access/symbol error, or the library simply isn't there
+ ALOGD("could not dlopen %s: %s", libPath.c_str(), dlerror());
+ mInit = C2_CORRUPTED;
+ } else {
+ createFactory =
+ (C2ComponentFactory::CreateCodec2FactoryFunc)dlsym(mLibHandle, "CreateCodec2Factory");
+ destroyFactory =
+ (C2ComponentFactory::DestroyCodec2FactoryFunc)dlsym(mLibHandle, "DestroyCodec2Factory");
+
+ mComponentFactory = createFactory();
+ if (mComponentFactory == nullptr) {
+ ALOGD("could not create factory in %s", libPath.c_str());
+ mInit = C2_NO_MEMORY;
+ } else {
+ mInit = C2_OK;
+ }
+ }
+ return mInit;
+}
+
+C2PlatformComponentStore::ComponentModule::~ComponentModule() {
+ ALOGV("in %s", __func__);
+ if (destroyFactory && mComponentFactory) {
+ destroyFactory(mComponentFactory);
+ }
+ if (mLibHandle) {
+ ALOGV("unloading dll");
+ dlclose(mLibHandle);
+ }
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+ std::function<void(::android::C2ComponentInterface*)> deleter) {
+ interface->reset();
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+ std::shared_ptr<ComponentModule> module = shared_from_this();
+ c2_status_t res = mComponentFactory->createInterface(
+ id, interface, [module, deleter](C2ComponentInterface *p) mutable {
+ // capture module so that we ensure we still have it while deleting interface
+ deleter(p); // delete interface first
+ module.reset(); // remove module ref (not technically needed)
+ });
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component> *component,
+ std::function<void(::android::C2Component*)> deleter) {
+ component->reset();
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+ std::shared_ptr<ComponentModule> module = shared_from_this();
+ c2_status_t res = mComponentFactory->createComponent(
+ id, component, [module, deleter](C2Component *p) mutable {
+ // capture module so that we ensure we still have it while deleting component
+ deleter(p); // delete component first
+ module.reset(); // remove module ref (not technically needed)
+ });
+ return res;
+}
+
+std::shared_ptr<const C2Component::Traits> C2PlatformComponentStore::ComponentModule::getTraits() {
+ std::unique_lock<std::recursive_mutex> lock(mLock);
+ if (!mTraits) {
+ std::shared_ptr<C2ComponentInterface> intf;
+ c2_status_t res = createInterface(0, &intf);
+ if (res != C2_OK) {
+ return nullptr;
+ }
+
+ std::shared_ptr<C2Component::Traits> traits(new (std::nothrow) C2Component::Traits);
+ if (traits) {
+ // traits->name = intf->getName();
+ }
+
+ mTraits = traits;
+ }
+ return mTraits;
+}
+
+C2PlatformComponentStore::C2PlatformComponentStore() {
+ // TODO: move this also into a .so so it can be updated
+ mComponents.emplace("c2.google.avc.decoder", "libstagefright_soft_c2avcdec.so");
+}
+
+c2_status_t C2PlatformComponentStore::copyBuffer(
+ std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) {
+ (void)src;
+ (void)dst;
+ return C2_OMITTED;
+}
+
+c2_status_t C2PlatformComponentStore::query_sm(
+ const std::vector<C2Param *const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>> *const heapParams) const {
+ // there are no supported configs
+ (void)heapParams;
+ return stackParams.empty() && heapParamIndices.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::config_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ // there are no supported configs
+ (void)failures;
+ return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::commit_sm(
+ const std::vector<C2Param *const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ // there are no supported configs
+ (void)failures;
+ return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
+ // This method SHALL return within 500ms.
+ std::vector<std::shared_ptr<const C2Component::Traits>> list;
+ for (auto &it : mComponents) {
+ ComponentLoader &loader = it.second;
+ std::shared_ptr<ComponentModule> module;
+ c2_status_t res = loader.fetchModule(&module);
+ if (res == C2_OK) {
+ std::shared_ptr<const C2Component::Traits> traits = module->getTraits();
+ if (traits) {
+ list.push_back(traits);
+ }
+ }
+ }
+ return list;
+}
+
+c2_status_t C2PlatformComponentStore::findComponent(C2String name, ComponentLoader **loader) {
+ *loader = nullptr;
+ auto pos = mComponents.find(name);
+ // TODO: check aliases
+ if (pos == mComponents.end()) {
+ return C2_NOT_FOUND;
+ }
+ *loader = &pos->second;
+ return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::createComponent(
+ C2String name, std::shared_ptr<C2Component> *const component) {
+ // This method SHALL return within 100ms.
+ component->reset();
+ ComponentLoader *loader;
+ c2_status_t res = findComponent(name, &loader);
+ if (res == C2_OK) {
+ std::shared_ptr<ComponentModule> module;
+ res = loader->fetchModule(&module);
+ if (res == C2_OK) {
+ // TODO: get a unique node ID
+ res = module->createComponent(0, component);
+ }
+ }
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::createInterface(
+ C2String name, std::shared_ptr<C2ComponentInterface> *const interface) {
+ // This method SHALL return within 100ms.
+ interface->reset();
+ ComponentLoader *loader;
+ c2_status_t res = findComponent(name, &loader);
+ if (res == C2_OK) {
+ std::shared_ptr<ComponentModule> module;
+ res = loader->fetchModule(&module);
+ if (res == C2_OK) {
+ // TODO: get a unique node ID
+ res = module->createInterface(0, interface);
+ }
+ }
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const {
+ // there are no supported config params
+ (void)params;
+ return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedValues_nb(
+ std::vector<C2FieldSupportedValuesQuery> &fields) const {
+ // there are no supported config params
+ return fields.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+C2String C2PlatformComponentStore::getName() const {
+ return "android.componentStore.platform";
+}
+
+std::shared_ptr<C2ParamReflector> C2PlatformComponentStore::getParamReflector() const {
+ // TODO
+ return nullptr;
+}
+
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore() {
+ static std::mutex mutex;
+ static std::weak_ptr<C2ComponentStore> platformStore;
+ std::lock_guard<std::mutex> lock(mutex);
+ std::shared_ptr<C2ComponentStore> store = platformStore.lock();
+ if (store == nullptr) {
+ store = std::make_shared<C2PlatformComponentStore>();
+ platformStore = store;
+ }
+ return store;
+}
+
+} // namespace android
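The store above is meant to be the single client-facing entry point; a minimal usage sketch (not part of the patch) follows, using only the createComponent()/listComponents() signatures introduced in this file and the "c2.google.avc.decoder" name registered in the constructor.

    #include <C2PlatformSupport.h>

    using namespace android;

    void createAvcDecoderSketch() {
        std::shared_ptr<C2ComponentStore> store = GetCodec2PlatformComponentStore();
        std::shared_ptr<C2Component> component;
        // Loads libstagefright_soft_c2avcdec.so on first use via the ComponentLoader map.
        c2_status_t res = store->createComponent("c2.google.avc.decoder", &component);
        if (res != C2_OK || !component) {
            return;
        }
        // Enumerate the traits of every registered component (may be empty while
        // getTraits() above is still a stub).
        for (const std::shared_ptr<const C2Component::Traits> &traits : store->listComponents()) {
            (void)traits;
        }
    }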
diff --git a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
index 8e45705..2281dab 100644
--- a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
+++ b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
@@ -19,6 +19,7 @@
#include <C2Component.h>
+#include <functional>
#include <memory>
namespace android {
@@ -64,14 +65,17 @@
*/
class C2ComponentFactory {
public:
+ typedef std::function<void(::android::C2Component*)> ComponentDeleter;
+ typedef std::function<void(::android::C2ComponentInterface*)> InterfaceDeleter;
+
/**
* Creates a component.
*
* This method SHALL return within 100ms.
*
+ * \param id component ID for the created component
* \param component shared pointer where the created component is stored. Cleared on
* failure and updated on success.
- * \param id component ID for the created component
*
* \retval C2_OK the component was created successfully
* \retval C2_TIMED_OUT could not create the component within the time limit (unexpected)
@@ -80,16 +84,17 @@
* \retval C2_NO_MEMORY not enough memory to create the component
*/
virtual c2_status_t createComponent(
- std::shared_ptr<C2Component>* const component, c2_node_id_t id) = 0;
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ ComponentDeleter deleter = std::default_delete<C2Component>()) = 0;
/**
* Creates a component interface.
*
* This method SHALL return within 100ms.
*
+ * \param id component interface ID for the created interface
* \param interface shared pointer where the created interface is stored. Cleared on
* failure and updated on success.
- * \param id component interface ID for the created interface
*
* \retval C2_OK the component interface was created successfully
* \retval C2_TIMED_OUT could not create the component interface within the time limit
@@ -100,11 +105,22 @@
* \retval C2_NO_MEMORY not enough memory to create the component interface
*/
virtual c2_status_t createInterface(
- std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) = 0;
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) = 0;
virtual ~C2ComponentFactory() = default;
+
+ typedef ::android::C2ComponentFactory* (*CreateCodec2FactoryFunc)(void);
+ typedef void (*DestroyCodec2FactoryFunc)(::android::C2ComponentFactory*);
};
+/**
+ * Returns the platform component store.
+ * \retval nullptr if the platform component store could not be obtained
+ */
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore();
+
+
} // namespace android
#endif // STAGEFRIGHT_CODEC2_PLATFORM_SUPPORT_H_
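A hedged sketch (not part of the patch) of how the new CreateCodec2FactoryFunc/DestroyCodec2FactoryFunc typedefs and the reordered createComponent() signature fit together; the real loading path lives in the store's ComponentLoader/ComponentModule, and the dlopen()/dlsym() handling here is illustrative only.

    #include <dlfcn.h>

    #include <C2Component.h>
    #include <C2PlatformSupport.h>

    using namespace android;

    std::shared_ptr<C2Component> loadComponentSketch(const char *libPath, c2_node_id_t id) {
        void *handle = dlopen(libPath, RTLD_NOW);
        if (handle == nullptr) {
            return nullptr;
        }
        auto createFactory = (C2ComponentFactory::CreateCodec2FactoryFunc)dlsym(
                handle, "CreateCodec2Factory");
        auto destroyFactory = (C2ComponentFactory::DestroyCodec2FactoryFunc)dlsym(
                handle, "DestroyCodec2Factory");
        if (createFactory == nullptr || destroyFactory == nullptr) {
            dlclose(handle);
            return nullptr;
        }
        C2ComponentFactory *factory = createFactory();
        std::shared_ptr<C2Component> component;
        // The custom deleter keeps the factory alive until the component it created
        // is destroyed; dlclose() is left out for brevity.
        (void)factory->createComponent(id, &component,
                [factory, destroyFactory](C2Component *comp) {
                    delete comp;
                    destroyFactory(factory);
                });
        return component;
    }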
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
index c74ca6d..2423629 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
@@ -261,7 +261,7 @@
mFrameRate(0u, 0),
mBlocksPerSecond(0u, 0),
mParamReflector(new ParamReflector) {
-
+ ALOGV("in %s", __func__);
mInputPortMime = C2PortMimeConfig::input::alloc_unique(strlen(CODEC_MIME_TYPE) + 1);
strcpy(mInputPortMime->m.mValue, CODEC_MIME_TYPE);
mOutputPortMime = C2PortMimeConfig::output::alloc_unique(strlen(MEDIA_MIMETYPE_VIDEO_RAW) + 1);
@@ -430,6 +430,10 @@
false, "_output_block_pools", mOutputBlockPools.get()));
}
+C2SoftAvcDecIntf::~C2SoftAvcDecIntf() {
+ ALOGV("in %s", __func__);
+}
+
C2String C2SoftAvcDecIntf::getName() const {
return mName;
}
@@ -653,6 +657,7 @@
mWidth(320),
mHeight(240),
mInputOffset(0) {
+ ALOGV("in %s", __func__);
GETTIME(&mTimeStart, NULL);
// If input dump is enabled, then create an empty file
@@ -661,6 +666,7 @@
}
C2SoftAvcDec::~C2SoftAvcDec() {
+ ALOGV("in %s", __func__);
CHECK_EQ(deInitDecoder(), (status_t)OK);
}
@@ -790,6 +796,7 @@
}
void C2SoftAvcDec::processQueue() {
+#if 0
if (mIsInFlush) {
setFlushMode();
@@ -825,9 +832,10 @@
}
mIsInFlush = false;
}
+#endif
std::unique_ptr<C2Work> work;
- {
+ if (!mIsInFlush) {
std::unique_lock<std::mutex> lock(mQueueLock);
if (mQueue.empty()) {
mQueueCond.wait(lock);
@@ -844,7 +852,7 @@
process(work);
std::vector<std::unique_ptr<C2Work>> done;
- {
+ if (work) {
std::unique_lock<std::mutex> lock(mPendingLock);
uint32_t index = work->input.ordinal.frame_index;
mPendingWork[index].swap(work);
@@ -871,12 +879,12 @@
static void *ivd_aligned_malloc(void *ctxt, WORD32 alignment, WORD32 size) {
- UNUSED(ctxt);
+ (void) ctxt;
return memalign(alignment, size);
}
static void ivd_aligned_free(void *ctxt, void *buf) {
- UNUSED(ctxt);
+ (void) ctxt;
free(buf);
return;
}
@@ -1001,6 +1009,7 @@
}
status_t C2SoftAvcDec::setFlushMode() {
+ ALOGV("setFlushMode");
IV_API_CALL_STATUS_T status;
ivd_ctl_flush_ip_t s_video_flush_ip;
ivd_ctl_flush_op_t s_video_flush_op;
@@ -1019,7 +1028,7 @@
s_video_flush_op.u4_error_code);
return UNKNOWN_ERROR;
}
-
+ mIsInFlush = true;
return OK;
}
@@ -1079,7 +1088,6 @@
}
status_t C2SoftAvcDec::deInitDecoder() {
- size_t i;
IV_API_CALL_STATUS_T status;
if (mCodecCtx) {
@@ -1206,7 +1214,6 @@
if (mSignalledError) {
return;
}
-
if (NULL == mCodecCtx) {
if (OK != initDecoder()) {
ALOGE("Failed to initialize decoder");
@@ -1221,66 +1228,78 @@
setParams(mStride);
}
- const C2ConstLinearBlock &buffer =
- work->input.buffers[0]->data().linearBlocks().front();
- if (buffer.capacity() == 0) {
- // TODO: result?
+ uint32_t workIndex = 0;
+ std::unique_ptr<C2ReadView> input;
+ if (work) {
+ work->result = C2_OK;
- std::vector<std::unique_ptr<C2Work>> done;
- done.emplace_back(std::move(work));
- mListener->onWorkDone_nb(shared_from_this(), std::move(done));
- if (!(work->input.flags & C2BufferPack::FLAG_END_OF_STREAM)) {
- return;
- }
+ const C2ConstLinearBlock &buffer =
+ work->input.buffers[0]->data().linearBlocks().front();
+ if (buffer.capacity() == 0) {
+ // TODO: result?
- mReceivedEOS = true;
- // TODO: flush
- } else if (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
- mReceivedEOS = true;
- }
-
- C2ReadView input = work->input.buffers[0]->data().linearBlocks().front().map().get();
- uint32_t workIndex = work->input.ordinal.frame_index & 0xFFFFFFFF;
-
- // TODO: populate --- assume display order?
- if (!mAllocatedBlock) {
- // TODO: error handling
- // TODO: format & usage
- uint32_t format = HAL_PIXEL_FORMAT_YV12;
- C2MemoryUsage usage = { C2MemoryUsage::kSoftwareRead, C2MemoryUsage::kSoftwareWrite };
- // TODO: lock access to interface
- C2BlockPool::local_id_t poolId =
- mIntf->mOutputBlockPools->flexCount() ?
- mIntf->mOutputBlockPools->m.mValues[0] : C2BlockPool::BASIC_GRAPHIC;
- if (!mOutputBlockPool || mOutputBlockPool->getLocalId() != poolId) {
- c2_status_t err = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
- if (err != C2_OK) {
- // TODO: trip
+ std::vector<std::unique_ptr<C2Work>> done;
+ done.emplace_back(std::move(work));
+ mListener->onWorkDone_nb(shared_from_this(), std::move(done));
+ if (!(work->input.flags & C2BufferPack::FLAG_END_OF_STREAM)) {
+ return;
}
- }
- ALOGE("using allocator %u", mOutputBlockPool->getAllocatorId());
- (void)mOutputBlockPool->fetchGraphicBlock(
- mWidth, mHeight, format, usage, &mAllocatedBlock);
- ALOGE("provided (%dx%d) required (%dx%d)", mAllocatedBlock->width(), mAllocatedBlock->height(), mWidth, mHeight);
+ mReceivedEOS = true;
+ // TODO: flush
+ } else if (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
+ ALOGV("input EOS: %llu", work->input.ordinal.frame_index);
+ mReceivedEOS = true;
+ }
+
+ input.reset(new C2ReadView(work->input.buffers[0]->data().linearBlocks().front().map().get()));
+ workIndex = work->input.ordinal.frame_index & 0xFFFFFFFF;
}
- C2GraphicView output = mAllocatedBlock->map().get();
- ALOGE("mapped err = %d", output.error());
size_t inOffset = 0u;
- while (inOffset < input.capacity()) {
+ while (!input || inOffset < input->capacity()) {
+ if (!input) {
+ ALOGV("flushing");
+ }
+ // TODO: populate --- assume display order?
+ if (!mAllocatedBlock) {
+ // TODO: error handling
+ // TODO: format & usage
+ uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ C2MemoryUsage usage = { C2MemoryUsage::kSoftwareRead, C2MemoryUsage::kSoftwareWrite };
+ // TODO: lock access to interface
+ C2BlockPool::local_id_t poolId =
+ mIntf->mOutputBlockPools->flexCount() ?
+ mIntf->mOutputBlockPools->m.mValues[0] : C2BlockPool::BASIC_GRAPHIC;
+ if (!mOutputBlockPool || mOutputBlockPool->getLocalId() != poolId) {
+ c2_status_t err = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
+ if (err != C2_OK) {
+ // TODO: trip
+ }
+ }
+ ALOGE("using allocator %u", mOutputBlockPool->getAllocatorId());
+
+ (void)mOutputBlockPool->fetchGraphicBlock(
+ mWidth, mHeight, format, usage, &mAllocatedBlock);
+ ALOGE("provided (%dx%d) required (%dx%d)", mAllocatedBlock->width(), mAllocatedBlock->height(), mWidth, mHeight);
+ }
+ C2GraphicView output = mAllocatedBlock->map().get();
+ if (output.error() != OK) {
+ ALOGE("mapped err = %d", output.error());
+ }
+
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
- if (!setDecodeArgs(&s_dec_ip, &s_dec_op, &input, &output, workIndex, inOffset)) {
+ if (!setDecodeArgs(&s_dec_ip, &s_dec_op, input.get(), &output, workIndex, inOffset)) {
ALOGE("Decoder arg setup failed");
// TODO: notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
return;
}
- ALOGE("Decoder arg setup succeeded");
+ ALOGV("Decoder arg setup succeeded");
// If input dump is enabled, then write to file
DUMP_TO_FILE(mInFile, s_dec_ip.pv_stream_buffer, s_dec_ip.u4_num_Bytes, mInputOffset);
@@ -1321,15 +1340,24 @@
PRINT_TIME("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
s_dec_op.u4_num_bytes_consumed);
- ALOGI("bytes total=%u", input.capacity());
+ if (input) {
+ ALOGI("bytes total=%u", input->capacity());
+ }
if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
mFlushNeeded = true;
}
- if (1 != s_dec_op.u4_frame_decoded_flag) {
- /* If the input did not contain picture data, then ignore
- * the associated timestamp */
- //mTimeStampsValid[workIndex] = false;
+ if (1 != s_dec_op.u4_frame_decoded_flag && work) {
+ /* If the input did not contain picture data, return work without
+ * buffer */
+ ALOGV("no picture data");
+ std::vector<std::unique_ptr<C2Work>> done;
+ done.push_back(std::move(work));
+ done[0]->worklets.front()->output.flags = (C2BufferPack::flags_t)0;
+ done[0]->worklets.front()->output.buffers.clear();
+ done[0]->worklets.front()->output.buffers.emplace_back(nullptr);
+ done[0]->worklets.front()->output.ordinal = done[0]->input.ordinal;
+ mListener->onWorkDone_nb(shared_from_this(), std::move(done));
}
// If the decoder is in the changing resolution mode and there is no output present,
@@ -1373,10 +1401,19 @@
}
if (s_dec_op.u4_output_present) {
- ALOGV("output_present");
- // TODO: outHeader->nFilledLen = (mWidth * mHeight * 3) / 2;
+ ALOGV("output_present: %d", s_dec_op.u4_ts);
std::vector<std::unique_ptr<C2Work>> done;
- done.push_back(std::move(mPendingWork[s_dec_op.u4_ts]));
+ {
+ std::unique_lock<std::mutex> lock(mPendingLock);
+ done.push_back(std::move(mPendingWork[s_dec_op.u4_ts]));
+ mPendingWork.erase(s_dec_op.u4_ts);
+ }
+ uint32_t flags = 0;
+ if (done[0]->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
+ flags |= C2BufferPack::FLAG_END_OF_STREAM;
+ ALOGV("EOS");
+ }
+ done[0]->worklets.front()->output.flags = (C2BufferPack::flags_t)flags;
done[0]->worklets.front()->output.buffers.clear();
done[0]->worklets.front()->output.buffers.emplace_back(
std::make_shared<GraphicBuffer>(std::move(mAllocatedBlock)));
@@ -1391,16 +1428,25 @@
/* If EOS was received on input port and there is no output
* from the codec, then signal EOS on output port */
if (mReceivedEOS) {
- // TODO
- // outHeader->nFilledLen = 0;
- // outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
+ std::vector<std::unique_ptr<C2Work>> done;
+ {
+ std::unique_lock<std::mutex> lock(mPendingLock);
+ if (!mPendingWork.empty()) {
+ done.push_back(std::move(mPendingWork.begin()->second));
+ mPendingWork.erase(mPendingWork.begin());
+ }
+ }
+ if (!done.empty()) {
+ ALOGV("sending empty EOS buffer");
+ done[0]->worklets.front()->output.flags = C2BufferPack::FLAG_END_OF_STREAM;
+ done[0]->worklets.front()->output.buffers.clear();
+ done[0]->worklets.front()->output.buffers.emplace_back(nullptr);
+ done[0]->worklets.front()->output.ordinal = done[0]->input.ordinal;
+ mListener->onWorkDone_nb(shared_from_this(), std::move(done));
+ }
- // outInfo->mOwnedByUs = false;
- // outQueue.erase(outQueue.begin());
- // outInfo = NULL;
- // notifyFillBufferDone(outHeader);
- // outHeader = NULL;
resetPlugin();
+ return;
}
}
inOffset += s_dec_op.u4_num_bytes_consumed;
@@ -1465,14 +1511,17 @@
class C2SoftAvcDecFactory : public C2ComponentFactory {
public:
virtual c2_status_t createComponent(
- std::shared_ptr<C2Component>* const component, c2_node_id_t id) override {
- *component = std::make_shared<C2SoftAvcDec>("avc", id);
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ std::function<void(::android::C2Component*)> deleter) override {
+ *component = std::shared_ptr<C2Component>(new C2SoftAvcDec("avc", id), deleter);
return C2_OK;
}
virtual c2_status_t createInterface(
- std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) override {
- *interface = std::make_shared<C2SoftAvcDecIntf>("avc", id);
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ std::function<void(::android::C2ComponentInterface*)> deleter) override {
+ *interface =
+ std::shared_ptr<C2ComponentInterface>(new C2SoftAvcDecIntf("avc", id), deleter);
return C2_OK;
}
@@ -1482,9 +1531,11 @@
} // namespace android
extern "C" ::android::C2ComponentFactory* CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
return new ::android::C2SoftAvcDecFactory();
}
extern "C" void DestroyCodec2Factory(::android::C2ComponentFactory* factory) {
+ ALOGV("in %s", __func__);
delete factory;
}
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
index 5deaf5d..28f1dfd 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
@@ -58,9 +58,6 @@
#define MIN(a, b) ((a) < (b)) ? (a) : (b)
-/** Used to remove warnings about unused parameters */
-#define UNUSED(x) ((void)(x))
-
/** Get time */
#define GETTIME(a, b) gettimeofday(a, b);
@@ -80,7 +77,7 @@
};
C2SoftAvcDecIntf(const char *name, c2_node_id_t id);
- virtual ~C2SoftAvcDecIntf() = default;
+ virtual ~C2SoftAvcDecIntf() override;
// From C2ComponentInterface
virtual C2String getName() const override;
diff --git a/media/libstagefright/codecs/cmds/Android.bp b/media/libstagefright/codecs/cmds/Android.bp
index e44e53c..ad0bd2d 100644
--- a/media/libstagefright/codecs/cmds/Android.bp
+++ b/media/libstagefright/codecs/cmds/Android.bp
@@ -22,7 +22,6 @@
"libstagefright",
"libstagefright_codec2",
"libstagefright_foundation",
- "libstagefright_soft_c2avcdec",
"libui",
"libutils",
],
diff --git a/media/libstagefright/codecs/cmds/codec2.cpp b/media/libstagefright/codecs/cmds/codec2.cpp
index 8e2c4b9..1972a7a 100644
--- a/media/libstagefright/codecs/cmds/codec2.cpp
+++ b/media/libstagefright/codecs/cmds/codec2.cpp
@@ -211,10 +211,9 @@
return;
}
- std::unique_ptr<C2ComponentFactory> factory(CreateCodec2Factory());
+ std::shared_ptr<C2ComponentStore> store = GetCodec2PlatformComponentStore();
std::shared_ptr<C2Component> component;
- (void)factory->createComponent(&component, 0);
- DestroyCodec2Factory(factory.release());
+ (void)store->createComponent("c2.google.avc.decoder", &component);
(void)component->setListener_sm(mListener);
std::unique_ptr<C2PortBlockPoolsTuning::output> pools =
diff --git a/media/libstagefright/include/CCodecBufferChannel.h b/media/libstagefright/include/CCodecBufferChannel.h
new file mode 100644
index 0000000..354cee2
--- /dev/null
+++ b/media/libstagefright/include/CCodecBufferChannel.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_BUFFER_CHANNEL_H_
+
+#define A_BUFFER_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+
+#include <C2Buffer.h>
+#include <C2Component.h>
+
+#include <media/stagefright/foundation/Mutexed.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/ICrypto.h>
+
+namespace android {
+
+/**
+ * BufferChannelBase implementation for CCodec.
+ */
+class CCodecBufferChannel : public BufferChannelBase {
+public:
+ class Buffers {
+ public:
+ Buffers() = default;
+ virtual ~Buffers() = default;
+
+ inline void setAlloc(const std::shared_ptr<C2BlockPool> &alloc) { mAlloc = alloc; }
+ inline void setFormat(const sp<AMessage> &format) { mFormat = format; }
+ inline const std::shared_ptr<C2BlockPool> &getAlloc() { return mAlloc; }
+
+ protected:
+ // Input: this object uses it to allocate input buffers that the
+ // client fills.
+ // Output: this object passes it to the component.
+ std::shared_ptr<C2BlockPool> mAlloc;
+ sp<AMessage> mFormat;
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(Buffers);
+ };
+
+ class InputBuffers : public Buffers {
+ public:
+ using Buffers::Buffers;
+ virtual ~InputBuffers() = default;
+
+ virtual bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) = 0;
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+ virtual void flush() = 0;
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(InputBuffers);
+ };
+
+ class OutputBuffers : public Buffers {
+ public:
+ using Buffers::Buffers;
+ virtual ~OutputBuffers() = default;
+
+ virtual bool registerBuffer(
+ const std::shared_ptr<C2Buffer> &buffer,
+ size_t *index,
+ sp<MediaCodecBuffer> *codecBuffer) = 0;
+ virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+ virtual void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork);
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(OutputBuffers);
+ };
+
+ CCodecBufferChannel(const std::function<void(status_t, enum ActionCode)> &onError);
+ virtual ~CCodecBufferChannel();
+
+ // BufferChannelBase interface
+ virtual status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual status_t queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer,
+ bool secure,
+ const uint8_t *key,
+ const uint8_t *iv,
+ CryptoPlugin::Mode mode,
+ CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples,
+ size_t numSubSamples,
+ AString *errorDetailMsg) override;
+ virtual status_t renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) override;
+ virtual status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+ virtual void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+
+ // Methods below are interface for CCodec to use.
+
+ void setComponent(const std::shared_ptr<C2Component> &component);
+ status_t setSurface(const sp<Surface> &surface);
+
+ /**
+ * Set C2BlockPool for input buffers.
+ *
+ * TODO: start timestamp?
+ */
+ void setInputBufferAllocator(const sp<C2BlockPool> &inAlloc);
+
+ /**
+ * Set C2BlockPool for output buffers. This object shall never use the
+ * allocator itself; it is just passed to the component.
+ *
+ * TODO: start timestamp?
+ */
+ void setOutputBufferAllocator(const sp<C2BlockPool> &outAlloc);
+
+ /**
+ * Start queueing buffers to the component. This object should never queue
+ * buffers before this call.
+ */
+ void start(const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat);
+
+ /**
+ * Stop queueing buffers to the component. This object should never queue
+ * buffers after this call, until start() is called.
+ */
+ void stop();
+
+ void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork);
+
+ /**
+ * Notify MediaCodec about work done.
+ *
+ * @param workItems finished work items.
+ */
+ void onWorkDone(std::vector<std::unique_ptr<C2Work>> workItems);
+
+private:
+ class QueueGuard;
+
+ class QueueSync {
+ public:
+ inline QueueSync() : mCount(-1) {}
+ ~QueueSync() = default;
+
+ void start();
+ void stop();
+
+ private:
+ std::mutex mMutex;
+ std::atomic_int32_t mCount;
+
+ friend class CCodecBufferChannel::QueueGuard;
+ };
+
+ class QueueGuard {
+ public:
+ QueueGuard(QueueSync &sync);
+ ~QueueGuard();
+ inline bool isRunning() { return mRunning; }
+
+ private:
+ QueueSync &mSync;
+ bool mRunning;
+ };
+
+ QueueSync mSync;
+ sp<MemoryDealer> mDealer;
+ sp<IMemory> mDecryptDestination;
+ int32_t mHeapSeqNum;
+
+ std::shared_ptr<C2Component> mComponent;
+ std::function<void(status_t, enum ActionCode)> mOnError;
+ std::shared_ptr<C2BlockPool> mInputAllocator;
+ QueueSync mQueueSync;
+ Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
+ Mutexed<std::unique_ptr<OutputBuffers>> mOutputBuffers;
+
+ std::atomic_uint64_t mFrameIndex;
+ std::atomic_uint64_t mFirstValidFrameIndex;
+
+ sp<MemoryDealer> makeMemoryDealer(size_t heapSize);
+ Mutexed<sp<Surface>> mSurface;
+
+ inline bool hasCryptoOrDescrambler() {
+ return mCrypto != NULL || mDescrambler != NULL;
+ }
+};
+
+} // namespace android
+
+#endif // A_BUFFER_CHANNEL_H_
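A minimal sketch (not part of the patch) of the ordering contract documented above: no buffers are queued before start() or after stop(), and finished work flows back through onWorkDone(). All objects are assumed to be created elsewhere.

    #include <CCodecBufferChannel.h>

    using namespace android;

    void runOneBufferSketch(
            const std::shared_ptr<CCodecBufferChannel> &channel,
            const std::shared_ptr<C2Component> &component,
            const sp<AMessage> &inputFormat,
            const sp<AMessage> &outputFormat,
            const sp<MediaCodecBuffer> &clientBuffer) {
        channel->setComponent(component);
        channel->start(inputFormat, outputFormat);      // queueing allowed from here on
        (void)channel->queueInputBuffer(clientBuffer);  // hands the buffer to the component
        // Finished work arrives asynchronously through the component's listener,
        // which is expected to forward it to channel->onWorkDone(...).
        channel->stop();                                // no queueing past this point
    }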
diff --git a/media/libstagefright/include/Codec2Buffer.h b/media/libstagefright/include/Codec2Buffer.h
new file mode 100644
index 0000000..0272cea
--- /dev/null
+++ b/media/libstagefright/include/Codec2Buffer.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_BUFFER_H_
+
+#define CODEC2_BUFFER_H_
+
+#include <C2Buffer.h>
+
+#include <media/MediaCodecBuffer.h>
+
+namespace android {
+
+class C2Buffer;
+
+/**
+ * MediaCodecBuffer implementation that wraps around a C2LinearBlock.
+ */
+class Codec2Buffer : public MediaCodecBuffer {
+public:
+ static sp<Codec2Buffer> allocate(
+ const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block);
+
+ virtual ~Codec2Buffer() = default;
+
+ C2ConstLinearBlock share();
+
+private:
+ Codec2Buffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &buffer,
+ const std::shared_ptr<C2LinearBlock> &block);
+ Codec2Buffer() = delete;
+
+ std::shared_ptr<C2LinearBlock> mBlock;
+};
+
+} // namespace android
+
+#endif // CODEC2_BUFFER_H_
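A hedged sketch (not part of the patch) of how a Codec2Buffer would be filled from a C2BlockPool, assuming the pool provides fetchLinearBlock() symmetrically to the fetchGraphicBlock() call used by the decoder above; pool, format and payload are assumed to come from the caller.

    #include <string.h>

    #include <C2Buffer.h>
    #include <Codec2Buffer.h>

    using namespace android;

    sp<Codec2Buffer> makeInputBufferSketch(
            const std::shared_ptr<C2BlockPool> &pool,
            const sp<AMessage> &format,
            const uint8_t *payload, uint32_t size) {
        std::shared_ptr<C2LinearBlock> block;
        C2MemoryUsage usage = { C2MemoryUsage::kSoftwareRead, C2MemoryUsage::kSoftwareWrite };
        if (pool->fetchLinearBlock(size, usage, &block) != C2_OK) {
            return nullptr;
        }
        sp<Codec2Buffer> buffer = Codec2Buffer::allocate(format, block);
        if (buffer == nullptr) {
            return nullptr;
        }
        memcpy(buffer->data(), payload, size);  // MediaCodecBuffer exposes data()/setRange()
        buffer->setRange(0, size);
        return buffer;
    }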
diff --git a/media/libstagefright/include/media/stagefright/CCodec.h b/media/libstagefright/include/media/stagefright/CCodec.h
new file mode 100644
index 0000000..3e24bbe
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/CCodec.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C_CODEC_H_
+#define C_CODEC_H_
+
+#include <chrono>
+
+#include <C2Component.h>
+
+#include <android/native_window.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/Mutexed.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/stagefright/FrameRenderTracker.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/SkipCutBuffer.h>
+#include <utils/NativeHandle.h>
+#include <hardware/gralloc.h>
+#include <nativebase/nativebase.h>
+
+namespace android {
+
+class CCodecBufferChannel;
+
+class CCodec : public CodecBase {
+public:
+ CCodec();
+
+ virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
+ virtual void initiateAllocateComponent(const sp<AMessage> &msg) override;
+ virtual void initiateConfigureComponent(const sp<AMessage> &msg) override;
+ virtual void initiateCreateInputSurface() override;
+ virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface) override;
+ virtual void initiateStart() override;
+ virtual void initiateShutdown(bool keepComponentAllocated = false) override;
+
+ virtual status_t setSurface(const sp<Surface> &surface) override;
+
+ virtual void signalFlush() override;
+ virtual void signalResume() override;
+
+ virtual void signalSetParameters(const sp<AMessage> &msg) override;
+ virtual void signalEndOfInputStream() override;
+ virtual void signalRequestIDRFrame() override;
+
+ void initiateReleaseIfStuck();
+
+protected:
+ virtual ~CCodec();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg) override;
+
+private:
+ typedef std::chrono::time_point<std::chrono::steady_clock> TimePoint;
+
+ void initiateStop();
+ void initiateRelease(bool sendCallback = true);
+
+ void allocate(const AString &componentName);
+ void configure(const sp<AMessage> &msg);
+ void start();
+ void stop();
+ void flush();
+ void release(bool sendCallback);
+
+ void setDeadline(const TimePoint &deadline);
+
+ enum {
+ kWhatAllocate,
+ kWhatConfigure,
+ kWhatStart,
+ kWhatFlush,
+ kWhatStop,
+ kWhatRelease,
+ };
+
+ enum {
+ RELEASED,
+ ALLOCATED,
+ FLUSHED,
+ RUNNING,
+
+ ALLOCATING, // RELEASED -> ALLOCATED
+ STARTING, // ALLOCATED -> RUNNING
+ STOPPING, // RUNNING -> ALLOCATED
+ FLUSHING, // RUNNING -> FLUSHED
+ RESUMING, // FLUSHED -> RUNNING
+ RELEASING, // {ANY EXCEPT RELEASED} -> RELEASED
+ };
+
+ struct State {
+ inline State() : mState(RELEASED) {}
+
+ int mState;
+ std::shared_ptr<C2Component> mComp;
+ };
+
+ struct Formats {
+ sp<AMessage> mInputFormat;
+ sp<AMessage> mOutputFormat;
+ };
+
+ Mutexed<State> mState;
+ std::shared_ptr<CCodecBufferChannel> mChannel;
+ std::shared_ptr<C2Component::Listener> mListener;
+ Mutexed<TimePoint> mDeadline;
+ Mutexed<Formats> mFormats;
+
+ DISALLOW_EVIL_CONSTRUCTORS(CCodec);
+};
+
+} // namespace android
+
+#endif // C_CODEC_H_
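The Mutexed<State> member above is intended to be accessed through Mutexed<T>::Locked from media/stagefright/foundation/Mutexed.h; a small sketch (not part of the patch, with the state word modeled as a plain int) shows the locking pattern for a transition such as ALLOCATED -> STARTING.

    #include <media/stagefright/foundation/Mutexed.h>

    using android::Mutexed;

    static bool tryTransitionSketch(Mutexed<int> &state, int from, int to) {
        Mutexed<int>::Locked locked(state);  // holds the lock for the scope of 'locked'
        if (*locked != from) {
            return false;
        }
        *locked = to;
        return true;
    }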
diff --git a/media/libstagefright/include/media/stagefright/CodecBase.h b/media/libstagefright/include/media/stagefright/CodecBase.h
index 9197f7b..268662f 100644
--- a/media/libstagefright/include/media/stagefright/CodecBase.h
+++ b/media/libstagefright/include/media/stagefright/CodecBase.h
@@ -18,6 +18,7 @@
#define CODEC_BASE_H_
+#include <list>
#include <memory>
#include <stdint.h>
@@ -26,7 +27,6 @@
#include <media/hardware/CryptoAPI.h>
#include <media/hardware/HardwareAPI.h>
-#include <media/IOMX.h>
#include <media/MediaCodecInfo.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ColorUtils.h>
diff --git a/media/libstagefright/include/media/stagefright/MediaFilter.h b/media/libstagefright/include/media/stagefright/MediaFilter.h
index 0c10d11..a28c49d 100644
--- a/media/libstagefright/include/media/stagefright/MediaFilter.h
+++ b/media/libstagefright/include/media/stagefright/MediaFilter.h
@@ -57,7 +57,7 @@
OWNED_BY_UPSTREAM,
};
- IOMX::buffer_id mBufferID;
+ uint32_t mBufferID;
int32_t mGeneration;
int32_t mOutputFlags;
Status mStatus;
@@ -121,7 +121,7 @@
status_t allocateBuffersOnPort(OMX_U32 portIndex);
BufferInfo *findBufferByID(
- uint32_t portIndex, IOMX::buffer_id bufferID,
+ uint32_t portIndex, uint32_t bufferID,
ssize_t *index = NULL);
void postFillThisBuffer(BufferInfo *info);
void postDrainThisBuffer(BufferInfo *info);
diff --git a/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h b/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
index 5060dc1..6aede08 100644
--- a/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
+++ b/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
@@ -47,7 +47,8 @@
static sp<SimpleDecodingSource> Create(
const sp<MediaSource> &source, uint32_t flags,
const sp<ANativeWindow> &nativeWindow,
- const char *desiredCodec = NULL);
+ const char *desiredCodec = NULL,
+ bool skipMediaCodecList = false);
static sp<SimpleDecodingSource> Create(
const sp<MediaSource> &source, uint32_t flags = 0);
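A minimal usage sketch (not part of the patch) of the widened Create() overload: the codec name is a caller-supplied placeholder, and skipMediaCodecList is set only when an explicit name is given so the named component is instantiated directly.

    #include <media/stagefright/SimpleDecodingSource.h>

    using namespace android;

    sp<SimpleDecodingSource> createDecoderSketch(
            const sp<MediaSource> &source,
            const sp<ANativeWindow> &window,
            const char *desiredCodec /* nullptr keeps the default codec selection */) {
        uint32_t flags = 0;
        return SimpleDecodingSource::Create(
                source, flags, window,
                desiredCodec,
                desiredCodec != nullptr /* skipMediaCodecList */);
    }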
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index e77907a..e0d0d7b 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -903,9 +903,7 @@
mConfig.inputCfg.buffer.raw = NULL;
}
mInBuffer = buffer;
- if (buffer != nullptr) { // FIXME: EffectHalHidl::setInBuffer should accept null input.
- mEffectInterface->setInBuffer(buffer);
- }
+ mEffectInterface->setInBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
// aux effects do in place conversion to float - we don't allocate mInBuffer16 for them.
@@ -947,9 +945,7 @@
mConfig.outputCfg.buffer.raw = NULL;
}
mOutBuffer = buffer;
- if (buffer != nullptr) {
- mEffectInterface->setOutBuffer(buffer);
- }
+ mEffectInterface->setOutBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
// Note: Any effect that does not accumulate does not need mOutBuffer16 and
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b169bac..d9cd121 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -34,8 +34,8 @@
class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
{
public:
- explicit AudioInputDescriptor(const sp<IOProfile>& profile);
- void setIoHandle(audio_io_handle_t ioHandle);
+ explicit AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface);
audio_port_handle_t getId() const;
audio_module_handle_t getModuleHandle() const;
uint32_t getOpenRefCount() const;
@@ -73,6 +73,14 @@
void setPatchHandle(audio_patch_handle_t handle);
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input);
+ void close();
+
private:
audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
@@ -85,6 +93,7 @@
// a particular input started and prevent preemption of this active input by this session.
// We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
SortedVector<audio_session_t> mPreemptedSessions;
+ AudioPolicyClientInterface *mClientInterface;
};
class AudioInputCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index c09cb5a..0be8fc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -101,8 +101,6 @@
status_t dump(int fd);
- void setIoHandle(audio_io_handle_t ioHandle);
-
virtual audio_devices_t device() const;
virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
virtual audio_devices_t supportedDevices();
@@ -122,6 +120,14 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port *port) const;
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output);
+ void close();
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index ec04ef7..118f0d2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -34,7 +34,11 @@
{
public:
IOProfile(const String8 &name, audio_port_role_t role)
- : AudioPort(name, AUDIO_PORT_TYPE_MIX, role) {}
+ : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
+ maxOpenCount((role == AUDIO_PORT_ROLE_SOURCE) ? 1 : 0),
+ curOpenCount(0),
+ maxActiveCount(1),
+ curActiveCount(0) {}
// For a Profile aka MixPort, tag name and name are equivalent.
virtual const String8 getTagName() const { return getName(); }
@@ -103,6 +107,34 @@
const DeviceVector &getSupportedDevices() const { return mSupportedDevices; }
+ bool canOpenNewIo() {
+ if (maxOpenCount == 0 || curOpenCount < maxOpenCount) {
+ return true;
+ }
+ return false;
+ }
+
+ bool canStartNewIo() {
+ if (maxActiveCount == 0 || curActiveCount < maxActiveCount) {
+ return true;
+ }
+ return false;
+ }
+
+ // Maximum number of input or output streams that can be simultaneously opened for this profile.
+ // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
+ // profiles and 0 for input profiles
+ uint32_t maxOpenCount;
+ // Number of streams currently opened for this profile.
+ uint32_t curOpenCount;
+ // Maximum number of input or output streams that can be simultaneously active for this profile.
+ // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for both
+ // output and input profiles (matching the constructor above)
+ uint32_t maxActiveCount;
+ // Number of streams currently active for this profile. This is not the number of active clients
+ // (AudioTrack or AudioRecord) but the number of active HAL streams.
+ uint32_t curActiveCount;
+
private:
DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
};
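The new counters imply a simple discipline that the later AudioPolicyManager changes follow; the helpers below (not part of the patch) only restate it: an I/O stream is opened or started only when the corresponding limit allows it, and the current counts are updated by whoever opens or starts the stream.

    #include "IOProfile.h"

    using namespace android;

    static bool tryOpenIoSketch(const sp<IOProfile> &profile) {
        if (!profile->canOpenNewIo()) {
            return false;             // maxOpenCount reached (0 means unlimited)
        }
        profile->curOpenCount++;      // done by the descriptor's open() in this patch
        return true;
    }

    static bool tryStartIoSketch(const sp<IOProfile> &profile) {
        if (!profile->canStartNewIo()) {
            return false;             // maxActiveCount reached (0 means unlimited)
        }
        profile->curActiveCount++;    // done by AudioPolicyManager::startOutput() in this patch
        return true;
    }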
diff --git a/services/audiopolicy/common/managerdefinitions/include/Serializer.h b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
index 078b582..3b0e209 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Serializer.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
@@ -92,6 +92,8 @@
static const char name[];
static const char role[];
static const char flags[];
+ static const char maxOpenCount[];
+ static const char maxActiveCount[];
};
typedef IOProfile Element;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 2492ed6..737872d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "APM::AudioInputDescriptor"
//#define LOG_NDEBUG 0
+#include <AudioPolicyInterface.h>
#include "AudioInputDescriptor.h"
#include "IOProfile.h"
#include "AudioGain.h"
@@ -26,10 +27,12 @@
namespace android {
-AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
+AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface)
: mIoHandle(0),
mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
- mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
+ mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
+ mClientInterface(clientInterface)
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -39,12 +42,6 @@
}
}
-void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
{
if (mProfile == 0) {
@@ -192,6 +189,71 @@
return config;
}
+status_t AudioInputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ String8 lAddress = address;
+ if (lAddress == "") {
+ const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+ const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+ lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+ : String8("");
+ }
+
+ mDevice = device;
+
+ ALOGV("opening input for device %08x address %s profile %p name %s",
+ mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openInput(mProfile->getModuleHandle(),
+ input,
+ &lConfig,
+ &mDevice,
+ lAddress,
+ source,
+ flags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openInput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *input;
+ mProfile->curOpenCount++;
+ }
+
+ return status;
+}
+
+
+void AudioInputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ mClientInterface->closeInput(mIoHandle);
+ LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+ __FUNCTION__, mProfile->curOpenCount);
+ mProfile->curOpenCount--;
+ mIoHandle = AUDIO_IO_HANDLE_NONE;
+ }
+}
+
status_t AudioInputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 4d3c3b5..be5a1c1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -23,6 +23,7 @@
#include "AudioGain.h"
#include "Volume.h"
#include "HwModule.h"
+#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
// A device mask for all audio output devices that are considered "remote" when evaluating
@@ -231,13 +232,6 @@
}
}
-void SwAudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
-
status_t SwAudioOutputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
@@ -387,6 +381,93 @@
return changed;
}
+status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ String8 lAddress = address;
+ if (lAddress == "") {
+ const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+ const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+ lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+ : String8("");
+ }
+
+ mDevice = device;
+ // if the selected profile is offloaded and no offload info was specified,
+ // create a default one
+ if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
+ lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ lConfig.offload_info = AUDIO_INFO_INITIALIZER;
+ lConfig.offload_info.sample_rate = lConfig.sample_rate;
+ lConfig.offload_info.channel_mask = lConfig.channel_mask;
+ lConfig.offload_info.format = lConfig.format;
+ lConfig.offload_info.stream_type = stream;
+ lConfig.offload_info.duration_us = -1;
+ lConfig.offload_info.has_video = true; // conservative
+ lConfig.offload_info.is_streaming = true; // likely
+ }
+
+ mFlags = (audio_output_flags_t)(mFlags | flags);
+
+ ALOGV("opening output for device %08x address %s profile %p name %s",
+ mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
+ output,
+ &lConfig,
+ &mDevice,
+ lAddress,
+ &mLatency,
+ mFlags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openOutput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *output;
+ mProfile->curOpenCount++;
+ }
+
+ return status;
+}
+
+
+void SwAudioOutputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ AudioParameter param;
+ param.add(String8("closing"), String8("true"));
+ mClientInterface->setParameters(mIoHandle, param.toString());
+
+ mClientInterface->closeOutput(mIoHandle);
+
+ LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+ __FUNCTION__, mProfile->curOpenCount);
+ mProfile->curOpenCount--;
+ mIoHandle = AUDIO_IO_HANDLE_NONE;
+ }
+}
+
+
// HwAudioOutputDescriptor implementation
HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 74ef4ec..fc89672 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -122,6 +122,16 @@
result.append("\n");
write(fd, result.string(), result.size());
mSupportedDevices.dump(fd, String8("Supported"), 4, false);
+
+ result.clear();
+ snprintf(buffer, SIZE, "\n - maxOpenCount: %u - curOpenCount: %u\n",
+ maxOpenCount, curOpenCount);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " - maxActiveCount: %u - curActiveCount: %u\n",
+ maxActiveCount, curActiveCount);
+ result.append(buffer);
+
+ write(fd, result.string(), result.size());
}
void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 0908ffc..aa589f4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -217,6 +217,8 @@
const char MixPortTraits::Attributes::name[] = "name";
const char MixPortTraits::Attributes::role[] = "role";
const char MixPortTraits::Attributes::flags[] = "flags";
+const char MixPortTraits::Attributes::maxOpenCount[] = "maxOpenCount";
+const char MixPortTraits::Attributes::maxActiveCount[] = "maxActiveCount";
status_t MixPortTraits::deserialize(_xmlDoc *doc, const _xmlNode *child, PtrElement &mixPort,
PtrSerializingCtx /*serializingContext*/)
@@ -259,6 +261,14 @@
mixPort->setFlags(InputFlagConverter::maskFromString(flags));
}
}
+ string maxOpenCount = getXmlAttribute(child, Attributes::maxOpenCount);
+ if (!maxOpenCount.empty()) {
+ convertTo(maxOpenCount, mixPort->maxOpenCount);
+ }
+ string maxActiveCount = getXmlAttribute(child, Attributes::maxActiveCount);
+ if (!maxActiveCount.empty()) {
+ convertTo(maxActiveCount, mixPort->maxActiveCount);
+ }
// Deserialize children
AudioGainTraits::Collection gains;
deserializeCollection<AudioGainTraits>(doc, child, gains, NULL);
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 62cbfc1..b363779 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -843,12 +843,10 @@
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
- ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
+ ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %x, channel mask %x, flags %x",
device, config->sample_rate, config->format, config->channel_mask, flags);
- *output = getOutputForDevice(device, session, *stream,
- config->sample_rate, config->format, config->channel_mask,
- flags, &config->offload_info);
+ *output = getOutputForDevice(device, session, *stream, config, flags);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
@@ -867,11 +865,8 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+ const audio_config_t *config,
+ audio_output_flags_t flags)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
@@ -898,7 +893,7 @@
if (stream == AUDIO_STREAM_TTS) {
flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
- audio_is_linear_pcm(format)) {
+ audio_is_linear_pcm(config->format)) {
flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
ALOGV("Set VoIP and Direct output flags for PCM format");
@@ -909,8 +904,8 @@
// skip direct output selection if the request can obviously be attached to a mixed output
// and not explicitly requested
if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
- audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
- audio_channel_count_from_out_mask(channelMask) <= 2) {
+ audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
+ audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
goto non_direct_output;
}
@@ -924,102 +919,58 @@
if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
profile = getProfileForDirectOutput(device,
- samplingRate,
- format,
- channelMask,
+ config->sample_rate,
+ config->format,
+ config->channel_mask,
(audio_output_flags_t)flags);
}
if (profile != 0) {
- sp<SwAudioOutputDescriptor> outputDesc = NULL;
-
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
- outputDesc = desc;
// reuse direct output if currently open by the same client
// and configured with same parameters
- if ((samplingRate == outputDesc->mSamplingRate) &&
- audio_formats_match(format, outputDesc->mFormat) &&
- (channelMask == outputDesc->mChannelMask)) {
- if (session == outputDesc->mDirectClientSession) {
- outputDesc->mDirectOpenCount++;
- ALOGV("getOutputForDevice() reusing direct output %d for session %d",
- mOutputs.keyAt(i), session);
- return mOutputs.keyAt(i);
- } else {
- ALOGV("getOutputForDevice() do not reuse direct output because"
- "current client (%d) is not the same as requesting client (%d)",
- outputDesc->mDirectClientSession, session);
- goto non_direct_output;
- }
+ if ((config->sample_rate == desc->mSamplingRate) &&
+ audio_formats_match(config->format, desc->mFormat) &&
+ (config->channel_mask == desc->mChannelMask) &&
+ (session == desc->mDirectClientSession)) {
+ desc->mDirectOpenCount++;
+ ALOGV("getOutputForDevice() reusing direct output %d for session %d",
+ mOutputs.keyAt(i), session);
+ return mOutputs.keyAt(i);
}
}
}
- // close direct output if currently open and configured with different parameters
- if (outputDesc != NULL) {
- closeOutput(outputDesc->mIoHandle);
+
+ if (!profile->canOpenNewIo()) {
+ goto non_direct_output;
}
- // if the selected profile is offloaded and no offload info was specified,
- // create a default one
- audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
- if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
- flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- defaultOffloadInfo.sample_rate = samplingRate;
- defaultOffloadInfo.channel_mask = channelMask;
- defaultOffloadInfo.format = format;
- defaultOffloadInfo.stream_type = stream;
- defaultOffloadInfo.bit_rate = 0;
- defaultOffloadInfo.duration_us = -1;
- defaultOffloadInfo.has_video = true; // conservative
- defaultOffloadInfo.is_streaming = true; // likely
- offloadInfo = &defaultOffloadInfo;
- }
-
- outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- outputDesc->mDevice = device;
- outputDesc->mLatency = 0;
- outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = samplingRate;
- config.channel_mask = channelMask;
- config.format = format;
- if (offloadInfo != NULL) {
- config.offload_info = *offloadInfo;
- }
- DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
- String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
- : String8("");
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+ sp<SwAudioOutputDescriptor> outputDesc =
+ new SwAudioOutputDescriptor(profile, mpClientInterface);
+ status = outputDesc->open(config, device, String8(""), stream, flags, &output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
- (samplingRate != 0 && samplingRate != config.sample_rate) ||
- (format != AUDIO_FORMAT_DEFAULT && !audio_formats_match(format, config.format)) ||
- (channelMask != 0 && channelMask != config.channel_mask)) {
- ALOGV("getOutputForDevice() failed opening direct output: output %d samplingRate %d %d,"
- "format %d %d, channelMask %04x %04x", output, samplingRate,
- outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
- outputDesc->mChannelMask);
+ (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
+ (config->format != AUDIO_FORMAT_DEFAULT &&
+ !audio_formats_match(config->format, outputDesc->mFormat)) ||
+ (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
+ ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
+ "format %d %d, channel mask %04x %04x", output, config->sample_rate,
+ outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
+ config->channel_mask, outputDesc->mChannelMask);
if (output != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeOutput(output);
+ outputDesc->close();
}
// fall back to mixer output if possible when the direct output could not be open
- if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
+ if (audio_is_linear_pcm(config->format) &&
+ config->sample_rate <= SAMPLE_RATE_HZ_MAX) {
goto non_direct_output;
}
return AUDIO_IO_HANDLE_NONE;
}
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
@@ -1045,18 +996,18 @@
// open a non direct output
// for non direct outputs, only PCM is supported
- if (audio_is_linear_pcm(format)) {
+ if (audio_is_linear_pcm(config->format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
- output = selectOutput(outputs, flags, format);
+ output = selectOutput(outputs, flags, config->format);
}
ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
- "samplingRate %d, format %d, channels %x, flags %x",
- stream, samplingRate, format, channelMask, flags);
+ "sampling rate %d, format %d, channels %x, flags %x",
+ stream, config->sample_rate, config->format, config->channel_mask, flags);
return output;
}
@@ -1155,6 +1106,13 @@
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+ if (!outputDesc->isActive()) {
+ if (!outputDesc->mProfile->canStartNewIo()) {
+ return INVALID_OPERATION;
+ }
+ outputDesc->mProfile->curActiveCount++;
+ }
+
// Routing?
mOutputRoutes.incRouteActivity(session);
@@ -1182,6 +1140,12 @@
if (status != NO_ERROR) {
mOutputRoutes.decRouteActivity(session);
+ if (!outputDesc->isActive()) {
+ LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __FUNCTION__, outputDesc->mProfile->curActiveCount);
+ outputDesc->mProfile->curActiveCount--;
+ }
return status;
}
// Automatically enable the remote submix input when output is started on a re routing mix
@@ -1370,7 +1334,15 @@
}
}
- return stopSource(outputDesc, stream, forceDeviceUpdate);
+ status_t status = stopSource(outputDesc, stream, forceDeviceUpdate);
+
+ if (status == NO_ERROR && !outputDesc->isActive()) {
+ LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __FUNCTION__, outputDesc->mProfile->curActiveCount);
+ outputDesc->mProfile->curActiveCount--;
+ }
+ return status;
}
status_t AudioPolicyManager::stopSource(const sp<AudioOutputDescriptor>& outputDesc,
@@ -1473,7 +1445,7 @@
input_type_t *inputType,
audio_port_handle_t *portId)
{
- ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
+ ALOGV("getInputForAttr() source %d, sampling rate %d, format %d, channel mask %x,"
"session %d, flags %#x",
attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
@@ -1485,6 +1457,10 @@
AudioMix *policyMix = NULL;
DeviceVector inputDevices;
+ if (inputSource == AUDIO_SOURCE_DEFAULT) {
+ inputSource = AUDIO_SOURCE_MIC;
+ }
+
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
@@ -1541,9 +1517,6 @@
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
- if (inputSource == AUDIO_SOURCE_DEFAULT) {
- inputSource = AUDIO_SOURCE_MIC;
- }
halInputSource = inputSource;
// TODO: check for existing client for this port ID
@@ -1593,7 +1566,7 @@
}
*input = getInputForDevice(device, address, session, uid, inputSource,
- config->sample_rate, config->format, config->channel_mask, flags,
+ config, flags,
policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
status = INVALID_OPERATION;
@@ -1620,9 +1593,7 @@
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix)
{
@@ -1641,16 +1612,17 @@
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
} else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
- audio_is_linear_pcm(format)) {
+ audio_is_linear_pcm(config->format)) {
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
}
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
- // samplingRate and flags may be updated by getInputProfile
- uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
- audio_format_t profileFormat = format;
- audio_channel_mask_t profileChannelMask = channelMask;
+ // sampling rate and flags may be updated by getInputProfile
+ uint32_t profileSamplingRate = (config->sample_rate == 0) ?
+ SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
+ audio_format_t profileFormat = config->format;
+ audio_channel_mask_t profileChannelMask = config->channel_mask;
audio_input_flags_t profileFlags = flags;
for (;;) {
profile = getInputProfile(device, address,
@@ -1664,12 +1636,13 @@
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
ALOGW("getInputForDevice() could not find profile for device 0x%X, "
- "samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
- device, samplingRate, format, channelMask, flags);
+ "sampling rate %u, format %#x, channel mask 0x%X, flags %#x",
+ device, config->sample_rate, config->format, config->channel_mask, flags);
return input;
}
}
// Pick input sampling rate if not specified by client
+ uint32_t samplingRate = config->sample_rate;
if (samplingRate == 0) {
samplingRate = profileSamplingRate;
}
@@ -1680,14 +1653,14 @@
}
sp<AudioSession> audioSession = new AudioSession(session,
- inputSource,
- format,
- samplingRate,
- channelMask,
- flags,
- uid,
- isSoundTrigger,
- policyMix, mpClientInterface);
+ inputSource,
+ config->format,
+ samplingRate,
+ config->channel_mask,
+ flags,
+ uid,
+ isSoundTrigger,
+ policyMix, mpClientInterface);
// FIXME: disable concurrent capture until UI is ready
#if 0
@@ -1731,8 +1704,8 @@
// can be selected.
if (!isConcurrentSource(inputSource) &&
((desc->mSamplingRate != samplingRate ||
- desc->mChannelMask != channelMask ||
- !audio_formats_match(desc->mFormat, format)) &&
+ desc->mChannelMask != config->channel_mask ||
+ !audio_formats_match(desc->mFormat, config->format)) &&
(source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
source_priority(inputSource)))) {
reusedInputDesc = desc;
@@ -1755,44 +1728,34 @@
}
#endif
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = profileSamplingRate;
- config.channel_mask = profileChannelMask;
- config.format = profileFormat;
-
- if (address == "") {
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
- // the inputs vector must be of size 1, but we don't want to crash here
- address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
+ if (!profile->canOpenNewIo()) {
+ return AUDIO_IO_HANDLE_NONE;
}
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &device,
- address,
- halInputSource,
- profileFlags);
+ sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
+
+ audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = profileSamplingRate;
+ lConfig.channel_mask = profileChannelMask;
+ lConfig.format = profileFormat;
+
+ status_t status = inputDesc->open(&lConfig, device, address,
+ halInputSource, profileFlags, &input);
// only accept input with the exact requested set of parameters
if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
- (profileSamplingRate != config.sample_rate) ||
- !audio_formats_match(profileFormat, config.format) ||
- (profileChannelMask != config.channel_mask)) {
- ALOGW("getInputForAttr() failed opening input: samplingRate %d"
- ", format %d, channelMask %x",
- samplingRate, format, channelMask);
+ (profileSamplingRate != lConfig.sample_rate) ||
+ !audio_formats_match(profileFormat, lConfig.format) ||
+ (profileChannelMask != lConfig.channel_mask)) {
+ ALOGW("getInputForAttr() failed opening input: sampling rate %d"
+ ", format %d, channel mask %x",
+ profileSamplingRate, profileFormat, profileChannelMask);
if (input != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeInput(input);
+ inputDesc->close();
}
return AUDIO_IO_HANDLE_NONE;
}
- sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
- inputDesc->mSamplingRate = profileSamplingRate;
- inputDesc->mFormat = profileFormat;
- inputDesc->mChannelMask = profileChannelMask;
- inputDesc->mDevice = device;
inputDesc->mPolicyMix = policyMix;
inputDesc->addAudioSession(session, audioSession);
@@ -2006,6 +1969,13 @@
setInputDevice(input, device, true /* force */);
if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
+ if (!inputDesc->mProfile->canStartNewIo()) {
+ mInputRoutes.decRouteActivity(session);
+ audioSession->changeActiveCount(-1);
+ return INVALID_OPERATION;
+ }
+ inputDesc->mProfile->curActiveCount++;
+
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2075,6 +2045,11 @@
if (inputDesc->isActive()) {
setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
} else {
+ LOG_ALWAYS_FATAL_IF(inputDesc->mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __FUNCTION__, inputDesc->mProfile->curActiveCount);
+ inputDesc->mProfile->curActiveCount--;
+
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2169,7 +2144,7 @@
mAudioPatches.removeItemsAt(patch_index);
patchRemoved = true;
}
- mpClientInterface->closeInput(mInputs.keyAt(input_index));
+ inputDesc->close();
}
mInputs.clear();
SoundTrigger::setCaptureState(false);
@@ -3632,6 +3607,12 @@
{
const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
+ if (!outProfile->canOpenNewIo()) {
+ ALOGE("Invalid Output profile max open count %u for profile %s",
+ outProfile->maxOpenCount, outProfile->getTagName().c_str());
+ continue;
+ }
+
if (!outProfile->hasSupportedDevices()) {
ALOGW("Output profile contains no device on module %s", mHwModules[i]->getName());
continue;
@@ -3660,30 +3641,15 @@
const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
: String8("");
-
- outputDesc->mDevice = profileType;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = outputDesc->mSamplingRate;
- config.channel_mask = outputDesc->mChannelMask;
- config.format = outputDesc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(outProfile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+ status_t status = outputDesc->open(nullptr, profileType, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
ALOGW("Cannot open output stream for device %08x on hw module %s",
outputDesc->mDevice,
mHwModules[i]->getName());
} else {
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
-
for (size_t k = 0; k < supportedDevices.size(); k++) {
ssize_t index = mAvailableOutputDevices.indexOf(supportedDevices[k]);
// give a valid ID to an attached device once confirmed it is reachable
@@ -3697,11 +3663,11 @@
}
addOutput(output, outputDesc);
setOutputDevice(outputDesc,
- outputDesc->mDevice,
+ profileType,
true,
0,
NULL,
- address.string());
+ address);
}
}
// open input streams needed to access attached devices to validate
@@ -3710,6 +3676,12 @@
{
const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
+ if (!inProfile->canOpenNewIo()) {
+ ALOGE("Invalid Input profile max open count %u for profile %s",
+ inProfile->maxOpenCount, inProfile->getTagName().c_str());
+ continue;
+ }
+
if (!inProfile->hasSupportedDevices()) {
ALOGW("Input profile contains no device on module %s", mHwModules[i]->getName());
continue;
@@ -3722,30 +3694,15 @@
continue;
}
sp<AudioInputDescriptor> inputDesc =
- new AudioInputDescriptor(inProfile);
+ new AudioInputDescriptor(inProfile, mpClientInterface);
- inputDesc->mDevice = profileType;
-
- // find the address
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
- // the inputs vector must be of size 1, but we don't want to crash here
- String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
- : String8("");
- ALOGV(" for input device 0x%x using address %s", profileType, address.string());
- ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
-
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = inputDesc->mSamplingRate;
- config.channel_mask = inputDesc->mChannelMask;
- config.format = inputDesc->mFormat;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openInput(inProfile->getModuleHandle(),
- &input,
- &config,
- &inputDesc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE);
+ status_t status = inputDesc->open(nullptr,
+ profileType,
+ String8(""),
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
@@ -3760,10 +3717,10 @@
}
}
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
} else {
ALOGW("Cannot open input stream for device %08x on hw module %s",
- inputDesc->mDevice,
+ profileType,
mHwModules[i]->getName());
}
}
@@ -3804,10 +3761,10 @@
AudioPolicyManager::~AudioPolicyManager()
{
for (size_t i = 0; i < mOutputs.size(); i++) {
- mpClientInterface->closeOutput(mOutputs.keyAt(i));
+ mOutputs.valueAt(i)->close();
}
for (size_t i = 0; i < mInputs.size(); i++) {
- mpClientInterface->closeInput(mInputs.keyAt(i));
+ mInputs.valueAt(i)->close();
}
mAvailableOutputDevices.clear();
mAvailableInputDevices.clear();
@@ -3825,7 +3782,6 @@
void AudioPolicyManager::addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc)
{
- outputDesc->setIoHandle(output);
mOutputs.add(output, outputDesc);
updateMono(output); // update mono status when adding to output list
selectOutputForMusicEffects();
@@ -3840,7 +3796,6 @@
void AudioPolicyManager::addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc)
{
- inputDesc->setIoHandle(input);
mInputs.add(input, inputDesc);
nextAudioPortGeneration();
}
@@ -3934,30 +3889,20 @@
continue;
}
+ if (!profile->canOpenNewIo()) {
+ ALOGW("Max Output number %u already opened for this profile %s",
+ profile->maxOpenCount, profile->getTagName().c_str());
+ continue;
+ }
+
ALOGV("opening output for device %08x with params %s profile %p name %s",
device, address.string(), profile.get(), profile->getName().string());
desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
- config.offload_info.sample_rate = desc->mSamplingRate;
- config.offload_info.channel_mask = desc->mChannelMask;
- config.offload_info.format = desc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
+ status_t status = desc->open(nullptr, device, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status == NO_ERROR) {
// Here is where the out_set_parameters() for card & device gets called
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
@@ -3967,27 +3912,21 @@
updateAudioProfiles(device, output, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkOutputsForDevice() missing param");
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
} else if (profile->hasDynamicAudioProfile()) {
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
- profile->pickAudioProfile(config.sample_rate, config.channel_mask, config.format);
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ profile->pickAudioProfile(
+ config.sample_rate, config.channel_mask, config.format);
config.offload_info.sample_rate = config.sample_rate;
config.offload_info.channel_mask = config.channel_mask;
config.offload_info.format = config.format;
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
- } else {
+
+ status_t status = desc->open(&config, device, address, AUDIO_STREAM_DEFAULT,
+ AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status != NO_ERROR) {
output = AUDIO_IO_HANDLE_NONE;
}
}
@@ -4033,7 +3972,7 @@
} else {
ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
mPrimaryOutput->mIoHandle, output);
- mpClientInterface->closeOutput(output);
+ desc->close();
removeOutput(output);
nextAudioPortGeneration();
output = AUDIO_IO_HANDLE_NONE;
@@ -4161,6 +4100,7 @@
for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
sp<IOProfile> profile = profiles[profile_index];
+
// nothing to do if one input is already opened for this profile
size_t input_index;
for (input_index = 0; input_index < mInputs.size(); input_index++) {
@@ -4176,31 +4116,22 @@
continue;
}
- ALOGV("opening input for device 0x%X with params %s", device, address.string());
- desc = new AudioInputDescriptor(profile);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
+ if (!profile->canOpenNewIo()) {
+ ALOGW("Max Input number %u already opened for this profile %s",
+ profile->maxOpenCount, profile->getTagName().c_str());
+ continue;
+ }
+
+ desc = new AudioInputDescriptor(profile, mpClientInterface);
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-
- ALOGV("opening inputput for device %08x with params %s profile %p name %s",
- desc->mDevice, address.string(), profile.get(), profile->getName().string());
-
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &desc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE /*FIXME*/);
+ status_t status = desc->open(nullptr,
+ device,
+ address,
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
-
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
mpClientInterface->setParameters(input, String8(param));
@@ -4209,7 +4140,7 @@
updateAudioProfiles(device, input, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkInputsForDevice() direct input missing param");
- mpClientInterface->closeInput(input);
+ desc->close();
input = AUDIO_IO_HANDLE_NONE;
}
@@ -4317,11 +4248,8 @@
mpClientInterface->onAudioPatchListUpdate();
}
- AudioParameter param;
- param.add(String8("closing"), String8("true"));
- mpClientInterface->setParameters(output, param.toString());
+ outputDesc->close();
- mpClientInterface->closeOutput(output);
removeOutput(output);
mPreviousOutputs = mOutputs;
}
@@ -4346,7 +4274,7 @@
mpClientInterface->onAudioPatchListUpdate();
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
mInputs.removeItem(input);
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 11894dc..2d41bd1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -601,20 +601,15 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo);
+ const audio_config_t *config,
+ audio_output_flags_t flags);
// internal method to return the input handle for the given device and format
audio_io_handle_t getInputForDevice(audio_devices_t device,
String8 address,
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix);
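
The header change above replaces the separate samplingRate/format/channelMask parameters with a single config pointer. A minimal sketch of that refactoring style, using a stand-in struct rather than the real audio_config_base_t:

// Standalone sketch: passing one config struct instead of three loose parameters,
// mirroring the audio_config_base_t refactor in the header change above.
#include <cstdint>
#include <cstdio>

struct AudioConfigBase {            // stand-in for audio_config_base_t
    uint32_t sample_rate;
    uint32_t channel_mask;
    int      format;
};

// Old style: every call site threads three values around.
void openInputOld(uint32_t sampleRate, uint32_t channelMask, int format) {
    printf("old: %u Hz, mask 0x%x, format %d\n", sampleRate, channelMask, format);
}

// New style: one pointer, so adding a field later does not change every signature.
void openInputNew(const AudioConfigBase* config) {
    printf("new: %u Hz, mask 0x%x, format %d\n",
           config->sample_rate, config->channel_mask, config->format);
}

int main() {
    AudioConfigBase config{48000, 0x3, 1};
    openInputOld(config.sample_rate, config.channel_mask, config.format);
    openInputNew(&config);
    return 0;
}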
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index bd94e3e..1ee5ccf 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -278,8 +278,8 @@
return NO_INIT;
}
// already checked by client, but double-check in case the client wrapper is bypassed
- if (attr->source >= AUDIO_SOURCE_CNT && attr->source != AUDIO_SOURCE_HOTWORD &&
- attr->source != AUDIO_SOURCE_FM_TUNER) {
+ if (attr->source < AUDIO_SOURCE_DEFAULT || (attr->source >= AUDIO_SOURCE_CNT &&
+ attr->source != AUDIO_SOURCE_HOTWORD && attr->source != AUDIO_SOURCE_FM_TUNER)) {
return BAD_VALUE;
}
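
The source check above is meant to reject values outside the ordinary range while still allowing the HOTWORD and FM_TUNER extensions; with `&&` between the two range tests the condition could never be true, so the check rejects nothing, which is why it is written with `||` here. A small standalone sketch of the corrected range check, with made-up enum values standing in for audio_source_t:

// Standalone sketch of the corrected range check: a source is rejected when it is
// negative or past the last ordinary source, unless it is one of the explicit extensions.
#include <cstdio>

enum AudioSource {                 // reduced, illustrative stand-in for audio_source_t
    SOURCE_DEFAULT = 0,
    SOURCE_MIC = 1,
    SOURCE_CNT = 10,               // hypothetical count of "ordinary" sources
    SOURCE_FM_TUNER = 1998,
    SOURCE_HOTWORD = 1999,
};

bool isValidSource(int source) {
    if (source < SOURCE_DEFAULT ||
            (source >= SOURCE_CNT && source != SOURCE_HOTWORD && source != SOURCE_FM_TUNER)) {
        return false;              // corresponds to returning BAD_VALUE
    }
    return true;
}

int main() {
    printf("MIC     -> %d\n", isValidSource(SOURCE_MIC));      // 1: accepted
    printf("-1      -> %d\n", isValidSource(-1));              // 0: rejected
    printf("HOTWORD -> %d\n", isValidSource(SOURCE_HOTWORD));  // 1: accepted extension
    printf("42      -> %d\n", isValidSource(42));              // 0: rejected
    return 0;
}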
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 2cf648f..585d2eb 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -859,6 +859,12 @@
outputStreams.push(getPreviewStreamId());
+ if (params.isDeviceZslSupported) {
+ // If device ZSL is supported, resume preview buffers that may be paused
+ // during last takePicture().
+ mDevice->dropStreamBuffers(false, getPreviewStreamId());
+ }
+
if (!params.recordingHint) {
if (!restart) {
res = mStreamingProcessor->updatePreviewRequest(params);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index a407d0b..910dd78 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -136,7 +136,12 @@
const char *enddump = "\n\n";
write(fd, enddump, strlen(enddump));
- return mHardware->dump(fd, args);
+ sp<CameraHardwareInterface> hardware = mHardware;
+ if (hardware != nullptr) {
+ return hardware->dump(fd, args);
+ }
+ ALOGI("%s: camera device closed already, skip dumping", __FUNCTION__);
+ return OK;
}
// ----------------------------------------------------------------------------
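
The dump() change above copies the member pointer into a local strong reference before using it, so a concurrent disconnect that clears mHardware cannot leave dump() dereferencing a dangling pointer. A minimal sketch of that pattern, using std::shared_ptr as a stand-in for android::sp (in fully concurrent code the copy itself would also need the caller's lock):

// Standalone sketch of the dump() fix above: take a local strong reference first,
// then null-check it, instead of dereferencing the member directly.
#include <cstdio>
#include <memory>

struct HardwareInterface {
    void dump() const { printf("hardware state...\n"); }
};

struct Client {
    std::shared_ptr<HardwareInterface> mHardware;  // may be reset by disconnect()

    void dumpUnsafe() {
        mHardware->dump();                         // crashes if mHardware was just cleared
    }

    void dumpSafe() {
        std::shared_ptr<HardwareInterface> hardware = mHardware;  // local copy keeps it alive
        if (hardware != nullptr) {
            hardware->dump();
        } else {
            printf("camera device closed already, skip dumping\n");
        }
    }
};

int main() {
    Client client;
    client.mHardware = std::make_shared<HardwareInterface>();
    client.dumpSafe();      // dumps
    client.mHardware.reset();
    client.dumpSafe();      // skips cleanly instead of dereferencing null
    return 0;
}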
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index b65f1c7..1ee216f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -553,6 +553,12 @@
return DONE;
}
+ if (l.mParameters.isDeviceZslSupported) {
+ // If device ZSL is supported, drop all pending preview buffers to reduce the chance of
+ // rendering preview frames newer than the still frame.
+ client->getCameraDevice()->dropStreamBuffers(true, client->getPreviewStreamId());
+ }
+
/**
* Clear the streaming request for still-capture pictures
* (as opposed to i.e. video snapshots)
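
Taken together with the Camera2Client change earlier, the two hunks above pause preview buffer delivery when a device-ZSL still capture is issued and resume it when preview restarts, so the preview never shows frames newer than the still. A standalone sketch of that pause/resume pairing; the names (PreviewStream, takePicture, startPreview) are illustrative only:

// Standalone sketch of the device-ZSL pause/resume pairing added above.
#include <cstdio>

struct PreviewStream {
    bool dropping = false;
    // Mirrors dropBuffers(bool): true = cancel incoming buffers, false = deliver them.
    void dropBuffers(bool d) { dropping = d; }
    void onBuffer(int frame) {
        if (dropping) printf("frame %d dropped (still capture in flight)\n", frame);
        else          printf("frame %d rendered\n", frame);
    }
};

void takePicture(PreviewStream& preview, bool deviceZslSupported) {
    if (deviceZslSupported) {
        preview.dropBuffers(true);   // keep preview from showing frames newer than the still
    }
    printf("still capture issued\n");
}

void startPreview(PreviewStream& preview, bool deviceZslSupported) {
    if (deviceZslSupported) {
        preview.dropBuffers(false);  // resume buffers that were paused by takePicture()
    }
    printf("preview (re)started\n");
}

int main() {
    PreviewStream preview;
    preview.onBuffer(1);
    takePicture(preview, /*deviceZslSupported=*/true);
    preview.onBuffer(2);             // dropped
    startPreview(preview, true);
    preview.onBuffer(3);             // rendered again
    return 0;
}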
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 68384b0..f1f96c3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -359,6 +359,12 @@
const std::vector<android::camera3::OutputStreamInfo> &outputInfo,
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * If dropping is true, drop (cancel) all buffers returned for the stream identified by
+ * streamId; if dropping is false, resume delivering buffers for that stream.
+ */
+ virtual status_t dropStreamBuffers(bool /*dropping*/, int /*streamId*/) = 0;
};
}; // namespace android
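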
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index e0a2dd4..c0db8e7 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2039,6 +2039,20 @@
return res;
}
+status_t Camera3Device::dropStreamBuffers(bool dropping, int streamId) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ int idx = mOutputStreams.indexOfKey(streamId);
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Stream %d is not found.", __FUNCTION__, streamId);
+ return BAD_VALUE;
+ }
+
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.editValueAt(idx);
+ return stream->dropBuffers(dropping);
+}
+
/**
* Camera3Device private methods
*/
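
The new Camera3Device::dropStreamBuffers() above is a lookup-and-forward method: validate the stream id under the device locks, then delegate to the matching stream object. A reduced sketch of the same shape, with std::map and std::mutex standing in for KeyedVector and the interface/device locks:

// Standalone sketch of the dispatch added above: validate the id, then forward.
#include <cstdio>
#include <map>
#include <memory>
#include <mutex>

struct OutputStream {
    bool dropping = false;
    int dropBuffers(bool d) { dropping = d; return 0; }   // 0 stands in for OK
};

struct Device {
    std::mutex mLock;
    std::map<int, std::shared_ptr<OutputStream>> mOutputStreams;

    int dropStreamBuffers(bool dropping, int streamId) {
        std::lock_guard<std::mutex> l(mLock);
        auto it = mOutputStreams.find(streamId);
        if (it == mOutputStreams.end()) {
            printf("stream %d not found\n", streamId);
            return -22;                                    // stands in for BAD_VALUE
        }
        return it->second->dropBuffers(dropping);
    }
};

int main() {
    Device dev;
    dev.mOutputStreams[7] = std::make_shared<OutputStream>();
    printf("drop on 7 -> %d\n", dev.dropStreamBuffers(true, 7));   // forwarded, returns 0
    printf("drop on 9 -> %d\n", dev.dropStreamBuffers(true, 9));   // unknown id, error
    return 0;
}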
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 357b893..e9466ab 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -192,6 +192,12 @@
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
+ /**
+ * If dropping is true, drop (cancel) all buffers returned for the stream identified by
+ * streamId; if dropping is false, resume delivering buffers for that stream.
+ */
+ status_t dropStreamBuffers(bool dropping, int streamId) override;
+
private:
// internal typedefs
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 4b36ea2..0a245c4 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -108,6 +108,10 @@
return false;
}
+status_t Camera3DummyStream::dropBuffers(bool /*dropping*/) {
+ return OK;
+}
+
status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
__FUNCTION__, mId);
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 3212031..684f4b0 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -57,6 +57,12 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
/**
+ * If dropping is true, drop buffers for this stream; if false, deliver them normally.
+ * The dummy stream has no real consumer, so this is implemented as a no-op.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) override;
+
+ /**
* Return if this output stream is for video encoding.
*/
bool isVideoStream() const;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 329172a..e79eecc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -44,6 +44,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (mConsumer == NULL) {
@@ -70,6 +71,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -100,6 +102,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
// Deferred consumer only support preview surface format now.
if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
@@ -139,6 +142,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
@@ -227,9 +231,14 @@
/**
* Return buffer back to ANativeWindow
*/
- if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR || mDropBuffers) {
// Cancel buffer
- ALOGW("A frame is dropped for stream %d", mId);
+ if (mDropBuffers) {
+ ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
+ } else {
+ ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
+ }
+
res = currentConsumer->cancelBuffer(currentConsumer.get(),
anwBuffer,
anwReleaseFence);
@@ -785,6 +794,12 @@
return res;
}
+status_t Camera3OutputStream::dropBuffers(bool dropping) {
+ Mutex::Autolock l(mLock);
+ mDropBuffers = dropping;
+ return OK;
+}
+
status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
return OK;
}
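
The returnBuffer path above now cancels a buffer either when it carries an error status or when the stream is in drop mode, and only queues it for display otherwise. A standalone sketch of that decision, with stand-in types for the consumer and the buffer status:

// Standalone sketch of the cancel-vs-queue decision in the return-buffer path above.
#include <cstdio>

enum BufferStatus { BUFFER_OK, BUFFER_ERROR };

struct Consumer {
    void cancelBuffer(int buf) { printf("buffer %d cancelled\n", buf); }
    void queueBuffer(int buf)  { printf("buffer %d queued for display\n", buf); }
};

void returnBuffer(Consumer& consumer, int buf, BufferStatus status, bool dropBuffers) {
    if (status == BUFFER_ERROR || dropBuffers) {
        // Mirrors the diff: a quiet drop when requested, a warning-worthy drop on error.
        if (dropBuffers) printf("dropping frame on request\n");
        else             printf("dropping frame due to buffer error\n");
        consumer.cancelBuffer(buf);
    } else {
        consumer.queueBuffer(buf);
    }
}

int main() {
    Consumer c;
    returnBuffer(c, 1, BUFFER_OK,    /*dropBuffers=*/false);  // queued
    returnBuffer(c, 2, BUFFER_OK,    /*dropBuffers=*/true);   // cancelled quietly
    returnBuffer(c, 3, BUFFER_ERROR, /*dropBuffers=*/false);  // cancelled with warning
    return 0;
}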
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index fbb14fe..18b1901 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -166,6 +166,11 @@
virtual status_t notifyBufferReleased(ANativeWindowBuffer *anwBuffer);
/**
+ * If dropping is true, cancel returned buffers instead of queueing them; if false, queue them normally.
+ */
+ virtual status_t dropBuffers(bool dropping) override;
+
+ /**
* Set the graphic buffer manager to get/return the stream buffers.
*
* It is only legal to call this method when stream is in STATE_CONSTRUCTED state.
@@ -260,6 +265,9 @@
*/
uint64_t mConsumerUsage;
+ // Whether to drop valid buffers.
+ bool mDropBuffers;
+
/**
* Internal Camera3Stream interface
*/
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index edfbab1..08fcf38 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -73,6 +73,11 @@
const std::vector<OutputStreamInfo> &outputInfo,
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * If dropping is true, stop delivering (drop) buffers for this stream; if false, deliver them normally.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) = 0;
};
} // namespace camera3