Merge "AImageReader: Add support for private format" into oc-mr1-dev
diff --git a/media/libaaudio/examples/loopback/jni/Android.mk b/media/libaaudio/examples/loopback/jni/Android.mk
index dc933e3..d78f286 100644
--- a/media/libaaudio/examples/loopback/jni/Android.mk
+++ b/media/libaaudio/examples/loopback/jni/Android.mk
@@ -4,7 +4,8 @@
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
# NDK recommends using this kind of relative path instead of an absolute path.
LOCAL_SRC_FILES:= ../src/loopback.cpp
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
new file mode 100644
index 0000000..21cf341
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tools for measuring latency and for detecting glitches.
+ * These classes are pure math and can be used with any audio system.
+ */
+
+#ifndef AAUDIO_EXAMPLES_LOOPBACK_ANALYSER_H
+#define AAUDIO_EXAMPLES_LOOPBACK_ANALYSER_H
+
+#include <algorithm>
+#include <assert.h>
+#include <cctype>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+// Tag for machine readable results as property = value pairs
+#define LOOPBACK_RESULT_TAG "RESULT: "
+#define LOOPBACK_SAMPLE_RATE 48000
+
+#define MILLIS_PER_SECOND 1000
+
+#define MAX_ZEROTH_PARTIAL_BINS 40
+
+static const float s_Impulse[] = {
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.2f, // silence on each side of the impulse
+ 0.5f, 0.9999f, 0.0f, -0.9999f, -0.5f, // bipolar
+ -0.2f, 0.0f, 0.0f, 0.0f, 0.0f
+};
+
+class PseudoRandom {
+public:
+ PseudoRandom() {}
+ PseudoRandom(int64_t seed)
+ : mSeed(seed)
+ {}
+
+ /**
+ * Returns the next random double from -1.0 to 1.0
+ *
+ * @return value from -1.0 to 1.0
+ */
+ double nextRandomDouble() {
+ return nextRandomInteger() * (0.5 / (((int32_t)1) << 30));
+ }
+
+ /** Calculate random 32 bit number using linear-congruential method. */
+ int32_t nextRandomInteger() {
+ // Use values for 64-bit sequence from MMIX by Donald Knuth.
+ mSeed = (mSeed * (int64_t)6364136223846793005) + (int64_t)1442695040888963407;
+ return (int32_t) (mSeed >> 32); // The higher bits have a longer sequence.
+ }
+
+private:
+ int64_t mSeed = 99887766;
+};
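// [Editor's sketch, not part of the patch] Minimal usage of the generator above:
// nextRandomInteger() returns the top 32 bits of Knuth's 64-bit MMIX
// linear-congruential sequence, and nextRandomDouble() scales that int32_t by
// 2^-31, so the result always lies in [-1.0, 1.0). SineAnalyzer below uses it
// as a white-noise source when mNoiseAmplitude is non-zero.
//
//     PseudoRandom prng(12345);
//     for (int i = 0; i < 8; i++) {
//         double noise = prng.nextRandomDouble();
//         assert(noise >= -1.0 && noise < 1.0);
//     }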
+
+static double calculateCorrelation(const float *a,
+ const float *b,
+ int windowSize)
+{
+ double correlation = 0.0;
+ double sumProducts = 0.0;
+ double sumSquares = 0.0;
+
+ // Correlate a against b.
+ for (int i = 0; i < windowSize; i++) {
+ float s1 = a[i];
+ float s2 = b[i];
+ // Use a normalized cross-correlation.
+ sumProducts += s1 * s2;
+ sumSquares += ((s1 * s1) + (s2 * s2));
+ }
+
+ if (sumSquares >= 0.00000001) {
+ correlation = (float) (2.0 * sumProducts / sumSquares);
+ }
+ return correlation;
+}
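// [Editor's note, not part of the patch] The normalization used above is
// 2 * sum(a*b) / sum(a*a + b*b), so the result is amplitude independent:
// a window correlated against itself gives ~1.0, against its negation ~-1.0,
// and against an unrelated signal something near 0. A quick sanity check:
//
//     float tone[64];
//     for (int i = 0; i < 64; i++) {
//         tone[i] = 0.3f * sinf(2.0f * (float) M_PI * i / 16.0f);
//     }
//     // calculateCorrelation(tone, tone, 64) is ~1.0 regardless of the 0.3f scale.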
+
+static int calculateCorrelations(const float *haystack, int haystackSize,
+ const float *needle, int needleSize,
+ float *results, int resultSize)
+{
+ int maxCorrelations = haystackSize - needleSize;
+ int numCorrelations = std::min(maxCorrelations, resultSize);
+
+ for (int ic = 0; ic < numCorrelations; ic++) {
+ double correlation = calculateCorrelation(&haystack[ic], needle, needleSize);
+ results[ic] = correlation;
+ }
+
+ return numCorrelations;
+}
+
+/*==========================================================================================*/
+/**
+ * Scan until the correlation rises above the threshold, peaks, and then
+ * drops back down.
+ */
+static double findFirstMatch(const float *haystack, int haystackSize,
+ const float *needle, int needleSize, double threshold )
+{
+ int ic;
+ // How many correlations can we calculate?
+ int numCorrelations = haystackSize - needleSize;
+ double maxCorrelation = 0.0;
+ int peakIndex = -1;
+ double location = -1.0;
+ const double backThresholdScaler = 0.5;
+
+ for (ic = 0; ic < numCorrelations; ic++) {
+ double correlation = calculateCorrelation(&haystack[ic], needle, needleSize);
+
+ if( (correlation > maxCorrelation) ) {
+ maxCorrelation = correlation;
+ peakIndex = ic;
+ }
+
+ //printf("PaQa_FindFirstMatch: ic = %4d, correlation = %8f, maxSum = %8f\n",
+ // ic, correlation, maxSum );
+ // Are we past what we were looking for?
+ if((maxCorrelation > threshold) && (correlation < backThresholdScaler * maxCorrelation)) {
+ location = peakIndex;
+ break;
+ }
+ }
+
+ return location;
+}
+
+typedef struct LatencyReport_s {
+ double latencyInFrames;
+ double confidence;
+} LatencyReport;
+
+// Apply a technique similar to Harmonic Product Spectrum Analysis to find the echo fundamental.
+// Use the first echo instead of the original impulse because it matches the later echoes better.
+static int measureLatencyFromEchos(const float *haystack, int haystackSize,
+ const float *needle, int needleSize,
+ LatencyReport *report) {
+ const double threshold = 0.1;
+
+ // Find first peak
+ int first = (int) (findFirstMatch(haystack,
+ haystackSize,
+ needle,
+ needleSize,
+ threshold) + 0.5);
+
+ // Use first echo as the needle for the other echos because
+ // it will be more similar.
+ needle = &haystack[first];
+ int again = (int) (findFirstMatch(haystack,
+ haystackSize,
+ needle,
+ needleSize,
+ threshold) + 0.5);
+
+ printf("first = %d, again at %d\n", first, again);
+ first = again;
+
+ // Allocate results array
+ int remaining = haystackSize - first;
+ const int maxReasonableLatencyFrames = 48000 * 2; // arbitrary but generous value
+ int numCorrelations = std::min(remaining, maxReasonableLatencyFrames);
+ float *correlations = new float[numCorrelations];
+ float *harmonicSums = new float[numCorrelations](); // set to zero
+
+ // Generate correlation for every position.
+ numCorrelations = calculateCorrelations(&haystack[first], remaining,
+ needle, needleSize,
+ correlations, numCorrelations);
+
+ // Add higher harmonics mapped onto lower harmonics.
+ // This reinforces the "fundamental" echo.
+ const int numEchoes = 10;
+ for (int partial = 1; partial < numEchoes; partial++) {
+ for (int i = 0; i < numCorrelations; i++) {
+ harmonicSums[i / partial] += correlations[i] / partial;
+ }
+ }
+
+ // Find highest peak in correlation array.
+ float maxCorrelation = 0.0;
+ float sumOfPeaks = 0.0;
+ int peakIndex = 0;
+ const int skip = MAX_ZEROTH_PARTIAL_BINS; // skip low bins
+ for (int i = skip; i < numCorrelations; i++) {
+ if (harmonicSums[i] > maxCorrelation) {
+ maxCorrelation = harmonicSums[i];
+ sumOfPeaks += maxCorrelation;
+ peakIndex = i;
+ printf("maxCorrelation = %f at %d\n", maxCorrelation, peakIndex);
+ }
+ }
+
+ report->latencyInFrames = peakIndex;
+ if (sumOfPeaks < 0.0001) {
+ report->confidence = 0.0;
+ } else {
+ report->confidence = maxCorrelation / sumOfPeaks;
+ }
+
+ delete[] correlations;
+ delete[] harmonicSums;
+ return 0;
+}
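// [Editor's note, not part of the patch] A worked example of the harmonic
// folding loop above: if the echo repeats every 100 frames, the correlation
// array has peaks near bins 100, 200, 300, ... and the fold maps bin i of
// partial p onto bin i/p, so those peaks all reinforce bin 100:
//
//     partial = 1:  harmonicSums[100] += correlations[100] / 1
//     partial = 2:  harmonicSums[100] += correlations[200] / 2
//     partial = 3:  harmonicSums[100] += correlations[300] / 3
//
// The largest bin in harmonicSums (ignoring the first MAX_ZEROTH_PARTIAL_BINS
// bins) is therefore an estimate of the echo period, i.e. the round-trip
// latency in frames.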
+
+class AudioRecording
+{
+public:
+ AudioRecording() {
+ }
+ ~AudioRecording() {
+ delete[] mData;
+ }
+
+ void allocate(int maxFrames) {
+ delete[] mData;
+ mData = new float[maxFrames];
+ mMaxFrames = maxFrames;
+ }
+
+ // Write SHORT data from the first channel.
+ int write(int16_t *inputData, int inputChannelCount, int numFrames) {
+ // stop at end of buffer
+ if ((mFrameCounter + numFrames) > mMaxFrames) {
+ numFrames = mMaxFrames - mFrameCounter;
+ }
+ for (int i = 0; i < numFrames; i++) {
+ mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
+ }
+ return numFrames;
+ }
+
+ // Write FLOAT data from the first channel.
+ int write(float *inputData, int inputChannelCount, int numFrames) {
+ // stop at end of buffer
+ if ((mFrameCounter + numFrames) > mMaxFrames) {
+ numFrames = mMaxFrames - mFrameCounter;
+ }
+ for (int i = 0; i < numFrames; i++) {
+ mData[mFrameCounter++] = inputData[i * inputChannelCount];
+ }
+ return numFrames;
+ }
+
+ int size() {
+ return mFrameCounter;
+ }
+
+ float *getData() {
+ return mData;
+ }
+
+ int save(const char *fileName, bool writeShorts = true) {
+ int written = 0;
+ const int chunkSize = 64;
+ FILE *fid = fopen(fileName, "wb");
+ if (fid == NULL) {
+ return -errno;
+ }
+
+ if (writeShorts) {
+ int16_t buffer[chunkSize];
+ int32_t framesLeft = mFrameCounter;
+ int32_t cursor = 0;
+ while (framesLeft) {
+ int32_t framesToWrite = framesLeft < chunkSize ? framesLeft : chunkSize;
+ for (int i = 0; i < framesToWrite; i++) {
+ buffer[i] = (int16_t) (mData[cursor++] * 32767);
+ }
+ written += fwrite(buffer, sizeof(int16_t), framesToWrite, fid);
+ framesLeft -= framesToWrite;
+ }
+ } else {
+ written = (int) fwrite(mData, sizeof(float), mFrameCounter, fid);
+ }
+ fclose(fid);
+ return written;
+ }
+
+private:
+ float *mData = nullptr;
+ int32_t mFrameCounter = 0;
+ int32_t mMaxFrames = 0;
+};
+
+// ====================================================================================
+class LoopbackProcessor {
+public:
+ virtual ~LoopbackProcessor() = default;
+
+
+ virtual void reset() {}
+
+ virtual void process(float *inputData, int inputChannelCount,
+ float *outputData, int outputChannelCount,
+ int numFrames) = 0;
+
+
+ virtual void report() = 0;
+
+ virtual void printStatus() {};
+
+ virtual bool isDone() {
+ return false;
+ }
+
+ void setSampleRate(int32_t sampleRate) {
+ mSampleRate = sampleRate;
+ }
+
+ int32_t getSampleRate() {
+ return mSampleRate;
+ }
+
+ // Measure peak amplitude of buffer.
+ static float measurePeakAmplitude(float *inputData, int inputChannelCount, int numFrames) {
+ float peak = 0.0f;
+ for (int i = 0; i < numFrames; i++) {
+ float pos = fabs(*inputData);
+ if (pos > peak) {
+ peak = pos;
+ }
+ inputData += inputChannelCount;
+ }
+ return peak;
+ }
+
+
+private:
+ int32_t mSampleRate = LOOPBACK_SAMPLE_RATE;
+};
+
+class PeakDetector {
+public:
+ float process(float input) {
+ float output = mPrevious * mDecay;
+ if (input > output) {
+ output = input;
+ }
+ mPrevious = output;
+ return output;
+ }
+
+private:
+ float mDecay = 0.99f;
+ float mPrevious = 0.0f;
+};
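// [Editor's sketch, not part of the patch] PeakDetector is a simple envelope
// follower: the held peak decays by mDecay (0.99) on every call and is
// replaced whenever a larger input arrives. A full-scale sample followed by
// silence therefore decays as 0.99^n:
//
//     PeakDetector detector;
//     detector.process(1.0f);   // returns 1.0
//     detector.process(0.0f);   // returns 0.99
//     detector.process(0.0f);   // returns 0.9801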
+
+
+static void printAudioScope(float sample) {
+ const int maxStars = 80; // arbitrary, fits on one line
+ char c = '*';
+ if (sample < -1.0) {
+ sample = -1.0;
+ c = '$';
+ } else if (sample > 1.0) {
+ sample = 1.0;
+ c = '$';
+ }
+ int numSpaces = (int) (((sample + 1.0) * 0.5) * maxStars);
+ for (int i = 0; i < numSpaces; i++) {
+ putchar(' ');
+ }
+ printf("%c\n", c);
+}
+
+// ====================================================================================
+/**
+ * Measure latency given loopback stream data.
+ * Uses a state machine to cycle through the stages: initial silence,
+ * measuring the gain, waiting for silence, sending a pulse, and gathering echoes.
+ */
+class EchoAnalyzer : public LoopbackProcessor {
+public:
+
+ EchoAnalyzer() : LoopbackProcessor() {
+ audioRecorder.allocate(2 * LOOPBACK_SAMPLE_RATE);
+ }
+
+ void reset() override {
+ mDownCounter = 200;
+ mLoopCounter = 0;
+ mMeasuredLoopGain = 0.0f;
+ mEchoGain = 1.0f;
+ mState = STATE_INITIAL_SILENCE;
+ }
+
+ bool isDone() override {
+ return mState == STATE_DONE;
+ }
+
+ void setGain(float gain) {
+ mEchoGain = gain;
+ }
+
+ float getGain() {
+ return mEchoGain;
+ }
+
+ void report() override {
+
+ printf("EchoAnalyzer ---------------\n");
+ printf(LOOPBACK_RESULT_TAG "measured.gain = %f\n", mMeasuredLoopGain);
+ printf(LOOPBACK_RESULT_TAG "echo.gain = %f\n", mEchoGain);
+ printf(LOOPBACK_RESULT_TAG "frame.count = %d\n", mFrameCounter);
+ printf(LOOPBACK_RESULT_TAG "test.state = %d\n", mState);
+ if (mMeasuredLoopGain >= 0.9999) {
+ printf(" ERROR - clipping, turn down volume slightly\n");
+ } else {
+ const float *needle = s_Impulse;
+ int needleSize = (int) (sizeof(s_Impulse) / sizeof(float));
+ float *haystack = audioRecorder.getData();
+ int haystackSize = audioRecorder.size();
+ int result = measureLatencyFromEchos(haystack, haystackSize,
+ needle, needleSize,
+ &latencyReport);
+ if (latencyReport.confidence < 0.01) {
+ printf(" ERROR - confidence too low = %f\n", latencyReport.confidence);
+ } else {
+ double latencyMillis = 1000.0 * latencyReport.latencyInFrames / getSampleRate();
+ printf(LOOPBACK_RESULT_TAG "latency.frames = %8.2f\n", latencyReport.latencyInFrames);
+ printf(LOOPBACK_RESULT_TAG "latency.msec = %8.2f\n", latencyMillis);
+ printf(LOOPBACK_RESULT_TAG "latency.confidence = %8.6f\n", latencyReport.confidence);
+ }
+ }
+
+ {
+#define ECHO_FILENAME "/data/oboe_echo.raw"
+ int written = audioRecorder.save(ECHO_FILENAME);
+ printf("Echo wrote %d mono samples to %s on Android device\n", written, ECHO_FILENAME);
+ }
+ }
+
+ void printStatus() override {
+ printf("state = %d, echo gain = %f ", mState, mEchoGain);
+ }
+
+ static void sendImpulse(float *outputData, int outputChannelCount) {
+ for (float sample : s_Impulse) {
+ *outputData = sample;
+ outputData += outputChannelCount;
+ }
+ }
+
+ void process(float *inputData, int inputChannelCount,
+ float *outputData, int outputChannelCount,
+ int numFrames) override {
+ int channelsValid = std::min(inputChannelCount, outputChannelCount);
+ float peak = 0.0f;
+ int numWritten;
+ int numSamples;
+
+ echo_state_t nextState = mState;
+
+ switch (mState) {
+ case STATE_INITIAL_SILENCE:
+ // Output silence at the beginning.
+ numSamples = numFrames * outputChannelCount;
+ for (int i = 0; i < numSamples; i++) {
+ outputData[i] = 0;
+ }
+ if (mDownCounter-- <= 0) {
+ nextState = STATE_MEASURING_GAIN;
+ //printf("%5d: switch to STATE_MEASURING_GAIN\n", mLoopCounter);
+ mDownCounter = 8;
+ }
+ break;
+
+ case STATE_MEASURING_GAIN:
+ sendImpulse(outputData, outputChannelCount);
+ peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
+ // If we get several in a row then go to next state.
+ if (peak > mPulseThreshold) {
+ if (mDownCounter-- <= 0) {
+ nextState = STATE_WAITING_FOR_SILENCE;
+ //printf("%5d: switch to STATE_WAITING_FOR_SILENCE, measured peak = %f\n",
+ // mLoopCounter, peak);
+ mDownCounter = 8;
+ mMeasuredLoopGain = peak; // assumes original pulse amplitude is one
+ // Calculate gain that will give us a nice decaying echo.
+ mEchoGain = mDesiredEchoGain / mMeasuredLoopGain;
+ }
+ } else {
+ mDownCounter = 8;
+ }
+ break;
+
+ case STATE_WAITING_FOR_SILENCE:
+ // Output silence.
+ numSamples = numFrames * outputChannelCount;
+ for (int i = 0; i < numSamples; i++) {
+ outputData[i] = 0;
+ }
+ peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
+ // If we get several in a row then go to next state.
+ if (peak < mSilenceThreshold) {
+ if (mDownCounter-- <= 0) {
+ nextState = STATE_SENDING_PULSE;
+ //printf("%5d: switch to STATE_SENDING_PULSE\n", mLoopCounter);
+ mDownCounter = 8;
+ }
+ } else {
+ mDownCounter = 8;
+ }
+ break;
+
+ case STATE_SENDING_PULSE:
+ audioRecorder.write(inputData, inputChannelCount, numFrames);
+ sendImpulse(outputData, outputChannelCount);
+ nextState = STATE_GATHERING_ECHOS;
+ //printf("%5d: switch to STATE_GATHERING_ECHOS\n", mLoopCounter);
+ break;
+
+ case STATE_GATHERING_ECHOS:
+ numWritten = audioRecorder.write(inputData, inputChannelCount, numFrames);
+ peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
+ if (peak > mMeasuredLoopGain) {
+ mMeasuredLoopGain = peak; // AGC might be raising gain so adjust it on the fly.
+ // Recalculate gain that will give us a nice decaying echo.
+ mEchoGain = mDesiredEchoGain / mMeasuredLoopGain;
+ }
+ // Echo input to output.
+ for (int i = 0; i < numFrames; i++) {
+ int ic;
+ for (ic = 0; ic < channelsValid; ic++) {
+ outputData[ic] = inputData[ic] * mEchoGain;
+ }
+ for (; ic < outputChannelCount; ic++) {
+ outputData[ic] = 0;
+ }
+ inputData += inputChannelCount;
+ outputData += outputChannelCount;
+ }
+ if (numWritten < numFrames) {
+ nextState = STATE_DONE;
+ //printf("%5d: switch to STATE_DONE\n", mLoopCounter);
+ }
+ break;
+
+ case STATE_DONE:
+ default:
+ break;
+ }
+
+ mState = nextState;
+ mLoopCounter++;
+ }
+
+private:
+
+ enum echo_state_t {
+ STATE_INITIAL_SILENCE,
+ STATE_MEASURING_GAIN,
+ STATE_WAITING_FOR_SILENCE,
+ STATE_SENDING_PULSE,
+ STATE_GATHERING_ECHOS,
+ STATE_DONE
+ };
+
+ int mDownCounter = 500;
+ int mLoopCounter = 0;
+ int mLoopStart = 1000;
+ float mPulseThreshold = 0.02f;
+ float mSilenceThreshold = 0.002f;
+ float mMeasuredLoopGain = 0.0f;
+ float mDesiredEchoGain = 0.95f;
+ float mEchoGain = 1.0f;
+ echo_state_t mState = STATE_INITIAL_SILENCE;
+ int32_t mFrameCounter = 0;
+
+ AudioRecording audioRecorder;
+ LatencyReport latencyReport;
+ PeakDetector mPeakDetector;
+};
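// [Editor's sketch, not part of the patch] How a LoopbackProcessor such as
// EchoAnalyzer is driven; in this change the real driver is the AAudio data
// callback in loopback.cpp, and the buffer sizes and channel counts below are
// placeholders only:
//
//     EchoAnalyzer analyzer;
//     analyzer.setSampleRate(48000);
//     analyzer.reset();
//     float inputBuf[192];        // one burst of mono input
//     float outputBuf[2 * 192];   // one burst of stereo output
//     while (!analyzer.isDone()) {
//         // fill inputBuf from the microphone, then:
//         analyzer.process(inputBuf, 1, outputBuf, 2, 192);
//         // send outputBuf to the speaker
//     }
//     analyzer.report();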
+
+
+// ====================================================================================
+/**
+ * Output a steady sinewave and analyze the return signal.
+ *
+ * Use a cosine transform to measure the predicted magnitude and relative phase of the
+ * looped back sine wave. Then generate a predicted signal and compare with the actual signal.
+ */
+class SineAnalyzer : public LoopbackProcessor {
+public:
+
+ void report() override {
+ printf("SineAnalyzer ------------------\n");
+ printf(LOOPBACK_RESULT_TAG "peak.amplitude = %7.5f\n", mPeakAmplitude);
+ printf(LOOPBACK_RESULT_TAG "sine.magnitude = %7.5f\n", mMagnitude);
+ printf(LOOPBACK_RESULT_TAG "phase.offset = %7.5f\n", mPhaseOffset);
+ printf(LOOPBACK_RESULT_TAG "ref.phase = %7.5f\n", mPhase);
+ printf(LOOPBACK_RESULT_TAG "frames.accumulated = %6d\n", mFramesAccumulated);
+ printf(LOOPBACK_RESULT_TAG "sine.period = %6d\n", mPeriod);
+ printf(LOOPBACK_RESULT_TAG "test.state = %6d\n", mState);
+ printf(LOOPBACK_RESULT_TAG "frame.count = %6d\n", mFrameCounter);
+ // Did we ever get a lock?
+ bool gotLock = (mState == STATE_LOCKED) || (mGlitchCount > 0);
+ if (!gotLock) {
+ printf("ERROR - failed to lock on reference sine tone\n");
+ } else {
+ // Only print if meaningful.
+ printf(LOOPBACK_RESULT_TAG "glitch.count = %6d\n", mGlitchCount);
+ }
+ }
+
+ void printStatus() override {
+ printf(" state = %d, glitches = %d,", mState, mGlitchCount);
+ }
+
+ double calculateMagnitude(double *phasePtr = NULL) {
+ if (mFramesAccumulated == 0) {
+ return 0.0;
+ }
+ double sinMean = mSinAccumulator / mFramesAccumulated;
+ double cosMean = mCosAccumulator / mFramesAccumulated;
+ double magnitude = 2.0 * sqrt( (sinMean * sinMean) + (cosMean * cosMean ));
+ if( phasePtr != NULL )
+ {
+ double phase = M_PI_2 - atan2( sinMean, cosMean );
+ *phasePtr = phase;
+ }
+ return magnitude;
+ }
+
+ /**
+ * @param inputData contains microphone data with sine signal feedback
+ * @param outputData contains the reference sine wave
+ */
+ void process(float *inputData, int inputChannelCount,
+ float *outputData, int outputChannelCount,
+ int numFrames) override {
+ float peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
+ if (peak > mPeakAmplitude) {
+ mPeakAmplitude = peak;
+ }
+
+ for (int i = 0; i < numFrames; i++) {
+ float sample = inputData[i * inputChannelCount];
+
+ float sinOut = sinf(mPhase);
+
+ switch (mState) {
+ case STATE_IMMUNE:
+ case STATE_WAITING_FOR_SIGNAL:
+ break;
+ case STATE_WAITING_FOR_LOCK:
+ mSinAccumulator += sample * sinOut;
+ mCosAccumulator += sample * cosf(mPhase);
+ mFramesAccumulated++;
+ // Must be a multiple of the period or the calculation will not be accurate.
+ if (mFramesAccumulated == mPeriod * 4) {
+ mPhaseOffset = 0.0;
+ mMagnitude = calculateMagnitude(&mPhaseOffset);
+ if (mMagnitude > mThreshold) {
+ if (fabs(mPreviousPhaseOffset - mPhaseOffset) < 0.001) {
+ mState = STATE_LOCKED;
+ //printf("%5d: switch to STATE_LOCKED\n", mFrameCounter);
+ }
+ mPreviousPhaseOffset = mPhaseOffset;
+ }
+ resetAccumulator();
+ }
+ break;
+
+ case STATE_LOCKED: {
+ // Predict next sine value
+ float predicted = sinf(mPhase + mPhaseOffset) * mMagnitude;
+ // printf(" predicted = %f, actual = %f\n", predicted, sample);
+
+ float diff = predicted - sample;
+ if (fabs(diff) > mTolerance) {
+ mGlitchCount++;
+ //printf("%5d: Got a glitch # %d, predicted = %f, actual = %f\n",
+ // mFrameCounter, mGlitchCount, predicted, sample);
+ mState = STATE_IMMUNE;
+ //printf("%5d: switch to STATE_IMMUNE\n", mFrameCounter);
+ mDownCounter = mPeriod; // Set duration of IMMUNE state.
+ }
+ } break;
+ }
+
+ // Output sine wave so we can measure it.
+ outputData[i * outputChannelCount] = (sinOut * mOutputAmplitude)
+ + (mWhiteNoise.nextRandomDouble() * mNoiseAmplitude);
+ // printf("%5d: sin(%f) = %f, %f\n", i, mPhase, sinOut, mPhaseIncrement);
+
+ // advance and wrap phase
+ mPhase += mPhaseIncrement;
+ if (mPhase > M_PI) {
+ mPhase -= (2.0 * M_PI);
+ }
+
+ mFrameCounter++;
+ }
+
+ // Do these once per buffer.
+ switch (mState) {
+ case STATE_IMMUNE:
+ mDownCounter -= numFrames;
+ if (mDownCounter <= 0) {
+ mState = STATE_WAITING_FOR_SIGNAL;
+ //printf("%5d: switch to STATE_WAITING_FOR_SIGNAL\n", mFrameCounter);
+ }
+ break;
+ case STATE_WAITING_FOR_SIGNAL:
+ if (peak > mThreshold) {
+ mState = STATE_WAITING_FOR_LOCK;
+ //printf("%5d: switch to STATE_WAITING_FOR_LOCK\n", mFrameCounter);
+ resetAccumulator();
+ }
+ break;
+ case STATE_WAITING_FOR_LOCK:
+ case STATE_LOCKED:
+ break;
+ }
+
+ }
+
+ void resetAccumulator() {
+ mFramesAccumulated = 0;
+ mSinAccumulator = 0.0;
+ mCosAccumulator = 0.0;
+ }
+
+ void reset() override {
+ mGlitchCount = 0;
+ mState = STATE_IMMUNE;
+ mPhaseIncrement = 2.0 * M_PI / mPeriod;
+ printf("phaseInc = %f for period %d\n", mPhaseIncrement, mPeriod);
+ resetAccumulator();
+ }
+
+private:
+
+ enum sine_state_t {
+ STATE_IMMUNE,
+ STATE_WAITING_FOR_SIGNAL,
+ STATE_WAITING_FOR_LOCK,
+ STATE_LOCKED
+ };
+
+ int mPeriod = 79;
+ double mPhaseIncrement = 0.0;
+ double mPhase = 0.0;
+ double mPhaseOffset = 0.0;
+ double mPreviousPhaseOffset = 0.0;
+ double mMagnitude = 0.0;
+ double mThreshold = 0.005;
+ double mTolerance = 0.01;
+ int32_t mFramesAccumulated = 0;
+ double mSinAccumulator = 0.0;
+ double mCosAccumulator = 0.0;
+ int32_t mGlitchCount = 0;
+ double mPeakAmplitude = 0.0;
+ int mDownCounter = 4000;
+ int32_t mFrameCounter = 0;
+ float mOutputAmplitude = 0.75;
+
+ int32_t mZeroCrossings = 0;
+
+ PseudoRandom mWhiteNoise;
+ float mNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
+
+ sine_state_t mState = STATE_IMMUNE;
+};
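// [Editor's note, not part of the patch] The accumulators above implement a
// single-bin Fourier measurement of the looped-back tone. Over a window that
// is a whole number of periods (here 4 * mPeriod frames):
//
//     sinMean   = sum(x[n] * sin(phase[n])) / N
//     cosMean   = sum(x[n] * cos(phase[n])) / N
//     magnitude = 2 * sqrt(sinMean^2 + cosMean^2)
//     offset    = pi/2 - atan2(sinMean, cosMean)
//
// Once the phase offset stops changing the analyzer declares STATE_LOCKED;
// each later sample is compared against sin(phase + offset) * magnitude and a
// deviation larger than mTolerance is counted as a glitch.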
+
+
+#undef LOOPBACK_SAMPLE_RATE
+#undef LOOPBACK_RESULT_TAG
+
+#endif /* AAUDIO_EXAMPLES_LOOPBACK_ANALYSER_H */
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 57d45cd..144c941 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -14,8 +14,7 @@
* limitations under the License.
*/
-// Play an impulse and then record it.
-// Measure the round trip latency.
+// Audio loopback tests that measure round-trip latency and detect glitches.
#include <algorithm>
#include <assert.h>
@@ -28,460 +27,38 @@
#include <aaudio/AAudio.h>
#include <aaudio/AAudioTesting.h>
+#include "AAudioSimplePlayer.h"
+#include "AAudioSimpleRecorder.h"
+#include "AAudioExampleUtils.h"
+#include "LoopbackAnalyzer.h"
+
// Tag for machine readable results as property = value pairs
#define RESULT_TAG "RESULT: "
#define SAMPLE_RATE 48000
#define NUM_SECONDS 5
#define NUM_INPUT_CHANNELS 1
#define FILENAME "/data/oboe_input.raw"
-
-#define NANOS_PER_MICROSECOND ((int64_t)1000)
-#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
-#define MILLIS_PER_SECOND 1000
-#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
-
-#define MAX_ZEROTH_PARTIAL_BINS 40
-
-static const float s_Impulse[] = {
- 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, // silence on each side of the impulse
- 0.5f, 0.9f, 0.0f, -0.9f, -0.5f, // bipolar
- 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
-
-
-static double calculateCorrelation(const float *a,
- const float *b,
- int windowSize)
-{
- double correlation = 0.0;
- double sumProducts = 0.0;
- double sumSquares = 0.0;
-
- // Correlate a against b.
- for (int i = 0; i < windowSize; i++) {
- float s1 = a[i];
- float s2 = b[i];
- // Use a normalized cross-correlation.
- sumProducts += s1 * s2;
- sumSquares += ((s1 * s1) + (s2 * s2));
- }
-
- if (sumSquares >= 0.00000001) {
- correlation = (float) (2.0 * sumProducts / sumSquares);
- }
- return correlation;
-}
-
-static int calculateCorrelations(const float *haystack, int haystackSize,
- const float *needle, int needleSize,
- float *results, int resultSize)
-{
- int ic;
- int maxCorrelations = haystackSize - needleSize;
- int numCorrelations = std::min(maxCorrelations, resultSize);
-
- for (ic = 0; ic < numCorrelations; ic++) {
- double correlation = calculateCorrelation(&haystack[ic], needle, needleSize);
- results[ic] = correlation;
- }
-
- return numCorrelations;
-}
-
-/*==========================================================================================*/
-/**
- * Scan until we get a correlation of a single scan that goes over the tolerance level,
- * peaks then drops back down.
- */
-static double findFirstMatch(const float *haystack, int haystackSize,
- const float *needle, int needleSize, double threshold )
-{
- int ic;
- // How many correlations can we calculate?
- int numCorrelations = haystackSize - needleSize;
- double maxCorrelation = 0.0;
- int peakIndex = -1;
- double location = -1.0;
-
- for (ic = 0; ic < numCorrelations; ic++) {
- double correlation = calculateCorrelation(&haystack[ic], needle, needleSize);
-
- if( (correlation > maxCorrelation) ) {
- maxCorrelation = correlation;
- peakIndex = ic;
- }
-
- //printf("PaQa_FindFirstMatch: ic = %4d, correlation = %8f, maxSum = %8f\n",
- // ic, correlation, maxSum );
- // Are we past what we were looking for?
- if((maxCorrelation > threshold) && (correlation < 0.5 * maxCorrelation)) {
- location = peakIndex;
- break;
- }
- }
-
- return location;
-}
-
-typedef struct LatencyReport_s {
- double latencyInFrames;
- double confidence;
-} LatencyReport;
-
-// Apply a technique similar to Harmonic Product Spectrum Analysis to find echo fundamental.
-// Using first echo instead of the original impulse for a better match.
-int measureLatencyFromEchos(const float *haystack, int haystackSize,
- const float *needle, int needleSize,
- LatencyReport *report) {
- double threshold = 0.1;
-
- // Find first peak
- int first = (int) (findFirstMatch(haystack,
- haystackSize,
- needle,
- needleSize,
- threshold) + 0.5);
-
- // Use first echo as the needle for the other echos because
- // it will be more similar.
- needle = &haystack[first];
- int again = (int) (findFirstMatch(haystack,
- haystackSize,
- needle,
- needleSize,
- threshold) + 0.5);
-
- printf("first = %d, again at %d\n", first, again);
- first = again;
-
- // Allocate results array
- int remaining = haystackSize - first;
- int generous = 48000 * 2;
- int numCorrelations = std::min(remaining, generous);
- float *correlations = new float[numCorrelations];
- float *harmonicSums = new float[numCorrelations](); // cleared to zero
-
- // Generate correlation for every position.
- numCorrelations = calculateCorrelations(&haystack[first], remaining,
- needle, needleSize,
- correlations, numCorrelations);
-
- // Add higher harmonics mapped onto lower harmonics.
- // This reinforces the "fundamental" echo.
- const int numEchoes = 10;
- for (int partial = 1; partial < numEchoes; partial++) {
- for (int i = 0; i < numCorrelations; i++) {
- harmonicSums[i / partial] += correlations[i] / partial;
- }
- }
-
- // Find highest peak in correlation array.
- float maxCorrelation = 0.0;
- float sumOfPeaks = 0.0;
- int peakIndex = 0;
- const int skip = MAX_ZEROTH_PARTIAL_BINS; // skip low bins
- for (int i = skip; i < numCorrelations; i++) {
- if (harmonicSums[i] > maxCorrelation) {
- maxCorrelation = harmonicSums[i];
- sumOfPeaks += maxCorrelation;
- peakIndex = i;
- printf("maxCorrelation = %f at %d\n", maxCorrelation, peakIndex);
- }
- }
-
- report->latencyInFrames = peakIndex;
- if (sumOfPeaks < 0.0001) {
- report->confidence = 0.0;
- } else {
- report->confidence = maxCorrelation / sumOfPeaks;
- }
-
- delete[] correlations;
- delete[] harmonicSums;
- return 0;
-}
-
-class AudioRecording
-{
-public:
- AudioRecording() {
- }
- ~AudioRecording() {
- delete[] mData;
- }
-
- void allocate(int maxFrames) {
- delete[] mData;
- mData = new float[maxFrames];
- mMaxFrames = maxFrames;
- }
-
- // Write SHORT data from the first channel.
- int write(int16_t *inputData, int inputChannelCount, int numFrames) {
- // stop at end of buffer
- if ((mFrameCounter + numFrames) > mMaxFrames) {
- numFrames = mMaxFrames - mFrameCounter;
- }
- for (int i = 0; i < numFrames; i++) {
- mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
- }
- return numFrames;
- }
-
- // Write FLOAT data from the first channel.
- int write(float *inputData, int inputChannelCount, int numFrames) {
- // stop at end of buffer
- if ((mFrameCounter + numFrames) > mMaxFrames) {
- numFrames = mMaxFrames - mFrameCounter;
- }
- for (int i = 0; i < numFrames; i++) {
- mData[mFrameCounter++] = inputData[i * inputChannelCount];
- }
- return numFrames;
- }
-
- int size() {
- return mFrameCounter;
- }
-
- float *getData() {
- return mData;
- }
-
- int save(const char *fileName, bool writeShorts = true) {
- int written = 0;
- const int chunkSize = 64;
- FILE *fid = fopen(fileName, "wb");
- if (fid == NULL) {
- return -errno;
- }
-
- if (writeShorts) {
- int16_t buffer[chunkSize];
- int32_t framesLeft = mFrameCounter;
- int32_t cursor = 0;
- while (framesLeft) {
- int32_t framesToWrite = framesLeft < chunkSize ? framesLeft : chunkSize;
- for (int i = 0; i < framesToWrite; i++) {
- buffer[i] = (int16_t) (mData[cursor++] * 32767);
- }
- written += fwrite(buffer, sizeof(int16_t), framesToWrite, fid);
- framesLeft -= framesToWrite;
- }
- } else {
- written = fwrite(mData, sizeof(float), mFrameCounter, fid);
- }
- fclose(fid);
- return written;
- }
-
-private:
- float *mData = nullptr;
- int32_t mFrameCounter = 0;
- int32_t mMaxFrames = 0;
-};
-
-// ====================================================================================
-class LoopbackProcessor {
-public:
- virtual ~LoopbackProcessor() = default;
-
- virtual void process(float *inputData, int inputChannelCount,
- float *outputData, int outputChannelCount,
- int numFrames) = 0;
-
-
- virtual void report() = 0;
-
- void setSampleRate(int32_t sampleRate) {
- mSampleRate = sampleRate;
- }
-
- int32_t getSampleRate() {
- return mSampleRate;
- }
-
-private:
- int32_t mSampleRate = SAMPLE_RATE;
-};
-
-
-// ====================================================================================
-class EchoAnalyzer : public LoopbackProcessor {
-public:
-
- EchoAnalyzer() : LoopbackProcessor() {
- audioRecorder.allocate(NUM_SECONDS * SAMPLE_RATE);
- }
-
- void setGain(float gain) {
- mGain = gain;
- }
-
- float getGain() {
- return mGain;
- }
-
- void report() override {
-
- const float *needle = s_Impulse;
- int needleSize = (int)(sizeof(s_Impulse) / sizeof(float));
- float *haystack = audioRecorder.getData();
- int haystackSize = audioRecorder.size();
- int result = measureLatencyFromEchos(haystack, haystackSize,
- needle, needleSize,
- &latencyReport);
- if (latencyReport.confidence < 0.01) {
- printf(" ERROR - confidence too low = %f\n", latencyReport.confidence);
- } else {
- double latencyMillis = 1000.0 * latencyReport.latencyInFrames / getSampleRate();
- printf(RESULT_TAG "latency.frames = %8.2f\n", latencyReport.latencyInFrames);
- printf(RESULT_TAG "latency.msec = %8.2f\n", latencyMillis);
- printf(RESULT_TAG "latency.confidence = %8.6f\n", latencyReport.confidence);
- }
- }
-
- void process(float *inputData, int inputChannelCount,
- float *outputData, int outputChannelCount,
- int numFrames) override {
- int channelsValid = std::min(inputChannelCount, outputChannelCount);
-
- audioRecorder.write(inputData, inputChannelCount, numFrames);
-
- if (mLoopCounter < mLoopStart) {
- // Output silence at the beginning.
- for (int i = 0; i < numFrames; i++) {
- int ic;
- for (ic = 0; ic < outputChannelCount; ic++) {
- outputData[ic] = 0;
- }
- inputData += inputChannelCount;
- outputData += outputChannelCount;
- }
- } else if (mLoopCounter == mLoopStart) {
- // Send a bipolar impulse that we can easily detect.
- for (float sample : s_Impulse) {
- *outputData = sample;
- outputData += outputChannelCount;
- }
- } else {
- // Echo input to output.
- for (int i = 0; i < numFrames; i++) {
- int ic;
- for (ic = 0; ic < channelsValid; ic++) {
- outputData[ic] = inputData[ic] * mGain;
- }
- for (; ic < outputChannelCount; ic++) {
- outputData[ic] = 0;
- }
- inputData += inputChannelCount;
- outputData += outputChannelCount;
- }
- }
-
- mLoopCounter++;
- }
-
-private:
- int mLoopCounter = 0;
- int mLoopStart = 1000;
- float mGain = 1.0f;
-
- AudioRecording audioRecorder;
- LatencyReport latencyReport;
-};
-
-
-// ====================================================================================
-class SineAnalyzer : public LoopbackProcessor {
-public:
-
- void report() override {
- double magnitude = calculateMagnitude();
- printf("sine magnitude = %7.5f\n", magnitude);
- printf("sine frames = %7d\n", mFrameCounter);
- printf("sine frequency = %7.1f Hz\n", mFrequency);
- }
-
- double calculateMagnitude(double *phasePtr = NULL) {
- if (mFrameCounter == 0) {
- return 0.0;
- }
- double sinMean = mSinAccumulator / mFrameCounter;
- double cosMean = mCosAccumulator / mFrameCounter;
- double magnitude = 2.0 * sqrt( (sinMean * sinMean) + (cosMean * cosMean ));
- if( phasePtr != NULL )
- {
- double phase = atan2( sinMean, cosMean );
- *phasePtr = phase;
- }
- return magnitude;
- }
-
- void process(float *inputData, int inputChannelCount,
- float *outputData, int outputChannelCount,
- int numFrames) override {
- double phaseIncrement = 2.0 * M_PI * mFrequency / getSampleRate();
-
- for (int i = 0; i < numFrames; i++) {
- // Multiply input by sine/cosine
- float sample = inputData[i * inputChannelCount];
- float sinOut = sinf(mPhase);
- mSinAccumulator += sample * sinOut;
- mCosAccumulator += sample * cosf(mPhase);
- // Advance and wrap phase
- mPhase += phaseIncrement;
- if (mPhase > (2.0 * M_PI)) {
- mPhase -= (2.0 * M_PI);
- }
-
- // Output sine wave so we can measure it.
- outputData[i * outputChannelCount] = sinOut;
- }
- mFrameCounter += numFrames;
-
- double magnitude = calculateMagnitude();
- if (mWaiting) {
- if (magnitude < 0.001) {
- // discard silence
- mFrameCounter = 0;
- mSinAccumulator = 0.0;
- mCosAccumulator = 0.0;
- } else {
- mWaiting = false;
- }
- }
- };
-
- void setFrequency(int32_t frequency) {
- mFrequency = frequency;
- }
-
- int32_t getFrequency() {
- return mFrequency;
- }
-
-private:
- double mFrequency = 300.0;
- double mPhase = 0.0;
- int32_t mFrameCounter = 0;
- double mSinAccumulator = 0.0;
- double mCosAccumulator = 0.0;
- bool mWaiting = true;
-};
+#define APP_VERSION "0.1.22"
-// TODO make this a class that manages its own buffer allocation
struct LoopbackData {
AAudioStream *inputStream = nullptr;
int32_t inputFramesMaximum = 0;
int16_t *inputData = nullptr;
+ int16_t peakShort = 0;
float *conversionBuffer = nullptr;
int32_t actualInputChannelCount = 0;
int32_t actualOutputChannelCount = 0;
int32_t inputBuffersToDiscard = 10;
+ int32_t minNumFrames = INT32_MAX;
+ int32_t maxNumFrames = 0;
+ bool isDone = false;
- aaudio_result_t inputError;
+ aaudio_result_t inputError = AAUDIO_OK;
+ aaudio_result_t outputError = AAUDIO_OK;
+
SineAnalyzer sineAnalyzer;
EchoAnalyzer echoAnalyzer;
+ AudioRecording audioRecorder;
LoopbackProcessor *loopbackProcessor;
};
@@ -517,6 +94,13 @@
return AAUDIO_CALLBACK_RESULT_STOP;
}
+ if (numFrames > myData->maxNumFrames) {
+ myData->maxNumFrames = numFrames;
+ }
+ if (numFrames < myData->minNumFrames) {
+ myData->minNumFrames = numFrames;
+ }
+
if (myData->inputBuffersToDiscard > 0) {
// Drain the input.
do {
@@ -524,6 +108,7 @@
numFrames, 0);
if (framesRead < 0) {
myData->inputError = framesRead;
+ printf("ERROR in read = %d", framesRead);
result = AAUDIO_CALLBACK_RESULT_STOP;
} else if (framesRead > 0) {
myData->inputBuffersToDiscard--;
@@ -534,9 +119,14 @@
numFrames, 0);
if (framesRead < 0) {
myData->inputError = framesRead;
+ printf("ERROR in read = %d", framesRead);
result = AAUDIO_CALLBACK_RESULT_STOP;
} else if (framesRead > 0) {
+ myData->audioRecorder.write(myData->inputData,
+ myData->actualInputChannelCount,
+ numFrames);
+
int32_t numSamples = framesRead * myData->actualInputChannelCount;
convertPcm16ToFloat(myData->inputData, myData->conversionBuffer, numSamples);
@@ -545,12 +135,25 @@
outputData,
myData->actualOutputChannelCount,
framesRead);
+ myData->isDone = myData->loopbackProcessor->isDone();
+ if (myData->isDone) {
+ result = AAUDIO_CALLBACK_RESULT_STOP;
+ }
}
}
return result;
}
+static void MyErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error)
+{
+ printf("Error Callback, error: %d\n",(int)error);
+ LoopbackData *myData = (LoopbackData *) userData;
+ myData->outputError = error;
+}
static void usage() {
printf("loopback: -n{numBursts} -p{outPerf} -P{inPerf} -t{test} -g{gain} -f{freq}\n");
@@ -571,7 +174,7 @@
}
static aaudio_performance_mode_t parsePerformanceMode(char c) {
- aaudio_performance_mode_t mode = AAUDIO_PERFORMANCE_MODE_NONE;
+ aaudio_performance_mode_t mode = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
c = tolower(c);
switch (c) {
case 'n':
@@ -612,86 +215,117 @@
return testMode;
}
+void printAudioGraph(AudioRecording &recording, int numSamples) {
+ int32_t start = recording.size() / 2;
+ int32_t end = start + numSamples;
+ if (end >= recording.size()) {
+ end = recording.size() - 1;
+ }
+ float *data = recording.getData();
+ // Normalize data so we can see it better.
+ float maxSample = 0.01;
+ for (int32_t i = start; i < end; i++) {
+ float samplePos = fabs(data[i]);
+ if (samplePos > maxSample) {
+ maxSample = samplePos;
+ }
+ }
+ float gain = 0.98f / maxSample;
+ for (int32_t i = start; i < end; i++) {
+ float sample = data[i];
+ printf("%5.3f ", sample); // actual value
+ sample *= gain;
+ printAudioScope(sample);
+ }
+}
+
+
// ====================================================================================
// TODO break up this large main() function into smaller functions
int main(int argc, const char **argv)
{
- aaudio_result_t result = AAUDIO_OK;
- LoopbackData loopbackData;
- AAudioStream *outputStream = nullptr;
- int requestedInputChannelCount = NUM_INPUT_CHANNELS;
- const int requestedOutputChannelCount = AAUDIO_UNSPECIFIED;
- const int requestedSampleRate = SAMPLE_RATE;
- int actualSampleRate = 0;
+ AAudioArgsParser argParser;
+ AAudioSimplePlayer player;
+ AAudioSimpleRecorder recorder;
+ LoopbackData loopbackData;
+ AAudioStream *outputStream = nullptr;
+
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_sharing_mode_t requestedInputSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ int requestedInputChannelCount = NUM_INPUT_CHANNELS;
+ const int requestedOutputChannelCount = AAUDIO_UNSPECIFIED;
+ int actualSampleRate = 0;
const aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_PCM_I16;
const aaudio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
- aaudio_format_t actualInputFormat;
- aaudio_format_t actualOutputFormat;
+ aaudio_format_t actualInputFormat;
+ aaudio_format_t actualOutputFormat;
+ aaudio_performance_mode_t outputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+
int testMode = TEST_ECHO_LATENCY;
- double frequency = 1000.0;
double gain = 1.0;
- const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
- //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
- aaudio_sharing_mode_t actualSharingMode;
-
- AAudioStreamBuilder *builder = nullptr;
aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
int32_t framesPerBurst = 0;
float *outputData = NULL;
double deviation;
double latency;
- aaudio_performance_mode_t outputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
- aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
-
int32_t burstsPerBuffer = 1; // single buffered
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, NULL, _IONBF, (size_t) 0);
+
+ printf("%s - Audio loopback using AAudio V" APP_VERSION "\n", argv[0]);
+
for (int i = 1; i < argc; i++) {
const char *arg = argv[i];
- if (arg[0] == '-') {
- char option = arg[1];
- switch (option) {
- case 'c':
- requestedInputChannelCount = atoi(&arg[2]);
- break;
- case 'f':
- frequency = atof(&arg[2]);
- break;
- case 'g':
- gain = atof(&arg[2]);
- break;
- case 'm':
- AAudio_setMMapPolicy(AAUDIO_POLICY_AUTO);
- break;
- case 'n':
- burstsPerBuffer = atoi(&arg[2]);
- break;
- case 'p':
- outputPerformanceLevel = parsePerformanceMode(arg[2]);
- break;
- case 'P':
- inputPerformanceLevel = parsePerformanceMode(arg[2]);
- break;
- case 't':
- testMode = parseTestMode(arg[2]);
- break;
- default:
- usage();
- exit(0);
- break;
+ if (argParser.parseArg(arg)) {
+ // Handle options that are not handled by the ArgParser
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'C':
+ requestedInputChannelCount = atoi(&arg[2]);
+ break;
+ case 'g':
+ gain = atof(&arg[2]);
+ break;
+ case 'P':
+ inputPerformanceLevel = parsePerformanceMode(arg[2]);
+ break;
+ case 'X':
+ requestedInputSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ break;
+ case 't':
+ testMode = parseTestMode(arg[2]);
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
}
- } else {
- usage();
- exit(0);
- break;
}
+
}
+ if (inputPerformanceLevel < 0) {
+ printf("illegal inputPerformanceLevel = %d\n", inputPerformanceLevel);
+ exit(EXIT_FAILURE);
+ }
+
+ int32_t requestedDuration = argParser.getDurationSeconds();
+ int32_t recordingDuration = std::min(60, requestedDuration);
+ loopbackData.audioRecorder.allocate(recordingDuration * SAMPLE_RATE);
switch(testMode) {
case TEST_SINE_MAGNITUDE:
- loopbackData.sineAnalyzer.setFrequency(frequency);
loopbackData.loopbackProcessor = &loopbackData.sineAnalyzer;
break;
case TEST_ECHO_LATENCY:
@@ -703,106 +337,44 @@
break;
}
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, NULL, _IONBF, (size_t) 0);
-
- printf("%s - Audio loopback using AAudio\n", argv[0]);
-
- // Use an AAudioStreamBuilder to contain requested parameters.
- result = AAudio_createStreamBuilder(&builder);
- if (result < 0) {
- goto finish;
- }
-
- // Request common stream properties.
- AAudioStreamBuilder_setSampleRate(builder, requestedSampleRate);
- AAudioStreamBuilder_setFormat(builder, requestedInputFormat);
- AAudioStreamBuilder_setSharingMode(builder, requestedSharingMode);
-
- // Open the input stream.
- AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
- AAudioStreamBuilder_setPerformanceMode(builder, inputPerformanceLevel);
- AAudioStreamBuilder_setChannelCount(builder, requestedInputChannelCount);
-
- result = AAudioStreamBuilder_openStream(builder, &loopbackData.inputStream);
- printf("AAudioStreamBuilder_openStream(input) returned %d = %s\n",
- result, AAudio_convertResultToText(result));
- if (result < 0) {
- goto finish;
- }
-
- // Create an output stream using the Builder.
- AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
- AAudioStreamBuilder_setFormat(builder, requestedOutputFormat);
- AAudioStreamBuilder_setPerformanceMode(builder, outputPerformanceLevel);
- AAudioStreamBuilder_setChannelCount(builder, requestedOutputChannelCount);
- AAudioStreamBuilder_setDataCallback(builder, MyDataCallbackProc, &loopbackData);
-
- result = AAudioStreamBuilder_openStream(builder, &outputStream);
- printf("AAudioStreamBuilder_openStream(output) returned %d = %s\n",
- result, AAudio_convertResultToText(result));
+ printf("OUTPUT stream ----------------------------------------\n");
+ argParser.setFormat(requestedOutputFormat);
+ result = player.open(argParser, MyDataCallbackProc, MyErrorCallbackProc, &loopbackData);
if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.open() returned %d\n", result);
goto finish;
}
+ outputStream = player.getStream();
+ argParser.compareWithStream(outputStream);
- printf("Stream INPUT ---------------------\n");
- loopbackData.actualInputChannelCount = AAudioStream_getChannelCount(loopbackData.inputStream);
- printf(" channelCount: requested = %d, actual = %d\n", requestedInputChannelCount,
- loopbackData.actualInputChannelCount);
- printf(" framesPerBurst = %d\n", AAudioStream_getFramesPerBurst(loopbackData.inputStream));
- printf(" bufferSize = %d\n",
- AAudioStream_getBufferSizeInFrames(loopbackData.inputStream));
- printf(" bufferCapacity = %d\n",
- AAudioStream_getBufferCapacityInFrames(loopbackData.inputStream));
+ actualOutputFormat = AAudioStream_getFormat(outputStream);
+ assert(actualOutputFormat == AAUDIO_FORMAT_PCM_FLOAT);
- actualSharingMode = AAudioStream_getSharingMode(loopbackData.inputStream);
- printf(" sharingMode: requested = %d, actual = %d\n",
- requestedSharingMode, actualSharingMode);
-
- actualInputFormat = AAudioStream_getFormat(loopbackData.inputStream);
- printf(" dataFormat: requested = %d, actual = %d\n",
- requestedInputFormat, actualInputFormat);
- assert(actualInputFormat == AAUDIO_FORMAT_PCM_I16);
-
- printf(" is MMAP used? = %s\n", AAudioStream_isMMapUsed(loopbackData.inputStream)
- ? "yes" : "no");
-
-
- printf("Stream OUTPUT ---------------------\n");
- // Check to see what kind of stream we actually got.
- actualSampleRate = AAudioStream_getSampleRate(outputStream);
- printf(" sampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
- loopbackData.echoAnalyzer.setSampleRate(actualSampleRate);
-
- loopbackData.actualOutputChannelCount = AAudioStream_getChannelCount(outputStream);
- printf(" channelCount: requested = %d, actual = %d\n", requestedOutputChannelCount,
- loopbackData.actualOutputChannelCount);
-
- actualSharingMode = AAudioStream_getSharingMode(outputStream);
- printf(" sharingMode: requested = %d, actual = %d\n",
- requestedSharingMode, actualSharingMode);
+ printf("INPUT stream ----------------------------------------\n");
+ // Use different parameters for the input.
+ argParser.setNumberOfBursts(AAUDIO_UNSPECIFIED);
+ argParser.setFormat(requestedInputFormat);
+ argParser.setPerformanceMode(inputPerformanceLevel);
+ argParser.setChannelCount(requestedInputChannelCount);
+ argParser.setSharingMode(requestedInputSharingMode);
+ result = recorder.open(argParser);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ goto finish;
+ }
+ loopbackData.inputStream = recorder.getStream();
+ argParser.compareWithStream(loopbackData.inputStream);
// This is the number of frames that are read in one chunk by a DMA controller
// or a DSP or a mixer.
framesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
- printf(" framesPerBurst = %d\n", framesPerBurst);
- result = AAudioStream_setBufferSizeInFrames(outputStream, burstsPerBuffer * framesPerBurst);
- if (result < 0) { // may be positive buffer size
- fprintf(stderr, "ERROR - AAudioStream_setBufferSize() returned %d\n", result);
- goto finish;
- }
- printf(" bufferSize = %d\n", AAudioStream_getBufferSizeInFrames(outputStream));
- printf(" bufferCapacity = %d\n", AAudioStream_getBufferCapacityInFrames(outputStream));
+ actualInputFormat = AAudioStream_getFormat(loopbackData.inputStream);
+ assert(actualInputFormat == AAUDIO_FORMAT_PCM_I16);
- actualOutputFormat = AAudioStream_getFormat(outputStream);
- printf(" dataFormat: requested = %d, actual = %d\n",
- requestedOutputFormat, actualOutputFormat);
- assert(actualOutputFormat == AAUDIO_FORMAT_PCM_FLOAT);
- printf(" is MMAP used? = %s\n", AAudioStream_isMMapUsed(outputStream)
- ? "yes" : "no");
+ loopbackData.actualInputChannelCount = recorder.getChannelCount();
+ loopbackData.actualOutputChannelCount = player.getChannelCount();
// Allocate a buffer for the audio data.
loopbackData.inputFramesMaximum = 32 * framesPerBurst;
@@ -813,49 +385,75 @@
loopbackData.conversionBuffer = new float[loopbackData.inputFramesMaximum *
loopbackData.actualInputChannelCount];
+ loopbackData.loopbackProcessor->reset();
- // Start output first so input stream runs low.
- result = AAudioStream_requestStart(outputStream);
+ result = recorder.start();
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
- result, AAudio_convertResultToText(result));
+ printf("ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
goto finish;
}
- result = AAudioStream_requestStart(loopbackData.inputStream);
+ result = player.start();
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
- result, AAudio_convertResultToText(result));
+ printf("ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
goto finish;
}
printf("------- sleep while the callback runs --------------\n");
fflush(stdout);
- sleep(NUM_SECONDS);
-
+ for (int i = requestedDuration; i > 0 ; i--) {
+ if (loopbackData.inputError != AAUDIO_OK) {
+ printf(" ERROR on input stream\n");
+ break;
+ } else if (loopbackData.outputError != AAUDIO_OK) {
+ printf(" ERROR on output stream\n");
+ break;
+ } else if (loopbackData.isDone) {
+ printf(" test says it is done!\n");
+ break;
+ } else {
+ sleep(1);
+ printf("%4d: ", i);
+ loopbackData.loopbackProcessor->printStatus();
+ int64_t framesWritten = AAudioStream_getFramesWritten(loopbackData.inputStream);
+ int64_t framesRead = AAudioStream_getFramesRead(loopbackData.inputStream);
+ printf(" input written = %lld, read %lld, xruns = %d\n",
+ (long long) framesWritten,
+ (long long) framesRead,
+ AAudioStream_getXRunCount(outputStream)
+ );
+ }
+ }
printf("input error = %d = %s\n",
loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
printf("AAudioStream_getXRunCount %d\n", AAudioStream_getXRunCount(outputStream));
- printf("framesRead = %d\n", (int) AAudioStream_getFramesRead(outputStream));
- printf("framesWritten = %d\n", (int) AAudioStream_getFramesWritten(outputStream));
+ printf("framesRead = %8d\n", (int) AAudioStream_getFramesRead(outputStream));
+ printf("framesWritten = %8d\n", (int) AAudioStream_getFramesWritten(outputStream));
+ printf("min numFrames = %8d\n", (int) loopbackData.minNumFrames);
+ printf("max numFrames = %8d\n", (int) loopbackData.maxNumFrames);
- loopbackData.loopbackProcessor->report();
+ if (loopbackData.inputError == AAUDIO_OK) {
+ if (testMode == TEST_SINE_MAGNITUDE) {
+ printAudioGraph(loopbackData.audioRecorder, 200);
+ }
+ loopbackData.loopbackProcessor->report();
+ }
-// {
-// int written = loopbackData.audioRecorder.save(FILENAME);
-// printf("wrote %d mono samples to %s on Android device\n", written, FILENAME);
-// }
-
+ {
+ int written = loopbackData.audioRecorder.save(FILENAME);
+ printf("main() wrote %d mono samples to %s on Android device\n", written, FILENAME);
+ }
finish:
- AAudioStream_close(outputStream);
- AAudioStream_close(loopbackData.inputStream);
+ player.close();
+ recorder.close();
delete[] loopbackData.conversionBuffer;
delete[] loopbackData.inputData;
delete[] outputData;
- AAudioStreamBuilder_delete(builder);
printf(RESULT_TAG "error = %d = %s\n", result, AAudio_convertResultToText(result));
if ((result != AAUDIO_OK)) {
diff --git a/media/libaaudio/examples/loopback/src/loopback.sh b/media/libaaudio/examples/loopback/src/loopback.sh
new file mode 100644
index 0000000..bc63125
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/loopback.sh
@@ -0,0 +1,14 @@
+#!/system/bin/sh
+# Run a loopback test in the background after a delay.
+# To run the script enter:
+# adb shell "nohup sh /data/loopback.sh &"
+
+SLEEP_TIME=10
+TEST_COMMAND="aaudio_loopback -pl -Pl -C1 -n2 -m2 -tm -d5"
+
+echo "Plug in USB Mir and Fun Plug."
+echo "Test will start in ${SLEEP_TIME} seconds: ${TEST_COMMAND}"
+sleep ${SLEEP_TIME}
+date > /data/loopreport.txt
+${TEST_COMMAND} >> /data/loopreport.txt
+date >> /data/loopreport.txt
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index 54217a5..46bc99e 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -121,7 +121,7 @@
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
- int32_t mNumberOfBursts = AAUDIO_UNSPECIFIED;
+ int32_t mNumberOfBursts = AAUDIO_UNSPECIFIED;
};
class AAudioArgsParser : public AAudioParameters {
@@ -151,9 +151,13 @@
case 'd':
mDurationSeconds = atoi(&arg[2]);
break;
- case 'm':
- AAudio_setMMapPolicy(AAUDIO_POLICY_AUTO);
- break;
+ case 'm': {
+ aaudio_policy_t policy = AAUDIO_POLICY_AUTO;
+ if (strlen(arg) > 2) {
+ policy = atoi(&arg[2]);
+ }
+ AAudio_setMMapPolicy(policy);
+ } break;
case 'n':
setNumberOfBursts(atoi(&arg[2]));
break;
@@ -198,7 +202,11 @@
printf(" -b{bufferCapacity} frames\n");
printf(" -c{channels} for example 2 for stereo\n");
printf(" -d{duration} in seconds, default is %d\n", DEFAULT_DURATION_SECONDS);
- printf(" -m enable MMAP\n");
+ printf(" -m{0|1|2|3} set MMAP policy\n");
+ printf(" 0 = _UNSPECIFIED, default\n");
+ printf(" 1 = _NEVER\n");
+ printf(" 2 = _AUTO, also if -m is used with no number\n");
+ printf(" 3 = _ALWAYS\n");
printf(" -n{numberOfBursts} for setBufferSize\n");
printf(" -p{performanceMode} set output AAUDIO_PERFORMANCE_MODE*, default NONE\n");
printf(" n for _NONE\n");
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 19f8aff..cc0cb34 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -30,6 +30,11 @@
#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
#define PERFORMANCE_MODE AAUDIO_PERFORMANCE_MODE_NONE
+// Arbitrary period for glitches, once per second at 48000 Hz.
+#define FORCED_UNDERRUN_PERIOD_FRAMES 48000
+// How long to sleep in a callback to cause an intentional glitch. For testing.
+#define FORCED_UNDERRUN_SLEEP_MICROS (10 * 1000)
+
/**
* Simple wrapper for AAudio that opens an output stream either in callback or blocking write mode.
*/
@@ -219,8 +224,13 @@
typedef struct SineThreadedData_s {
SineGenerator sineOsc1;
SineGenerator sineOsc2;
+ int64_t framesTotal = 0;
+ int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
+ int32_t minNumFrames = INT32_MAX;
+ int32_t maxNumFrames = 0;
int scheduler;
- bool schedulerChecked;
+ bool schedulerChecked = false;
+ bool forceUnderruns = false;
} SineThreadedData_t;
// Callback function that fills the audio output buffer.
@@ -233,16 +243,33 @@
// should not happen but just in case...
if (userData == nullptr) {
- fprintf(stderr, "ERROR - SimplePlayerDataCallbackProc needs userData\n");
+ printf("ERROR - SimplePlayerDataCallbackProc needs userData\n");
return AAUDIO_CALLBACK_RESULT_STOP;
}
SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
+ sineData->framesTotal += numFrames;
+
+ if (sineData->forceUnderruns) {
+ if (sineData->framesTotal > sineData->nextFrameToGlitch) {
+ usleep(FORCED_UNDERRUN_SLEEP_MICROS);
+ printf("Simulate glitch at %lld\n", (long long) sineData->framesTotal);
+ sineData->nextFrameToGlitch += FORCED_UNDERRUN_PERIOD_FRAMES;
+ }
+ }
+
if (!sineData->schedulerChecked) {
sineData->scheduler = sched_getscheduler(gettid());
sineData->schedulerChecked = true;
}
+ if (numFrames > sineData->maxNumFrames) {
+ sineData->maxNumFrames = numFrames;
+ }
+ if (numFrames < sineData->minNumFrames) {
+ sineData->minNumFrames = numFrames;
+ }
+
int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
// This code only plays on the first one or two channels.
// TODO Support arbitrary number of channels.
diff --git a/media/libaaudio/examples/utils/AAudioSimpleRecorder.h b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
index 6be9112..1344273 100644
--- a/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
+++ b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
@@ -73,12 +73,20 @@
/**
* Only call this after open() has been called.
*/
- int32_t getSamplesPerFrame() {
+ int32_t getChannelCount() {
if (mStream == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
return AAudioStream_getChannelCount(mStream);
}
+
+ /**
+ * @deprecated use getChannelCount()
+ */
+ int32_t getSamplesPerFrame() {
+ return getChannelCount();
+ }
+
/**
* Only call this after open() has been called.
*/
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 2211b72..b5602e9 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -26,9 +26,7 @@
#include <aaudio/AAudio.h>
#include "AAudioExampleUtils.h"
#include "AAudioSimplePlayer.h"
-
-// Application data that gets passed to the callback.
-#define MAX_FRAME_COUNT_RECORDS 256
+#include "../../utils/AAudioSimplePlayer.h"
int main(int argc, const char **argv)
{
@@ -42,9 +40,10 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback V0.1.1\n", argv[0]);
+ printf("%s - Play a sine sweep using an AAudio callback V0.1.2\n", argv[0]);
myData.schedulerChecked = false;
+ myData.forceUnderruns = false; // set true to test AAudioStream_getXRunCount()
if (argParser.parseArgs(argc, argv)) {
return EXIT_FAILURE;
@@ -99,7 +98,10 @@
printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
break;
}
- printf("framesWritten = %d\n", (int) AAudioStream_getFramesWritten(player.getStream()));
+ printf("framesWritten = %d, underruns = %d\n",
+ (int) AAudioStream_getFramesWritten(player.getStream()),
+ (int) AAudioStream_getXRunCount(player.getStream())
+ );
}
printf("Woke up now.\n");
@@ -120,6 +122,9 @@
SCHED_FIFO);
}
+ printf("min numFrames = %8d\n", (int) myData.minNumFrames);
+ printf("max numFrames = %8d\n", (int) myData.maxNumFrames);
+
printf("SUCCESS\n");
return EXIT_SUCCESS;
error:
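With myData.forceUnderruns set to true, the loop above becomes a simple XRun smoke test: each report should show the underrun counter climbing roughly once per second of playback. A minimal sketch of the reporting loop on its own, using only the getters that appear in this diff (durationSeconds is an illustrative bound):

    // Sketch only: poll once per second while the stream plays.
    for (int i = 0; i < durationSeconds; i++) {
        usleep(1000 * 1000);
        printf("framesWritten = %d, underruns = %d\n",
               (int) AAudioStream_getFramesWritten(player.getStream()),
               (int) AAudioStream_getXRunCount(player.getStream()));
    }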
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index e5f0deb..30fbdd6 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -793,7 +793,7 @@
* The position and time passed back are monotonically increasing.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
- * @param clockid AAUDIO_CLOCK_MONOTONIC or AAUDIO_CLOCK_BOOTTIME
+ * @param clockid CLOCK_MONOTONIC or CLOCK_BOOTTIME
* @param framePosition pointer to a variable to receive the position
* @param timeNanoseconds pointer to a variable to receive the time
* @return AAUDIO_OK or a negative error
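The corrected parameter doc points callers at the standard Linux clock IDs rather than AAudio-specific constants. A minimal sketch of reading a presentation timestamp with CLOCK_MONOTONIC, assuming this comment documents the AAudioStream_getTimestamp() entry point:

    // Sketch only: fetch a (frame position, time) pair for latency or drift estimation.
    int64_t framePosition = 0;
    int64_t timeNanoseconds = 0;
    aaudio_result_t result = AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC,
                                                       &framePosition, &timeNanoseconds);
    if (result == AAUDIO_OK) {
        // framePosition frames had been presented as of timeNanoseconds on CLOCK_MONOTONIC.
    }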
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 0684ed6..6ec285f 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -113,7 +113,8 @@
return result;
}
-aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor,
+ aaudio_direction_t direction)
{
aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
if (result != AAUDIO_OK) {
@@ -143,12 +144,20 @@
descriptor->dataAddress
);
- // ============================ down data queue =============================
+ // ============================ data queue =============================
descriptor = &pEndpointDescriptor->dataQueueDescriptor;
ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
- ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
- mFreeRunning = descriptor->readCounterAddress == nullptr;
+ ALOGV("AudioEndpoint::configure() data readCounterAddress = %p",
+ descriptor->readCounterAddress);
+
+ // An example of free running is when the other side is read or written by hardware DMA
+ // or a DSP. The hardware does not update its counter, so we have to update it ourselves.
+ int64_t *remoteCounter = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? descriptor->readCounterAddress // read by other side
+ : descriptor->writeCounterAddress; // written by other side
+ mFreeRunning = (remoteCounter == nullptr);
ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
+
int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
? &mDataReadCounter
: descriptor->readCounterAddress;
@@ -173,13 +182,8 @@
return mUpCommandQueue->read(commandPtr, 1);
}
-aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
-{
- return mDataQueue->write(buffer, numFrames);
-}
-
-void AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
- mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+int32_t AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
+ return mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
}
int32_t AudioEndpoint::getEmptyFramesAvailable()
@@ -187,7 +191,7 @@
return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
}
-void AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
+int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
{
return mDataQueue->getFullDataAvailable(wrappingBuffer);
}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index e7c6916..81a4f7b 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -40,7 +40,8 @@
/**
* Configure based on the EndPointDescriptor_t.
*/
- aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+ aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor,
+ aaudio_direction_t direction);
/**
* Read from a command passed up from the Server.
@@ -48,17 +49,11 @@
*/
aaudio_result_t readUpCommand(AAudioServiceMessage *commandPtr);
- /**
- * Non-blocking write.
- * @return framesWritten or a negative error code.
- */
- aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
-
- void getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+ int32_t getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
int32_t getEmptyFramesAvailable();
- void getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+ int32_t getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
int32_t getFullFramesAvailable();
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 8b14922..4c7d0f7 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -28,10 +28,10 @@
#include <binder/IServiceManager.h>
#include <aaudio/AAudio.h>
+#include <cutils/properties.h>
#include <utils/String16.h>
#include <utils/Trace.h>
-#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
@@ -39,6 +39,7 @@
#include "binding/AAudioServiceMessage.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
+#include "utility/AudioClock.h"
#include "utility/LinearRamp.h"
#include "AudioStreamInternal.h"
@@ -64,7 +65,12 @@
, mFramesPerBurst(16)
, mStreamVolume(1.0f)
, mInService(inService)
- , mServiceInterface(serviceInterface) {
+ , mServiceInterface(serviceInterface)
+ , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+ , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+ {
+ ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
+ mWakeupDelayNanos, mMinimumSleepNanos);
}
AudioStreamInternal::~AudioStreamInternal() {
@@ -135,7 +141,7 @@
}
// Configure endpoint based on descriptor.
- mAudioEndpoint.configure(&mEndpointDescriptor);
+ mAudioEndpoint.configure(&mEndpointDescriptor, getDirection());
mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
@@ -472,12 +478,12 @@
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
{
- const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
+ const char * traceName = "aaProc";
+ const char * fifoName = "aaRdy";
ATRACE_BEGIN(traceName);
- int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
if (ATRACE_ENABLED()) {
- const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
- ATRACE_INT(traceName, fullFrames);
+ int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+ ATRACE_INT(fifoName, fullFrames);
}
aaudio_result_t result = AAUDIO_OK;
@@ -505,10 +511,12 @@
if (timeoutNanoseconds == 0) {
break; // don't block
} else if (framesLeft > 0) {
- // clip the wake time to something reasonable
- if (wakeTimeNanos < currentTimeNanos) {
- wakeTimeNanos = currentTimeNanos;
+ if (!mAudioEndpoint.isFreeRunning()) {
+ // If there is software on the other end of the FIFO then it may get delayed.
+ // So wake up just a little after we expect it to be ready.
+ wakeTimeNanos += mWakeupDelayNanos;
}
+
if (wakeTimeNanos > deadlineNanos) {
// If we time out, just return the framesWritten so far.
// TODO remove after we fix the deadline bug
@@ -525,12 +533,30 @@
break;
}
- int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
- AudioClock::sleepForNanos(sleepForNanos);
+ currentTimeNanos = AudioClock::getNanoseconds();
+ int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
+ // Guarantee a minimum sleep time.
+ if (wakeTimeNanos < earliestWakeTime) {
+ wakeTimeNanos = earliestWakeTime;
+ }
+
+ if (ATRACE_ENABLED()) {
+ int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+ ATRACE_INT(fifoName, fullFrames);
+ int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
+ ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
+ }
+
+ AudioClock::sleepUntilNanoTime(wakeTimeNanos);
currentTimeNanos = AudioClock::getNanoseconds();
}
}
+ if (ATRACE_ENABLED()) {
+ int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+ ATRACE_INT(fifoName, fullFrames);
+ }
+
// return error or framesProcessed
(void) loopCount;
ATRACE_END();
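The new timing logic converts the old relative sleep into an absolute deadline: when real software sits on the other side of the FIFO, the expected-ready time is padded by mWakeupDelayNanos to absorb that thread's scheduling jitter, and every poll is forced to sleep at least mMinimumSleepNanos so the loop cannot degenerate into a busy wait. Reduced to its arithmetic, each iteration chooses its wake time roughly as below; chooseWakeTimeNanos is a helper named here only for illustration, with the member names from the diff shown in comments:

    // Sketch only: pick the absolute wake time for one polling iteration.
    int64_t chooseWakeTimeNanos(int64_t expectedReadyNanos,   // when the next burst should fit
                                bool otherSideIsSoftware,     // !mAudioEndpoint.isFreeRunning()
                                int64_t wakeupDelayNanos,     // mWakeupDelayNanos
                                int64_t minimumSleepNanos,    // mMinimumSleepNanos
                                int64_t nowNanos) {           // AudioClock::getNanoseconds()
        int64_t wakeTime = expectedReadyNanos;
        if (otherSideIsSoftware) {
            wakeTime += wakeupDelayNanos;                     // absorb the other thread's wakeup jitter
        }
        int64_t earliest = nowNanos + minimumSleepNanos;      // guarantee a minimum sleep
        return (wakeTime < earliest) ? earliest : wakeTime;   // then AudioClock::sleepUntilNanoTime()
    }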
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 109e425..1b991de 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -27,6 +27,7 @@
#include "client/IsochronousClockModel.h"
#include "client/AudioEndpoint.h"
#include "core/AudioStream.h"
+#include "utility/AudioClock.h"
#include "utility/LinearRamp.h"
using android::sp;
@@ -173,6 +174,11 @@
// Adjust timing model based on timestamp from service.
void processTimestamp(uint64_t position, int64_t time);
+ // The thread on the other side of the FIFO will have wakeup jitter.
+ // By delaying slightly we can avoid waking up before the other side is ready.
+ const int32_t mWakeupDelayNanos; // delay past typical wakeup jitter
+ const int32_t mMinimumSleepNanos; // minimum sleep while polling
+
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
};
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 22f8bd1..7b1e53e 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -24,6 +24,9 @@
#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include <utils/Trace.h>
+
using android::WrappingBuffer;
using namespace aaudio;
@@ -36,7 +39,6 @@
AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
-
// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
@@ -52,6 +54,9 @@
return result;
}
+ const char *traceName = "aaRdNow";
+ ATRACE_BEGIN(traceName);
+
if (mAudioEndpoint.isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
@@ -63,6 +68,9 @@
// If the write index passed the read index then consider it an overrun.
if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
mXRunCount++;
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaOverRuns", mXRunCount);
+ }
}
// Read some data from the buffer.
@@ -70,6 +78,9 @@
int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
//ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
// numFrames, framesProcessed);
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaRead", framesProcessed);
+ }
// Calculate an ideal time to wake up.
if (wakeTimePtr != nullptr && framesProcessed >= 0) {
@@ -82,14 +93,14 @@
case AAUDIO_STREAM_STATE_OPEN:
case AAUDIO_STREAM_STATE_STARTING:
break;
- case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ case AAUDIO_STREAM_STATE_STARTED:
{
- uint32_t burstSize = mFramesPerBurst;
- if (burstSize < 32) {
- burstSize = 32; // TODO review
- }
- uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
+ // When do we expect the next write burst to occur?
+ // Calculate frame position based on the readCounter because
+ // the writeCounter might have just advanced in the background,
+ // causing us to sleep until a later burst.
+ int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
}
break;
@@ -99,10 +110,8 @@
*wakeTimePtr = wakeTime;
}
-// ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
-// (unsigned long long)currentNanoTime,
-// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
-// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+
+ ATRACE_END();
return framesProcessed;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 1b18577..31e0a40 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -18,6 +18,10 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
+#include <utils/Trace.h>
+
#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"
@@ -99,6 +103,10 @@
return result;
}
+ const char *traceName = "aaWrNow";
+ ATRACE_BEGIN(traceName);
+
+ // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
if (mAudioEndpoint.isFreeRunning()) {
// Update data queue based on the timing model.
int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
@@ -108,10 +116,10 @@
// If the read index passed the write index then consider it an underrun.
if (mAudioEndpoint.getFullFramesAvailable() < 0) {
- ALOGV("AudioStreamInternal::processDataNow() - XRun! write = %d, read = %d",
- (int)mAudioEndpoint.getDataWriteCounter(),
- (int)mAudioEndpoint.getDataReadCounter());
mXRunCount++;
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaUnderRuns", mXRunCount);
+ }
}
// Write some data to the buffer.
@@ -119,6 +127,9 @@
int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
//ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
// numFrames, framesWritten);
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaWrote", framesWritten);
+ }
// Calculate an ideal time to wake up.
if (wakeTimePtr != nullptr && framesWritten >= 0) {
@@ -135,14 +146,15 @@
wakeTime = currentNanoTime;
}
break;
- case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ case AAUDIO_STREAM_STATE_STARTED:
{
- uint32_t burstSize = mFramesPerBurst;
- if (burstSize < 32) {
- burstSize = 32; // TODO review
- }
- uint64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + burstSize;
+ // When do we expect the next read burst to occur?
+ // Calculate frame position based on the writeCounter because
+ // the readCounter might have just advanced in the background,
+ // causing us to sleep until a later burst.
+ int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
+ - mAudioEndpoint.getBufferSizeInFrames();
wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
}
break;
@@ -152,10 +164,8 @@
*wakeTimePtr = wakeTime;
}
-// ALOGD("AudioStreamInternal::processDataNow finished: now = %llu, read# = %llu, wrote# = %llu",
-// (unsigned long long)currentNanoTime,
-// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
-// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+
+ ATRACE_END();
return framesWritten;
}
@@ -170,7 +180,7 @@
mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
- // Read data in one or two parts.
+ // Write data in one or two parts.
int partIndex = 0;
while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
int32_t framesToWrite = framesLeft;
@@ -291,6 +301,7 @@
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
AAudioStream_dataCallback appCallback = getDataCallbackProc();
if (appCallback == nullptr) return NULL;
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
// result might be a frame count
while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -302,10 +313,7 @@
mCallbackFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- // Write audio data to stream.
- int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
-
- // This is a BLOCKING WRITE!
+ // Write audio data to stream. This is a BLOCKING WRITE!
result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
if ((result != mCallbackFrames)) {
ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
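On the output side, the wake position is now anchored to the write counter: with a buffer of bufferSizeInFrames, waking when the reader is expected to reach writeCounter + framesPerBurst - bufferSizeInFrames means exactly one burst of room has opened up again, and a read counter that advances in the background can no longer pull the wake time earlier. A small sketch with illustrative numbers (nextWakePosition is named here only for illustration):

    // Sketch only: frame position at which one burst of room becomes free again.
    int64_t nextWakePosition(int64_t writeCounter, int32_t framesPerBurst, int32_t bufferSizeInFrames) {
        return writeCounter + framesPerBurst - bufferSizeInFrames;
    }
    // Example (illustrative): writeCounter = 960, framesPerBurst = 192, bufferSizeInFrames = 384
    // -> wake when the read counter reaches 768; the clock model converts 768 to an absolute time.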
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 6ea1172..c06c8a9 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -149,7 +149,7 @@
int64_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
- int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
+ int64_t time = mMarkerNanoTime + nanosDelta;
// ALOGD("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
// (unsigned long long)framePosition,
// (unsigned long long)time);
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 19b08c4..4859c69 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -82,9 +82,10 @@
mSampleRate, mSamplesPerFrame, mFormat,
AudioStream_convertSharingModeToShortText(mSharingMode),
(getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
- ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
- mDeviceId, mPerformanceMode, mFramesPerDataCallback);
-
+ ALOGI("AudioStream::open() device = %d, perfMode = %d, callback: %s with frames = %d",
+ mDeviceId, mPerformanceMode,
+ (mDataCallbackProc == nullptr ? "OFF" : "ON"),
+ mFramesPerDataCallback);
return AAUDIO_OK;
}
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index 6b4a772..8d2c62d 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -105,16 +105,18 @@
}
-void FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
+fifo_frames_t FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
fifo_frames_t startIndex = mFifo->getReadIndex();
fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+ return framesAvailable;
}
-void FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
+fifo_frames_t FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
fifo_frames_t startIndex = mFifo->getWriteIndex();
fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+ return framesAvailable;
}
fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
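Returning the frame count from getFullDataAvailable() and getEmptyRoomAvailable() lets a caller decide whether to proceed without a second query of the FIFO controller. A minimal usage sketch; the data[]/numFrames[] member names for the two wrapped regions are assumptions here, but the two-part iteration bounded by WrappingBuffer::SIZE matches the caller shown in AudioStreamInternalPlay above:

    // Sketch only: consume whatever is currently readable, in at most two wrapped parts.
    WrappingBuffer wrappingBuffer;
    fifo_frames_t fullFrames = fifoBuffer.getFullDataAvailable(&wrappingBuffer);
    if (fullFrames > 0) {
        for (int part = 0; part < WrappingBuffer::SIZE; part++) {
            if (wrappingBuffer.numFrames[part] > 0) {             // assumed field names
                // process wrappingBuffer.data[part] for numFrames[part] frames
            }
        }
    }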
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 2b262a1..a94e9b0 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -64,16 +64,18 @@
* If the data is split across the end of the FIFO then set data2 and numFrames2.
* Otherwise set them to null.
* @param wrappingBuffer
+ * @return total full frames available
*/
- void getFullDataAvailable(WrappingBuffer *wrappingBuffer);
+ fifo_frames_t getFullDataAvailable(WrappingBuffer *wrappingBuffer);
/**
* Return pointer to available empty frames in data1 and set size in numFrames1.
* If the room is split across the end of the FIFO then set data2 and numFrames2.
* Otherwise set them to null.
* @param wrappingBuffer
+ * @return total empty frames available
*/
- void getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
+ fifo_frames_t getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
/**
* Copy data from the FIFO into the buffer.
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index a3198d7..2450920 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -369,12 +369,43 @@
return prop;
}
+int32_t AAudioProperty_getWakeupDelayMicros() {
+ const int32_t minMicros = 0; // arbitrary
+ const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
+ const int32_t maxMicros = 5000; // arbitrary, probably don't want more than 500
+ int32_t prop = property_get_int32(AAUDIO_PROP_WAKEUP_DELAY_USEC, defaultMicros);
+ if (prop < minMicros) {
+ ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, minMicros);
+ prop = minMicros;
+ } else if (prop > maxMicros) {
+ ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, maxMicros);
+ prop = maxMicros;
+ }
+ return prop;
+}
+
+int32_t AAudioProperty_getMinimumSleepMicros() {
+ const int32_t minMicros = 20; // arbitrary
+ const int32_t defaultMicros = 200; // arbitrary
+ const int32_t maxMicros = 2000; // arbitrary
+ int32_t prop = property_get_int32(AAUDIO_PROP_MINIMUM_SLEEP_USEC, defaultMicros);
+ if (prop < minMicros) {
+ ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, minMicros);
+ prop = minMicros;
+ } else if (prop > maxMicros) {
+ ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, maxMicros);
+ prop = maxMicros;
+ }
+ return prop;
+}
+
int32_t AAudioProperty_getHardwareBurstMinMicros() {
const int32_t defaultMicros = 1000; // arbitrary
const int32_t maxMicros = 1000 * 1000; // arbitrary
int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
if (prop < 1 || prop > maxMicros) {
- ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d", prop);
+ ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
+ prop, defaultMicros);
prop = defaultMicros;
}
return prop;
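Both new getters follow the same read-and-clamp pattern, keyed by the properties declared in AAudioUtilities.h below (aaudio.wakeup_delay_usec and aaudio.minimum_sleep_usec), so they can be tuned on a device with setprop and take effect the next time a stream is constructed. A condensed sketch of the shared pattern, with the bounds as parameters instead of per-function constants (readClampedMicros is an illustrative helper name):

    // Sketch only: read a microsecond tunable from a system property and clamp it to sane bounds.
    static int32_t readClampedMicros(const char *propertyName, int32_t defaultMicros,
                                     int32_t minMicros, int32_t maxMicros) {
        int32_t micros = property_get_int32(propertyName, defaultMicros);
        if (micros < minMicros) {
            micros = minMicros;          // the real getters also log a warning here
        } else if (micros > maxMicros) {
            micros = maxMicros;
        }
        return micros;
    }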
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index efd663d..acd319b 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -195,13 +195,35 @@
/**
* Read system property.
- * @return number of bursts per mixer cycle
+ * @return number of bursts per AAudio service mixer cycle
*/
int32_t AAudioProperty_getMixerBursts();
#define AAUDIO_PROP_HW_BURST_MIN_USEC "aaudio.hw_burst_min_usec"
/**
+ * Read a system property that specifies the number of extra microseconds that a thread
+ * should sleep when waiting for another thread to service a FIFO. This is used
+ * to keep the waking thread from being overly optimistic about the other thread's
+ * wakeup timing. This value should be set high enough to cover typical scheduling jitter
+ * for a real-time thread.
+ *
+ * @return number of microseconds to delay the wakeup.
+ */
+int32_t AAudioProperty_getWakeupDelayMicros();
+
+#define AAUDIO_PROP_WAKEUP_DELAY_USEC "aaudio.wakeup_delay_usec"
+
+/**
+ * Read a system property that specifies the minimum sleep time when polling the FIFO.
+ *
+ * @return minimum number of microseconds to sleep.
+ */
+int32_t AAudioProperty_getMinimumSleepMicros();
+
+#define AAUDIO_PROP_MINIMUM_SLEEP_USEC "aaudio.minimum_sleep_usec"
+
+/**
* Read system property.
* This is handy in case the DMA is bursting too quickly for the CPU to keep up.
* For example, there may be a DMA burst every 100 usec but you only
diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk
index 28b96d5..a5fbf14 100644
--- a/media/libeffects/downmix/Android.mk
+++ b/media/libeffects/downmix/Android.mk
@@ -20,7 +20,8 @@
$(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
-LOCAL_CFLAGS += -fvisibility=hidden -DBUILD_FLOAT
+#-DBUILD_FLOAT
+LOCAL_CFLAGS += -fvisibility=hidden
LOCAL_CFLAGS += -Wall -Werror
LOCAL_HEADER_LIBRARIES += libhardware_headers
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 521dc5f..fb4fe4b 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -1,7 +1,15 @@
cc_library_headers {
name: "libmedia_headers",
vendor_available: true,
- export_include_dirs: ["include", "omx/1.0/include"],
+ export_include_dirs: ["include"],
+ header_libs:[
+ "libstagefright_headers",
+ "media_plugin_headers",
+ ],
+ export_header_lib_headers: [
+ "libstagefright_headers",
+ "media_plugin_headers",
+ ],
}
cc_library {
@@ -70,13 +78,6 @@
"libutils",
],
- include_dirs: [
- "frameworks/av/include/media",
- "frameworks/native/include", // for media/hardware/MetadataBufferType.h
- "frameworks/native/include/media/openmax",
- "frameworks/av/media/libstagefright",
- ],
-
export_shared_lib_headers: [
"android.hidl.memory@1.0",
"android.hidl.token@1.0-utils",
@@ -202,6 +203,7 @@
"libicui18n",
"libsonivox",
"libmediadrm",
+ "libmedia_helper",
"android.hidl.memory@1.0",
],
@@ -210,15 +212,10 @@
"libc_malloc_debug_backtrace",
],
- include_dirs: [
- "frameworks/native/include/media/openmax",
- "frameworks/av/include/media/",
- "frameworks/av/media/libstagefright",
- ],
-
export_include_dirs: [
"include",
],
+
cflags: [
"-Werror",
"-Wno-error=deprecated-declarations",
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
index 808c2b5..990d260 100644
--- a/media/libmedia/CharacterEncodingDetector.cpp
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -18,15 +18,15 @@
#define LOG_TAG "CharacterEncodingDector"
#include <utils/Log.h>
-#include <CharacterEncodingDetector.h>
+#include <media/CharacterEncodingDetector.h>
#include "CharacterEncodingDetectorTables.h"
-#include "utils/Vector.h"
-#include "StringArray.h"
+#include <utils/Vector.h>
+#include <media/StringArray.h>
-#include "unicode/ucnv.h"
-#include "unicode/ucsdet.h"
-#include "unicode/ustring.h"
+#include <unicode/ucnv.h>
+#include <unicode/ucsdet.h>
+#include <unicode/ustring.h>
namespace android {
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
index 95f7d2e..9724fc1 100644
--- a/media/libmedia/IResourceManagerService.cpp
+++ b/media/libmedia/IResourceManagerService.cpp
@@ -19,7 +19,7 @@
#define LOG_TAG "IResourceManagerService"
#include <utils/Log.h>
-#include "media/IResourceManagerService.h"
+#include <media/IResourceManagerService.h>
#include <binder/Parcel.h>
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index 9f803cb..028616b 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -20,8 +20,8 @@
#include <media/mediascanner.h>
-#include "CharacterEncodingDetector.h"
-#include "StringArray.h"
+#include <media/CharacterEncodingDetector.h>
+#include <media/StringArray.h>
namespace android {
diff --git a/media/libmedia/MidiDeviceInfo.cpp b/media/libmedia/MidiDeviceInfo.cpp
index 02efc5f..7588e00 100644
--- a/media/libmedia/MidiDeviceInfo.cpp
+++ b/media/libmedia/MidiDeviceInfo.cpp
@@ -16,7 +16,7 @@
#define LOG_TAG "MidiDeviceInfo"
-#include "MidiDeviceInfo.h"
+#include <media/MidiDeviceInfo.h>
#include <binder/Parcel.h>
#include <log/log.h>
diff --git a/media/libmedia/MidiIoWrapper.cpp b/media/libmedia/MidiIoWrapper.cpp
index faae954..4e5d67f 100644
--- a/media/libmedia/MidiIoWrapper.cpp
+++ b/media/libmedia/MidiIoWrapper.cpp
@@ -22,7 +22,7 @@
#include <sys/stat.h>
#include <fcntl.h>
-#include "media/MidiIoWrapper.h"
+#include <media/MidiIoWrapper.h>
static int readAt(void *handle, void *buffer, int pos, int size) {
return ((android::MidiIoWrapper*)handle)->readAt(buffer, pos, size);
diff --git a/media/libmedia/StringArray.cpp b/media/libmedia/StringArray.cpp
index b2e5907..7868b85 100644
--- a/media/libmedia/StringArray.cpp
+++ b/media/libmedia/StringArray.cpp
@@ -21,7 +21,7 @@
#include <stdlib.h>
#include <string.h>
-#include "StringArray.h"
+#include <media/StringArray.h>
namespace android {
diff --git a/media/libmedia/include/media/IOMX.h b/media/libmedia/include/media/IOMX.h
index 9a0ada1..d868860 100644
--- a/media/libmedia/include/media/IOMX.h
+++ b/media/libmedia/include/media/IOMX.h
@@ -29,8 +29,8 @@
#include <media/hardware/MetadataBufferType.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
-#include <OMX_Core.h>
-#include <OMX_Video.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/openmax/OMX_Video.h>
namespace android {
diff --git a/media/libmedia/omx/1.0/include/media/omx/1.0/Conversion.h b/media/libmedia/include/media/omx/1.0/Conversion.h
similarity index 100%
rename from media/libmedia/omx/1.0/include/media/omx/1.0/Conversion.h
rename to media/libmedia/include/media/omx/1.0/Conversion.h
diff --git a/media/libmedia/omx/1.0/include/media/omx/1.0/WGraphicBufferSource.h b/media/libmedia/include/media/omx/1.0/WGraphicBufferSource.h
similarity index 100%
rename from media/libmedia/omx/1.0/include/media/omx/1.0/WGraphicBufferSource.h
rename to media/libmedia/include/media/omx/1.0/WGraphicBufferSource.h
diff --git a/media/libmedia/omx/1.0/include/media/omx/1.0/WOmx.h b/media/libmedia/include/media/omx/1.0/WOmx.h
similarity index 100%
rename from media/libmedia/omx/1.0/include/media/omx/1.0/WOmx.h
rename to media/libmedia/include/media/omx/1.0/WOmx.h
diff --git a/media/libmedia/omx/1.0/include/media/omx/1.0/WOmxBufferSource.h b/media/libmedia/include/media/omx/1.0/WOmxBufferSource.h
similarity index 98%
rename from media/libmedia/omx/1.0/include/media/omx/1.0/WOmxBufferSource.h
rename to media/libmedia/include/media/omx/1.0/WOmxBufferSource.h
index 86322da..086f648 100644
--- a/media/libmedia/omx/1.0/include/media/omx/1.0/WOmxBufferSource.h
+++ b/media/libmedia/include/media/omx/1.0/WOmxBufferSource.h
@@ -21,7 +21,7 @@
#include <hidl/Status.h>
#include <binder/Binder.h>
-#include <OMXFenceParcelable.h>
+#include <media/OMXFenceParcelable.h>
#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
#include <android/BnOMXBufferSource.h>
diff --git a/media/libmedia/omx/1.0/include/media/omx/1.0/WOmxNode.h b/media/libmedia/include/media/omx/1.0/WOmxNode.h
similarity index 100%
rename from media/libmedia/omx/1.0/include/media/omx/1.0/WOmxNode.h
rename to media/libmedia/include/media/omx/1.0/WOmxNode.h
diff --git a/media/libmedia/omx/1.0/include/media/omx/1.0/WOmxObserver.h b/media/libmedia/include/media/omx/1.0/WOmxObserver.h
similarity index 100%
rename from media/libmedia/omx/1.0/include/media/omx/1.0/WOmxObserver.h
rename to media/libmedia/include/media/omx/1.0/WOmxObserver.h
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 77f0cab..496db0d 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -77,7 +77,7 @@
#include "TestPlayerStub.h"
#include "nuplayer/NuPlayerDriver.h"
-#include <OMX.h>
+#include <media/stagefright/omx/OMX.h>
#include "HDCP.h"
#include "HTTPBase.h"
@@ -2318,6 +2318,33 @@
}
}
} else {
+ // VolumeShapers are not affected when a track moves between players for
+ // gapless playback (setNextMediaPlayer).
+ // We forward VolumeShaper operations that do not change configuration
+ // to the new player so that unducking may occur as expected.
+ // Unducking is an idempotent operation; applying it back-to-back has the same effect as applying it once.
+ if (configuration->getType() == VolumeShaper::Configuration::TYPE_ID
+ && mNextOutput != nullptr) {
+ ALOGV("applyVolumeShaper: Attempting to forward missed operation: %s %s",
+ configuration->toString().c_str(), operation->toString().c_str());
+ Mutex::Autolock nextLock(mNextOutput->mLock);
+
+ // recycled track should be forwarded from this AudioSink by switchToNextOutput
+ sp<AudioTrack> track = mNextOutput->mRecycledTrack;
+ if (track != nullptr) {
+ ALOGD("Forward VolumeShaper operation to recycled track %p", track.get());
+ (void)track->applyVolumeShaper(configuration, operation);
+ } else {
+ // There is a small chance that the unduck occurs after the next
+ // player has already started, but before it is registered to receive
+ // the unduck command.
+ track = mNextOutput->mTrack;
+ if (track != nullptr) {
+ ALOGD("Forward VolumeShaper operation to track %p", track.get());
+ (void)track->applyVolumeShaper(configuration, operation);
+ }
+ }
+ }
status = mVolumeHandler->applyVolumeShaper(configuration, operation);
}
return status;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index d83c406..aa21fff 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1242,6 +1242,16 @@
mAudioLastDequeueTimeUs = seekTimeUs;
}
+ if (mSubtitleTrack.mSource != NULL) {
+ mSubtitleTrack.mPackets->clear();
+ mFetchSubtitleDataGeneration++;
+ }
+
+ if (mTimedTextTrack.mSource != NULL) {
+ mTimedTextTrack.mPackets->clear();
+ mFetchTimedTextDataGeneration++;
+ }
+
// If currently buffering, post kWhatBufferingEnd first, so that
// NuPlayer resumes. Otherwise, if cache hits high watermark
// before new polling happens, no one will resume the playback.
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 6491ceb..d4ec30d 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -49,18 +49,18 @@
#include <hidlmemory/mapping.h>
-#include <OMX_AudioExt.h>
-#include <OMX_VideoExt.h>
-#include <OMX_Component.h>
-#include <OMX_IndexExt.h>
-#include <OMX_AsString.h>
+#include <media/openmax/OMX_AudioExt.h>
+#include <media/openmax/OMX_VideoExt.h>
+#include <media/openmax/OMX_Component.h>
+#include <media/openmax/OMX_IndexExt.h>
+#include <media/openmax/OMX_AsString.h>
#include "include/avc_utils.h"
#include "include/ACodecBufferChannel.h"
#include "include/DataConverter.h"
#include "include/SecureBuffer.h"
#include "include/SharedMemoryBuffer.h"
-#include "omx/OMXUtils.h"
+#include <media/stagefright/omx/OMXUtils.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 99e6d45..19973bd 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -68,11 +68,6 @@
"avc_utils.cpp",
],
- include_dirs: [
- "frameworks/native/include/media/openmax",
- "frameworks/native/include/media/hardware",
- ],
-
shared_libs: [
"libaudioutils",
"libbinder",
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 92399f1..93d4f57 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -1860,10 +1860,12 @@
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
if (mMeta->findData(kKeyESDS, &type, &data, &size)) {
ESDS esds(data, size);
- if (esds.getCodecSpecificInfo(&data, &size) != OK) {
- data = NULL;
- size = 0;
+ if (esds.getCodecSpecificInfo(&data, &size) == OK &&
+ data != NULL &&
+ copyCodecSpecificData((uint8_t*)data, size) == OK) {
+ mGotAllCodecSpecificData = true;
}
+ return;
}
}
if (data != NULL && copyCodecSpecificData((uint8_t *)data, size) == OK) {
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index a1cf285..73a3965 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -17,7 +17,7 @@
#ifndef SOFT_AAC_2_H_
#define SOFT_AAC_2_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
#include "aacdecoder_lib.h"
#include "DrcPresModeWrap.h"
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
index 981cbbb..e64c1b7 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
@@ -18,7 +18,7 @@
#define SOFT_AAC_ENCODER_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
struct VO_AUDIO_CODECAPI;
struct VO_MEM_OPERATOR;
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
index 123fd25..681dcf2 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
@@ -18,7 +18,7 @@
#define SOFT_AAC_ENCODER_2_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
#include "aacenc_lib.h"
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.h b/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
index 758d6ac..869b81d 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
@@ -18,7 +18,7 @@
#define SOFT_AMR_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
namespace android {
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h
index 50178c4..c73e4dd 100644
--- a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h
@@ -18,7 +18,7 @@
#define SOFT_AMRNB_ENCODER_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
namespace android {
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h
index d0c1dab..8950a8c 100644
--- a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h
@@ -18,7 +18,7 @@
#define SOFT_AMRWB_ENCODER_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
#include "voAMRWB.h"
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index 18b7556..679ed3e 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -18,7 +18,7 @@
#define SOFT_H264_DEC_H_
-#include "SoftVideoDecoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoDecoderOMXComponent.h>
#include <sys/time.h>
namespace android {
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index 818e4a1..a43cdf1 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -21,7 +21,7 @@
#include <media/stagefright/foundation/ABase.h>
#include <utils/Vector.h>
-#include "SoftVideoEncoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoEncoderOMXComponent.h>
namespace android {
diff --git a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
index c09081d..4a21c34 100644
--- a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
+++ b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
@@ -18,7 +18,7 @@
#define SOFT_FLAC_DECODER_H
#include "FLACDecoder.h"
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
namespace android {
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
index 6027f76..f4f0655 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
@@ -18,7 +18,7 @@
#define SOFT_FLAC_ENC_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
#include "FLAC/stream_encoder.h"
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.h b/media/libstagefright/codecs/g711/dec/SoftG711.h
index 16b6340..3ece246 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.h
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.h
@@ -18,7 +18,7 @@
#define SOFT_G711_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
namespace android {
diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.h b/media/libstagefright/codecs/gsm/dec/SoftGSM.h
index 0303dea..ef86915 100644
--- a/media/libstagefright/codecs/gsm/dec/SoftGSM.h
+++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.h
@@ -18,7 +18,7 @@
#define SOFT_GSM_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
extern "C" {
#include "gsm.h"
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
index e7c2127..5800490 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.h
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
@@ -18,7 +18,7 @@
#define SOFT_HEVC_H_
-#include "SoftVideoDecoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoDecoderOMXComponent.h>
#include <sys/time.h>
namespace android {
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
index 4114e7d..e399ac9 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
@@ -18,7 +18,7 @@
#define SOFT_MPEG4_H_
-#include "SoftVideoDecoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoDecoderOMXComponent.h>
struct tagvideoDecControls;
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
index ae8cb6f..00f2dd3 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
@@ -19,7 +19,7 @@
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/foundation/ABase.h>
-#include "SoftVideoEncoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoEncoderOMXComponent.h>
#include "mp4enc_api.h"
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h
index 3bfa6c7..976fd00 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.h
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h
@@ -18,7 +18,7 @@
#define SOFT_MP3_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
struct tPVMP3DecoderExternal;
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index 9a69226..9d5f342 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -789,7 +789,7 @@
if (s_dec_op.u4_output_present) {
ssize_t timeStampIdx;
- outHeader->nFilledLen = (mWidth * mHeight * 3) / 2;
+ outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
timeStampIdx = getMinTimestampIdx(mTimeStamps, mTimeStampsValid);
if (timeStampIdx < 0) {
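The fix above sizes nFilledLen by the decoder's aligned output buffer rather than the display dimensions, because the component fills the whole padded frame. With illustrative numbers for a 1920x1080 stream decoded into a 1920x1088-aligned buffer, the YUV420 payload is 1920*1088*3/2 = 3,133,440 bytes, while the display-size formula reports only 1920*1080*3/2 = 3,110,400 and under-describes what was actually written:

    // Sketch only: illustrative YUV420 sizes for a padded output buffer.
    const int32_t displayWidth = 1920, displayHeight = 1080;   // mWidth / mHeight
    const int32_t bufferWidth  = 1920, bufferHeight  = 1088;   // outputBufferWidth() / outputBufferHeight()
    const int32_t filledLen = (bufferWidth * bufferHeight * 3) / 2;    // 3133440 bytes actually written
    const int32_t tooSmall  = (displayWidth * displayHeight * 3) / 2;  // 3110400 bytes, the old value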
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
index 6729a54..338fc30 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
@@ -18,7 +18,7 @@
#define SOFT_MPEG2_H_
-#include "SoftVideoDecoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoDecoderOMXComponent.h>
#include <sys/time.h>
namespace android {
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index 84cf79c..d6bb902 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -18,7 +18,7 @@
#define SOFT_VPX_H_
-#include "SoftVideoDecoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoDecoderOMXComponent.h>
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_codec.h"
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 86dfad7..dd86d36 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -18,7 +18,7 @@
#define SOFT_VPX_ENCODER_H_
-#include "SoftVideoEncoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoEncoderOMXComponent.h>
#include <OMX_VideoExt.h>
#include <OMX_IndexExt.h>
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
index b8c1807..fad988b 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
@@ -18,7 +18,7 @@
#define SOFT_AVC_H_
-#include "SoftVideoDecoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoDecoderOMXComponent.h>
#include <utils/KeyedVector.h>
#include "H264SwDecApi.h"
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.h b/media/libstagefright/codecs/opus/dec/SoftOpus.h
index 97f6561..fab925d 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.h
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.h
@@ -23,7 +23,7 @@
#define SOFT_OPUS_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
struct OpusMSDecoder;
diff --git a/media/libstagefright/codecs/raw/SoftRaw.h b/media/libstagefright/codecs/raw/SoftRaw.h
index 80906b4..ebc2741 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.h
+++ b/media/libstagefright/codecs/raw/SoftRaw.h
@@ -18,7 +18,7 @@
#define SOFT_RAW_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
struct tPVMP4AudioDecoderExternal;
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
index 30d137b..52d1632 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
@@ -18,7 +18,7 @@
#define SOFT_VORBIS_H_
-#include "SimpleSoftOMXComponent.h"
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
struct vorbis_dsp_state;
struct vorbis_info;
diff --git a/media/libstagefright/include/media/stagefright/CameraSource.h b/media/libstagefright/include/media/stagefright/CameraSource.h
index 2aaa884..d6149c0 100644
--- a/media/libstagefright/include/media/stagefright/CameraSource.h
+++ b/media/libstagefright/include/media/stagefright/CameraSource.h
@@ -29,7 +29,7 @@
#include <utils/List.h>
#include <utils/RefBase.h>
#include <utils/String16.h>
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
namespace android {
diff --git a/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h b/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
index d38c337..d1677fa 100644
--- a/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
+++ b/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
@@ -25,7 +25,7 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
#include "foundation/ABase.h"
diff --git a/media/libstagefright/omx/1.0/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
index 789379a..dfab3b0 100644
--- a/media/libstagefright/omx/1.0/Omx.cpp
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -19,20 +19,19 @@
#include <android-base/logging.h>
#include <gui/IGraphicBufferProducer.h>
-#include <OMX_Core.h>
-#include <OMX_AsString.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/openmax/OMX_AsString.h>
-#include "../OMXUtils.h"
-#include "../OMXMaster.h"
-#include "../GraphicBufferSource.h"
+#include <media/stagefright/omx/OMXUtils.h>
+#include <media/stagefright/omx/OMXMaster.h>
+#include <media/stagefright/omx/GraphicBufferSource.h>
-#include "WOmxNode.h"
-#include "WOmxObserver.h"
-#include "WGraphicBufferProducer.h"
-#include "WGraphicBufferSource.h"
-#include "Conversion.h"
-
-#include "Omx.h"
+#include <media/stagefright/omx/1.0/WOmxNode.h>
+#include <media/stagefright/omx/1.0/WOmxObserver.h>
+#include <media/stagefright/omx/1.0/WGraphicBufferProducer.h>
+#include <media/stagefright/omx/1.0/WGraphicBufferSource.h>
+#include <media/stagefright/omx/1.0/Conversion.h>
+#include <media/stagefright/omx/1.0/Omx.h>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/1.0/OmxStore.cpp b/media/libstagefright/omx/1.0/OmxStore.cpp
index 0e37af9..a82625a 100644
--- a/media/libstagefright/omx/1.0/OmxStore.cpp
+++ b/media/libstagefright/omx/1.0/OmxStore.cpp
@@ -19,8 +19,8 @@
#include <android-base/logging.h>
-#include "Conversion.h"
-#include "OmxStore.h"
+#include <media/stagefright/omx/1.0/Conversion.h>
+#include <media/stagefright/omx/1.0/OmxStore.h>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp b/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
index acda060..fcf1092 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
+++ b/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
@@ -18,9 +18,9 @@
#include <android-base/logging.h>
-#include "WGraphicBufferProducer.h"
-#include "WProducerListener.h"
-#include "Conversion.h"
+#include <media/stagefright/omx/1.0/WGraphicBufferProducer.h>
+#include <media/stagefright/omx/1.0/WProducerListener.h>
+#include <media/stagefright/omx/1.0/Conversion.h>
#include <system/window.h>
namespace android {
@@ -64,10 +64,9 @@
sp<Fence> fence;
::android::FrameEventHistoryDelta outTimestamps;
status_t status = mBase->dequeueBuffer(
- &slot, &fence,
- width, height,
- static_cast<::android::PixelFormat>(format), usage,
- getFrameTimestamps ? &outTimestamps : nullptr);
+ &slot, &fence, width, height,
+ static_cast<::android::PixelFormat>(format), usage, nullptr,
+ getFrameTimestamps ? &outTimestamps : nullptr);
hidl_handle tFence;
FrameEventHistoryDelta tOutTimestamps;
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
index d8540f8..3697429 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
@@ -17,15 +17,14 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "TWGraphicBufferSource"
+#include <media/stagefright/omx/1.0/WGraphicBufferSource.h>
+#include <media/stagefright/omx/1.0/WOmxNode.h>
+#include <media/stagefright/omx/1.0/Conversion.h>
+#include <media/stagefright/omx/OMXUtils.h>
#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
-#include <OMX_Component.h>
-#include <OMX_IndexExt.h>
-
-#include "omx/OMXUtils.h"
-#include "WGraphicBufferSource.h"
-#include "WOmxNode.h"
-#include "Conversion.h"
+#include <media/openmax/OMX_Component.h>
+#include <media/openmax/OMX_IndexExt.h>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/1.0/WOmxBufferSource.cpp b/media/libstagefright/omx/1.0/WOmxBufferSource.cpp
index 803283a..c8c963f 100644
--- a/media/libstagefright/omx/1.0/WOmxBufferSource.cpp
+++ b/media/libstagefright/omx/1.0/WOmxBufferSource.cpp
@@ -16,8 +16,8 @@
#include <utils/String8.h>
-#include "WOmxBufferSource.h"
-#include "Conversion.h"
+#include <media/stagefright/omx/1.0/WOmxBufferSource.h>
+#include <media/stagefright/omx/1.0/Conversion.h>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/1.0/WOmxNode.cpp b/media/libstagefright/omx/1.0/WOmxNode.cpp
index 91d1010..9f82283 100644
--- a/media/libstagefright/omx/1.0/WOmxNode.cpp
+++ b/media/libstagefright/omx/1.0/WOmxNode.cpp
@@ -16,9 +16,9 @@
#include <algorithm>
-#include "WOmxNode.h"
-#include "WOmxBufferSource.h"
-#include "Conversion.h"
+#include <media/stagefright/omx/1.0/WOmxNode.h>
+#include <media/stagefright/omx/1.0/WOmxBufferSource.h>
+#include <media/stagefright/omx/1.0/Conversion.h>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/1.0/WOmxObserver.cpp b/media/libstagefright/omx/1.0/WOmxObserver.cpp
index 354db29..ccbe25c 100644
--- a/media/libstagefright/omx/1.0/WOmxObserver.cpp
+++ b/media/libstagefright/omx/1.0/WOmxObserver.cpp
@@ -16,14 +16,14 @@
#define LOG_TAG "WOmxObserver-impl"
-#include <vector>
-
#include <android-base/logging.h>
#include <cutils/native_handle.h>
#include <binder/Binder.h>
-#include "WOmxObserver.h"
-#include "Conversion.h"
+#include <media/stagefright/omx/1.0/WOmxObserver.h>
+#include <media/stagefright/omx/1.0/Conversion.h>
+
+#include <vector>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/1.0/WProducerListener.cpp b/media/libstagefright/omx/1.0/WProducerListener.cpp
index be0d4d5..bdc3aa1 100644
--- a/media/libstagefright/omx/1.0/WProducerListener.cpp
+++ b/media/libstagefright/omx/1.0/WProducerListener.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "WProducerListener.h"
+#include <media/stagefright/omx/1.0/WProducerListener.h>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index b60ce16..3027cdd 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -25,6 +25,10 @@
"1.0/WOmxBufferSource.cpp",
],
+ export_include_dirs: [
+ "include",
+ ],
+
include_dirs: [
"frameworks/av/include/media/",
"frameworks/av/media/libstagefright",
@@ -34,6 +38,14 @@
"frameworks/native/include/media/openmax",
],
+ header_libs: [
+ "media_plugin_headers",
+ ],
+
+ export_header_lib_headers: [
+ "media_plugin_headers",
+ ],
+
shared_libs: [
"libbase",
"libbinder",
@@ -60,7 +72,9 @@
export_shared_lib_headers: [
"android.hidl.memory@1.0",
- "libstagefright_xmlparser",
+ "libmedia_omx",
+ "libstagefright_foundation",
+ "libstagefright_xmlparser",
],
cflags: [
@@ -85,12 +99,21 @@
cc_library_static {
name: "libstagefright_omx_utils",
srcs: ["OMXUtils.cpp"],
- include_dirs: [
- "frameworks/av/media/libstagefright",
- "frameworks/native/include/media/hardware",
- "frameworks/native/include/media/openmax",
+ export_include_dirs: [
+ "include",
],
- shared_libs: ["libmedia"],
+ header_libs: [
+ "media_plugin_headers",
+ ],
+ export_header_lib_headers: [
+ "media_plugin_headers",
+ ],
+ shared_libs: [
+ "libmedia",
+ ],
+ export_shared_lib_headers: [
+ "libmedia",
+ ],
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/omx/BWGraphicBufferSource.cpp b/media/libstagefright/omx/BWGraphicBufferSource.cpp
index 79f6d93..94ef598 100644
--- a/media/libstagefright/omx/BWGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/BWGraphicBufferSource.cpp
@@ -17,15 +17,13 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "BWGraphicBufferSource"
-#include <OMX_Component.h>
-#include <OMX_IndexExt.h>
-
+#include <media/stagefright/omx/BWGraphicBufferSource.h>
+#include <media/stagefright/omx/OMXUtils.h>
+#include <media/openmax/OMX_Component.h>
+#include <media/openmax/OMX_IndexExt.h>
#include <media/OMXBuffer.h>
#include <media/IOMX.h>
-#include "OMXUtils.h"
-#include "BWGraphicBufferSource.h"
-
namespace android {
static const OMX_U32 kPortIndexInput = 0;
diff --git a/media/libstagefright/omx/FrameDropper.cpp b/media/libstagefright/omx/FrameDropper.cpp
index 9a4952e..0c50c58 100644
--- a/media/libstagefright/omx/FrameDropper.cpp
+++ b/media/libstagefright/omx/FrameDropper.cpp
@@ -18,8 +18,7 @@
#define LOG_TAG "FrameDropper"
#include <utils/Log.h>
-#include "FrameDropper.h"
-
+#include <media/stagefright/omx/FrameDropper.h>
#include <media/stagefright/foundation/ADebug.h>
namespace android {
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index ef4d745..caf3ac8 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -22,7 +22,9 @@
#define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
-#include "GraphicBufferSource.h"
+#include <media/stagefright/omx/GraphicBufferSource.h>
+#include <media/stagefright/omx/FrameDropper.h>
+#include <media/stagefright/omx/OMXUtils.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ColorUtils.h>
@@ -31,14 +33,12 @@
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
#include <gui/BufferItem.h>
-#include <HardwareAPI.h>
-#include "omx/OMXUtils.h"
-#include <OMX_Component.h>
-#include <OMX_IndexExt.h>
-#include "media/OMXBuffer.h"
+#include <media/hardware/HardwareAPI.h>
+#include <media/openmax/OMX_Component.h>
+#include <media/openmax/OMX_IndexExt.h>
+#include <media/OMXBuffer.h>
#include <inttypes.h>
-#include "FrameDropper.h"
#include <functional>
#include <memory>
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 8c1141d..93b4dbe 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -22,15 +22,12 @@
#include <dlfcn.h>
-#include "../include/OMX.h"
-
-#include "../include/OMXNodeInstance.h"
-
+#include <media/stagefright/omx/OMX.h>
+#include <media/stagefright/omx/OMXNodeInstance.h>
+#include <media/stagefright/omx/BWGraphicBufferSource.h>
+#include <media/stagefright/omx/OMXMaster.h>
+#include <media/stagefright/omx/OMXUtils.h>
#include <media/stagefright/foundation/ADebug.h>
-#include "BWGraphicBufferSource.h"
-
-#include "OMXMaster.h"
-#include "OMXUtils.h"
namespace android {
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index ac9b0c3..fd97fdc 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -18,15 +18,13 @@
#define LOG_TAG "OMXMaster"
#include <utils/Log.h>
-#include "OMXMaster.h"
-
-#include "SoftOMXPlugin.h"
+#include <media/stagefright/omx/OMXMaster.h>
+#include <media/stagefright/omx/SoftOMXPlugin.h>
+#include <media/stagefright/foundation/ADebug.h>
#include <dlfcn.h>
#include <fcntl.h>
-#include <media/stagefright/foundation/ADebug.h>
-
namespace android {
OMXMaster::OMXMaster()
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index bc4ce9d..c749454 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -20,20 +20,20 @@
#include <inttypes.h>
-#include "../include/OMXNodeInstance.h"
-#include "OMXMaster.h"
-#include "OMXUtils.h"
+#include <media/stagefright/omx/OMXNodeInstance.h>
+#include <media/stagefright/omx/OMXMaster.h>
+#include <media/stagefright/omx/OMXUtils.h>
#include <android/IOMXBufferSource.h>
-#include <OMX_Component.h>
-#include <OMX_IndexExt.h>
-#include <OMX_VideoExt.h>
-#include <OMX_AsString.h>
+#include <media/openmax/OMX_Component.h>
+#include <media/openmax/OMX_IndexExt.h>
+#include <media/openmax/OMX_VideoExt.h>
+#include <media/openmax/OMX_AsString.h>
#include <binder/IMemory.h>
#include <cutils/properties.h>
#include <gui/BufferQueue.h>
-#include <HardwareAPI.h>
+#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ColorUtils.h>
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index a66d565..5894837 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -19,13 +19,13 @@
#include <string.h>
-#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/omx/OMXUtils.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/hardware/HardwareAPI.h>
#include <media/MediaDefs.h>
#include <system/graphics-base.h>
-#include "OMXUtils.h"
namespace android {
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 761b425..09e6d75 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -18,8 +18,7 @@
#define LOG_TAG "SimpleSoftOMXComponent"
#include <utils/Log.h>
-#include "include/SimpleSoftOMXComponent.h"
-
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
diff --git a/media/libstagefright/omx/SoftOMXComponent.cpp b/media/libstagefright/omx/SoftOMXComponent.cpp
index df978f8..ee269e1 100644
--- a/media/libstagefright/omx/SoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftOMXComponent.cpp
@@ -18,8 +18,7 @@
#define LOG_TAG "SoftOMXComponent"
#include <utils/Log.h>
-#include "include/SoftOMXComponent.h"
-
+#include <media/stagefright/omx/SoftOMXComponent.h>
#include <media/stagefright/foundation/ADebug.h>
namespace android {
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 0bc65e1..4946ada 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -18,8 +18,8 @@
#define LOG_TAG "SoftOMXPlugin"
#include <utils/Log.h>
-#include "SoftOMXPlugin.h"
-#include "include/SoftOMXComponent.h"
+#include <media/stagefright/omx/SoftOMXPlugin.h>
+#include <media/stagefright/omx/SoftOMXComponent.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 920dd18..24ed981 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -20,14 +20,14 @@
#define LOG_TAG "SoftVideoDecoderOMXComponent"
#include <utils/Log.h>
-#include "include/SoftVideoDecoderOMXComponent.h"
+#include <media/stagefright/omx/SoftVideoDecoderOMXComponent.h>
-#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/MediaDefs.h>
+#include <media/hardware/HardwareAPI.h>
+#include <media/MediaDefs.h>
namespace android {
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index 7ecfbbb..f33bdc0 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -21,25 +21,22 @@
#include <utils/Log.h>
#include <utils/misc.h>
-#include "include/SoftVideoEncoderOMXComponent.h"
-
-#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/omx/SoftVideoEncoderOMXComponent.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/MediaDefs.h>
+#include <media/hardware/HardwareAPI.h>
+#include <media/openmax/OMX_IndexExt.h>
+#include <media/MediaDefs.h>
#include <ui/Fence.h>
#include <ui/GraphicBufferMapper.h>
#include <ui/Rect.h>
#include <hardware/gralloc.h>
-
#include <nativebase/nativebase.h>
-#include <OMX_IndexExt.h>
-
namespace android {
const static OMX_COLOR_FORMATTYPE kSupportedColorFormats[] = {
diff --git a/media/libstagefright/omx/1.0/Conversion.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
similarity index 99%
rename from media/libstagefright/omx/1.0/Conversion.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
index fd91574..f319bdc 100644
--- a/media/libstagefright/omx/1.0/Conversion.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
@@ -29,12 +29,12 @@
#include <binder/Binder.h>
#include <binder/Status.h>
#include <ui/FenceTime.h>
-#include <media/OMXFenceParcelable.h>
#include <cutils/native_handle.h>
#include <gui/IGraphicBufferProducer.h>
+#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
-#include <VideoAPI.h>
+#include <media/hardware/VideoAPI.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
diff --git a/media/libstagefright/omx/1.0/Omx.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
similarity index 97%
rename from media/libstagefright/omx/1.0/Omx.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
index 5fdf38e..a6a9d3e 100644
--- a/media/libstagefright/omx/1.0/Omx.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
@@ -20,10 +20,9 @@
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
-#include "../../include/OMXNodeInstance.h"
-
-#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <media/stagefright/omx/OMXNodeInstance.h>
#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
namespace android {
diff --git a/media/libstagefright/omx/1.0/OmxStore.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/OmxStore.h
similarity index 100%
rename from media/libstagefright/omx/1.0/OmxStore.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/OmxStore.h
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferProducer.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferProducer.h
similarity index 100%
rename from media/libstagefright/omx/1.0/WGraphicBufferProducer.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferProducer.h
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
similarity index 98%
rename from media/libstagefright/omx/1.0/WGraphicBufferSource.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
index 4549c97..b9f22ab 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
@@ -28,7 +28,7 @@
#include <android/BnGraphicBufferSource.h>
-#include "../GraphicBufferSource.h"
+#include <media/stagefright/omx/GraphicBufferSource.h>
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/1.0/WOmxBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WOmxBufferSource.h
similarity index 100%
rename from media/libstagefright/omx/1.0/WOmxBufferSource.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/WOmxBufferSource.h
diff --git a/media/libstagefright/omx/1.0/WOmxNode.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WOmxNode.h
similarity index 98%
rename from media/libstagefright/omx/1.0/WOmxNode.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/WOmxNode.h
index d715374..38d5885 100644
--- a/media/libstagefright/omx/1.0/WOmxNode.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WOmxNode.h
@@ -22,7 +22,7 @@
#include <utils/Errors.h>
-#include "../../include/OMXNodeInstance.h"
+#include <media/stagefright/omx/OMXNodeInstance.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <android/hardware/media/omx/1.0/IOmxObserver.h>
diff --git a/media/libstagefright/omx/1.0/WOmxObserver.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WOmxObserver.h
similarity index 100%
rename from media/libstagefright/omx/1.0/WOmxObserver.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/WOmxObserver.h
diff --git a/media/libstagefright/omx/1.0/WProducerListener.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WProducerListener.h
similarity index 100%
rename from media/libstagefright/omx/1.0/WProducerListener.h
rename to media/libstagefright/omx/include/media/stagefright/omx/1.0/WProducerListener.h
diff --git a/media/libstagefright/omx/BWGraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/BWGraphicBufferSource.h
similarity index 100%
rename from media/libstagefright/omx/BWGraphicBufferSource.h
rename to media/libstagefright/omx/include/media/stagefright/omx/BWGraphicBufferSource.h
diff --git a/media/libstagefright/omx/FrameDropper.h b/media/libstagefright/omx/include/media/stagefright/omx/FrameDropper.h
similarity index 100%
rename from media/libstagefright/omx/FrameDropper.h
rename to media/libstagefright/omx/include/media/stagefright/omx/FrameDropper.h
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/GraphicBufferSource.h
similarity index 100%
rename from media/libstagefright/omx/GraphicBufferSource.h
rename to media/libstagefright/omx/include/media/stagefright/omx/GraphicBufferSource.h
diff --git a/media/libstagefright/omx/IOmxNodeWrapper.h b/media/libstagefright/omx/include/media/stagefright/omx/IOmxNodeWrapper.h
similarity index 100%
rename from media/libstagefright/omx/IOmxNodeWrapper.h
rename to media/libstagefright/omx/include/media/stagefright/omx/IOmxNodeWrapper.h
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/omx/include/media/stagefright/omx/OMX.h
similarity index 100%
rename from media/libstagefright/include/OMX.h
rename to media/libstagefright/omx/include/media/stagefright/omx/OMX.h
diff --git a/media/libstagefright/omx/OMXMaster.h b/media/libstagefright/omx/include/media/stagefright/omx/OMXMaster.h
similarity index 100%
rename from media/libstagefright/omx/OMXMaster.h
rename to media/libstagefright/omx/include/media/stagefright/omx/OMXMaster.h
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
similarity index 100%
rename from media/libstagefright/include/OMXNodeInstance.h
rename to media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/include/media/stagefright/omx/OMXUtils.h
similarity index 100%
rename from media/libstagefright/omx/OMXUtils.h
rename to media/libstagefright/omx/include/media/stagefright/omx/OMXUtils.h
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
similarity index 100%
rename from media/libstagefright/include/SimpleSoftOMXComponent.h
rename to media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
diff --git a/media/libstagefright/include/SoftOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
similarity index 100%
rename from media/libstagefright/include/SoftOMXComponent.h
rename to media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
diff --git a/media/libstagefright/omx/SoftOMXPlugin.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXPlugin.h
similarity index 100%
rename from media/libstagefright/omx/SoftOMXPlugin.h
rename to media/libstagefright/omx/include/media/stagefright/omx/SoftOMXPlugin.h
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
similarity index 100%
rename from media/libstagefright/include/SoftVideoDecoderOMXComponent.h
rename to media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
diff --git a/media/libstagefright/include/SoftVideoEncoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
similarity index 100%
rename from media/libstagefright/include/SoftVideoEncoderOMXComponent.h
rename to media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
diff --git a/media/libstagefright/omx/tests/FrameDropper_test.cpp b/media/libstagefright/omx/tests/FrameDropper_test.cpp
index f966b5e..a925da6 100644
--- a/media/libstagefright/omx/tests/FrameDropper_test.cpp
+++ b/media/libstagefright/omx/tests/FrameDropper_test.cpp
@@ -20,7 +20,7 @@
#include <gtest/gtest.h>
-#include "FrameDropper.h"
+#include <media/stagefright/omx/FrameDropper.h>
#include <media/stagefright/foundation/ADebug.h>
namespace android {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 8efcce6..459e4fb 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -7877,17 +7877,34 @@
{
AudioParameter param = AudioParameter(keyValuePair);
int value;
+ bool sendToHal = true;
if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+ audio_devices_t device = (audio_devices_t)value;
// forward device change to effects that have requested to be
// aware of attached audio device.
- if (value != AUDIO_DEVICE_NONE) {
- mOutDevice = value;
+ if (device != AUDIO_DEVICE_NONE) {
for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->setDevice_l(mOutDevice);
+ mEffectChains[i]->setDevice_l(device);
}
}
+ if (audio_is_output_devices(device)) {
+ mOutDevice = device;
+ if (!isOutput()) {
+ sendToHal = false;
+ }
+ } else {
+ mInDevice = device;
+ if (device != AUDIO_DEVICE_NONE) {
+ mPrevInDevice = device;
+ }
+ // TODO: implement and call checkBtNrec_l();
+ }
}
- status = mHalStream->setParameters(keyValuePair);
+ if (sendToHal) {
+ status = mHalStream->setParameters(keyValuePair);
+ } else {
+ status = NO_ERROR;
+ }
return false;
}
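The MmapThread hunk above stops forwarding output-device routing changes to a capture thread's HAL stream and only records them locally. A minimal standalone sketch of that decision logic, using simplified stand-in types rather than the AudioFlinger classes (the device-mask constants and the isOutputDevice() helper here are assumptions for illustration):

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for the Android types used in the hunk above.
using audio_devices_t = uint32_t;
constexpr audio_devices_t AUDIO_DEVICE_NONE   = 0u;
constexpr audio_devices_t AUDIO_DEVICE_BIT_IN = 0x80000000u;  // assumption: input devices carry this bit

static bool isOutputDevice(audio_devices_t device) {
    return (device & AUDIO_DEVICE_BIT_IN) == 0;
}

struct MmapThreadSketch {
    bool isOutput = false;               // true for a playback (output) MMAP thread
    audio_devices_t outDevice = AUDIO_DEVICE_NONE;
    audio_devices_t inDevice  = AUDIO_DEVICE_NONE;

    // Returns true if the routing key/value pair should still be sent to the HAL stream.
    bool handleRouting(audio_devices_t device, std::vector<audio_devices_t>& effectDevices) {
        bool sendToHal = true;
        if (device != AUDIO_DEVICE_NONE) {
            for (auto& d : effectDevices) d = device;   // forward the device change to effects
        }
        if (isOutputDevice(device)) {
            outDevice = device;
            if (!isOutput) sendToHal = false;           // record only; do not touch the capture HAL stream
        } else {
            inDevice = device;
        }
        return sendToHal;
    }
};

int main() {
    std::vector<audio_devices_t> effects(2, AUDIO_DEVICE_NONE);
    MmapThreadSketch capture;
    capture.isOutput = false;            // a capture (input) MMAP thread
    // An output-device routing change on a capture thread is recorded but not sent to its HAL stream.
    printf("sendToHal=%d\n", capture.handleRouting(0x2u /*speaker mask, assumption*/, effects));
    return 0;
}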
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 55c364f..1a7db26 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -933,7 +933,6 @@
if (stream == AUDIO_STREAM_TTS) {
flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
- getPhoneState() == AUDIO_MODE_IN_COMMUNICATION &&
audio_is_linear_pcm(format)) {
flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
@@ -1647,7 +1646,6 @@
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
} else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
- getPhoneState() == AUDIO_MODE_IN_COMMUNICATION &&
audio_is_linear_pcm(format)) {
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
}
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 2bf73a0..e8fc080 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -85,7 +85,7 @@
virtual binder::Status endConfigure(int operatingMode) override;
- // Returns -EBUSY if device is not idle
+ // Returns -EBUSY if device is not idle or in error state
virtual binder::Status deleteStream(int streamId) override;
virtual binder::Status createStream(
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 0fc3740..69b1d7d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -240,6 +240,7 @@
status_t res = OK;
std::vector<wp<Camera3StreamInterface>> streams;
+ nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
{
Mutex::Autolock l(mLock);
if (mStatus == STATUS_UNINITIALIZED) return res;
@@ -251,7 +252,6 @@
SET_ERR_L("Can't stop streaming");
// Continue to close device even in case of error
} else {
- nsecs_t maxExpectedDuration = getExpectedInFlightDurationLocked();
res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
if (res != OK) {
SET_ERR_L("Timeout waiting for HAL to drain (% " PRIi64 " ns)",
@@ -311,10 +311,10 @@
{
Mutex::Autolock l(mLock);
- mExpectedInflightDuration = 0;
mInterface->clear();
mOutputStreams.clear();
mInputStream.clear();
+ mDeletedStreams.clear();
mBufferManager.clear();
internalUpdateStatusLocked(STATUS_UNINITIALIZED);
}
@@ -1141,6 +1141,7 @@
uint32_t width, uint32_t height, int format, int *id) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
+ nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
Mutex::Autolock l(mLock);
ALOGV("Camera %s: Creating new input stream %d: %d x %d, format %d",
mId.string(), mNextStreamId, width, height, format);
@@ -1161,7 +1162,7 @@
break;
case STATUS_ACTIVE:
ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
- res = internalPauseAndWaitLocked();
+ res = internalPauseAndWaitLocked(maxExpectedDuration);
if (res != OK) {
SET_ERR_L("Can't pause captures to reconfigure streams!");
return res;
@@ -1228,6 +1229,7 @@
int streamSetId, bool isShared, uint32_t consumerUsage) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
+ nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
Mutex::Autolock l(mLock);
ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
" consumer usage 0x%x, isShared %d", mId.string(), mNextStreamId, width, height, format,
@@ -1249,7 +1251,7 @@
break;
case STATUS_ACTIVE:
ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
- res = internalPauseAndWaitLocked();
+ res = internalPauseAndWaitLocked(maxExpectedDuration);
if (res != OK) {
SET_ERR_L("Can't pause captures to reconfigure streams!");
return res;
@@ -1428,6 +1430,12 @@
return -EBUSY;
}
+ if (mStatus == STATUS_ERROR) {
+ ALOGW("%s: Camera %s: deleteStream not allowed in ERROR state",
+ __FUNCTION__, mId.string());
+ return -EBUSY;
+ }
+
sp<Camera3StreamInterface> deletedStream;
ssize_t outputStreamIdx = mOutputStreams.indexOfKey(id);
if (mInputStream != NULL && id == mInputStream->getId()) {
@@ -1494,59 +1502,66 @@
}
Mutex::Autolock il(mInterfaceLock);
- Mutex::Autolock l(mLock);
- switch (mStatus) {
- case STATUS_ERROR:
- CLOGE("Device has encountered a serious error");
- return INVALID_OPERATION;
- case STATUS_UNINITIALIZED:
- CLOGE("Device is not initialized!");
- return INVALID_OPERATION;
- case STATUS_UNCONFIGURED:
- case STATUS_CONFIGURED:
- case STATUS_ACTIVE:
- // OK
- break;
- default:
- SET_ERR_L("Unexpected status: %d", mStatus);
- return INVALID_OPERATION;
- }
+ {
+ Mutex::Autolock l(mLock);
+ switch (mStatus) {
+ case STATUS_ERROR:
+ CLOGE("Device has encountered a serious error");
+ return INVALID_OPERATION;
+ case STATUS_UNINITIALIZED:
+ CLOGE("Device is not initialized!");
+ return INVALID_OPERATION;
+ case STATUS_UNCONFIGURED:
+ case STATUS_CONFIGURED:
+ case STATUS_ACTIVE:
+ // OK
+ break;
+ default:
+ SET_ERR_L("Unexpected status: %d", mStatus);
+ return INVALID_OPERATION;
+ }
- if (!mRequestTemplateCache[templateId].isEmpty()) {
- *request = mRequestTemplateCache[templateId];
- return OK;
+ if (!mRequestTemplateCache[templateId].isEmpty()) {
+ *request = mRequestTemplateCache[templateId];
+ return OK;
+ }
}
camera_metadata_t *rawRequest;
status_t res = mInterface->constructDefaultRequestSettings(
(camera3_request_template_t) templateId, &rawRequest);
- if (res == BAD_VALUE) {
- ALOGI("%s: template %d is not supported on this camera device",
- __FUNCTION__, templateId);
- return res;
- } else if (res != OK) {
- CLOGE("Unable to construct request template %d: %s (%d)",
- templateId, strerror(-res), res);
- return res;
+
+ {
+ Mutex::Autolock l(mLock);
+ if (res == BAD_VALUE) {
+ ALOGI("%s: template %d is not supported on this camera device",
+ __FUNCTION__, templateId);
+ return res;
+ } else if (res != OK) {
+ CLOGE("Unable to construct request template %d: %s (%d)",
+ templateId, strerror(-res), res);
+ return res;
+ }
+
+ set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
+ mRequestTemplateCache[templateId].acquire(rawRequest);
+
+ *request = mRequestTemplateCache[templateId];
}
-
- set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
- mRequestTemplateCache[templateId].acquire(rawRequest);
-
- *request = mRequestTemplateCache[templateId];
return OK;
}
status_t Camera3Device::waitUntilDrained() {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
+ nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
Mutex::Autolock l(mLock);
- return waitUntilDrainedLocked();
+ return waitUntilDrainedLocked(maxExpectedDuration);
}
-status_t Camera3Device::waitUntilDrainedLocked() {
+status_t Camera3Device::waitUntilDrainedLocked(nsecs_t maxExpectedDuration) {
switch (mStatus) {
case STATUS_UNINITIALIZED:
case STATUS_UNCONFIGURED:
@@ -1562,9 +1577,6 @@
SET_ERR_L("Unexpected status: %d",mStatus);
return INVALID_OPERATION;
}
-
- nsecs_t maxExpectedDuration = getExpectedInFlightDurationLocked();
-
ALOGV("%s: Camera %s: Waiting until idle (%" PRIi64 "ns)", __FUNCTION__, mId.string(),
maxExpectedDuration);
status_t res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
@@ -1583,11 +1595,10 @@
}
// Pause to reconfigure
-status_t Camera3Device::internalPauseAndWaitLocked() {
+status_t Camera3Device::internalPauseAndWaitLocked(nsecs_t maxExpectedDuration) {
mRequestThread->setPaused(true);
mPauseStateNotify = true;
- nsecs_t maxExpectedDuration = getExpectedInFlightDurationLocked();
ALOGV("%s: Camera %s: Internal wait until idle (% " PRIi64 " ns)", __FUNCTION__, mId.string(),
maxExpectedDuration);
status_t res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
@@ -2339,7 +2350,9 @@
mErrorCause = errorCause;
- mRequestThread->setPaused(true);
+ if (mRequestThread != nullptr) {
+ mRequestThread->setPaused(true);
+ }
internalUpdateStatusLocked(STATUS_ERROR);
// Notify upstream about a device error
@@ -2377,9 +2390,7 @@
}
}
- Mutex::Autolock ml(mLock);
mExpectedInflightDuration += maxExpectedDuration;
-
return OK;
}
@@ -2411,7 +2422,6 @@
mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
}
}
- Mutex::Autolock l(mLock);
mExpectedInflightDuration -= duration;
}
@@ -2493,6 +2503,7 @@
request.pendingOutputBuffers.size(), 0);
}
mInFlightMap.clear();
+ mExpectedInflightDuration = 0;
}
// Then return all inflight buffers not returned by HAL
@@ -2516,16 +2527,60 @@
streamBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
streamBuffer.acquire_fence = -1;
streamBuffer.release_fence = -1;
+
+ // First check if the buffer belongs to deleted stream
+ bool streamDeleted = false;
+ for (auto& stream : mDeletedStreams) {
+ if (streamId == stream->getId()) {
+ streamDeleted = true;
+ // Return buffer to deleted stream
+ camera3_stream* halStream = stream->asHalStream();
+ streamBuffer.stream = halStream;
+ switch (halStream->stream_type) {
+ case CAMERA3_STREAM_OUTPUT:
+ res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0);
+ if (res != OK) {
+ ALOGE("%s: Can't return output buffer for frame %d to"
+ " stream %d: %s (%d)", __FUNCTION__,
+ frameNumber, streamId, strerror(-res), res);
+ }
+ break;
+ case CAMERA3_STREAM_INPUT:
+ res = stream->returnInputBuffer(streamBuffer);
+ if (res != OK) {
+ ALOGE("%s: Can't return input buffer for frame %d to"
+ " stream %d: %s (%d)", __FUNCTION__,
+ frameNumber, streamId, strerror(-res), res);
+ }
+ break;
+ default: // Bi-directional stream is deprecated
+ ALOGE("%s: stream %d has unknown stream type %d",
+ __FUNCTION__, streamId, halStream->stream_type);
+ break;
+ }
+ break;
+ }
+ }
+ if (streamDeleted) {
+ continue;
+ }
+
+ // Then check against configured streams
if (streamId == inputStreamId) {
streamBuffer.stream = mInputStream->asHalStream();
res = mInputStream->returnInputBuffer(streamBuffer);
if (res != OK) {
ALOGE("%s: Can't return input buffer for frame %d to"
- " its stream:%s (%d)", __FUNCTION__,
- frameNumber, strerror(-res), res);
+ " stream %d: %s (%d)", __FUNCTION__,
+ frameNumber, streamId, strerror(-res), res);
}
} else {
- streamBuffer.stream = mOutputStreams.valueFor(streamId)->asHalStream();
+ ssize_t idx = mOutputStreams.indexOfKey(streamId);
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Output stream id %d not found!", __FUNCTION__, streamId);
+ continue;
+ }
+ streamBuffer.stream = mOutputStreams.valueAt(idx)->asHalStream();
returnOutputBuffers(&streamBuffer, /*size*/1, /*timestamp*/ 0);
}
}
@@ -4243,7 +4298,8 @@
return false;
}
-nsecs_t Camera3Device::getExpectedInFlightDurationLocked() {
+nsecs_t Camera3Device::getExpectedInFlightDuration() {
+ Mutex::Autolock al(mInFlightLock);
return mExpectedInflightDuration > kMinInflightDuration ?
mExpectedInflightDuration : kMinInflightDuration;
}
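The Camera3Device hunks above replace getExpectedInFlightDurationLocked() with getExpectedInFlightDuration(), which takes mInFlightLock itself; callers therefore read the duration before acquiring mLock so the two mutexes never nest the other way around. A minimal sketch of that ordering with hypothetical names (the kMinInflightDuration value is an assumption):

#include <cstdint>
#include <mutex>

using nsecs_t = int64_t;

class DeviceSketch {
public:
    // Takes only mInFlightLock, so it must not be called while mLock is already held.
    nsecs_t getExpectedInFlightDuration() {
        std::lock_guard<std::mutex> lk(mInFlightLock);
        return mExpectedInflightDuration > kMinInflightDuration ?
                mExpectedInflightDuration : kMinInflightDuration;
    }

    void waitUntilDrained() {
        // Read the duration first, then take the device lock, mirroring the pattern above.
        nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
        std::lock_guard<std::mutex> lk(mLock);
        waitUntilDrainedLocked(maxExpectedDuration);
    }

private:
    void waitUntilDrainedLocked(nsecs_t /*maxExpectedDuration*/) { /* wait for idle... */ }

    static constexpr nsecs_t kMinInflightDuration = 160000000;  // assumption: ~160 ms floor
    std::mutex mLock;          // device state lock
    std::mutex mInFlightLock;  // protects mExpectedInflightDuration
    nsecs_t mExpectedInflightDuration = 0;
};

int main() {
    DeviceSketch device;
    device.waitUntilDrained();
    return 0;
}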
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 686a28b..d700e03 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -486,7 +486,7 @@
* CameraDeviceBase interface we shouldn't need to.
* Must be called with mLock and mInterfaceLock both held.
*/
- status_t internalPauseAndWaitLocked();
+ status_t internalPauseAndWaitLocked(nsecs_t maxExpectedDuration);
/**
* Resume work after internalPauseAndWaitLocked()
@@ -512,7 +512,7 @@
*
* Need to be called with mLock and mInterfaceLock held.
*/
- status_t waitUntilDrainedLocked();
+ status_t waitUntilDrainedLocked(nsecs_t maxExpectedDuration);
/**
* Do common work for setting up a streaming or single capture request.
@@ -915,11 +915,14 @@
// Map from frame number to the in-flight request state
typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap;
- nsecs_t mExpectedInflightDuration = 0;
- Mutex mInFlightLock; // Protects mInFlightMap
+
+ Mutex mInFlightLock; // Protects mInFlightMap and
+ // mExpectedInflightDuration
InFlightMap mInFlightMap;
+ nsecs_t mExpectedInflightDuration = 0;
int mInFlightStatusId;
+
status_t registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
bool callback, nsecs_t maxExpectedDuration);
@@ -928,7 +931,7 @@
* Returns the maximum expected time it'll take for all currently in-flight
* requests to complete, based on their settings
*/
- nsecs_t getExpectedInFlightDurationLocked();
+ nsecs_t getExpectedInFlightDuration();
/**
* Tracking for idle detection
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 35096eb..ff2dcef 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -293,6 +293,15 @@
void Camera3InputStream::onBufferFreed(const wp<GraphicBuffer>& gb) {
const sp<GraphicBuffer> buffer = gb.promote();
if (buffer != nullptr) {
+ camera3_stream_buffer streamBuffer =
+ {nullptr, &buffer->handle, 0, -1, -1};
+ // Check if this buffer is outstanding.
+ if (isOutstandingBuffer(streamBuffer)) {
+ ALOGV("%s: Stream %d: Trying to free a buffer that is still being "
+ "processed.", __FUNCTION__, mId);
+ return;
+ }
+
sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
if (callback != nullptr) {
callback->onBufferFreed(mId, buffer->handle);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index e77421a..9e6ac79 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -479,6 +479,7 @@
if (res == OK) {
fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
if (buffer->buffer) {
+ Mutex::Autolock l(mOutstandingBuffersLock);
mOutstandingBuffers.push_back(*buffer->buffer);
}
}
@@ -486,11 +487,13 @@
return res;
}
-bool Camera3Stream::isOutstandingBuffer(const camera3_stream_buffer &buffer) {
+bool Camera3Stream::isOutstandingBuffer(const camera3_stream_buffer &buffer) const {
if (buffer.buffer == nullptr) {
return false;
}
+ Mutex::Autolock l(mOutstandingBuffersLock);
+
for (auto b : mOutstandingBuffers) {
if (b == *buffer.buffer) {
return true;
@@ -504,6 +507,8 @@
return;
}
+ Mutex::Autolock l(mOutstandingBuffersLock);
+
for (auto b = mOutstandingBuffers.begin(); b != mOutstandingBuffers.end(); b++) {
if (*b == *buffer.buffer) {
mOutstandingBuffers.erase(b);
@@ -575,6 +580,7 @@
if (res == OK) {
fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/false);
if (buffer->buffer) {
+ Mutex::Autolock l(mOutstandingBuffersLock);
mOutstandingBuffers.push_back(*buffer->buffer);
}
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 0940d62..44fe6b6 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -461,6 +461,9 @@
// INVALID_OPERATION if they cannot be obtained.
virtual status_t getEndpointUsage(uint32_t *usage) const = 0;
+ // Return whether the buffer is in the list of outstanding buffers.
+ bool isOutstandingBuffer(const camera3_stream_buffer& buffer) const;
+
// Tracking for idle state
wp<StatusTracker> mStatusTracker;
// Status tracker component ID
@@ -483,9 +486,6 @@
status_t cancelPrepareLocked();
- // Return whether the buffer is in the list of outstanding buffers.
- bool isOutstandingBuffer(const camera3_stream_buffer& buffer);
-
// Remove the buffer from the list of outstanding buffers.
void removeOutstandingBuffer(const camera3_stream_buffer& buffer);
@@ -502,6 +502,7 @@
// Number of buffers allocated on last prepare call.
size_t mLastMaxCount;
+ mutable Mutex mOutstandingBuffersLock;
// Outstanding buffers dequeued from the stream's buffer queue.
List<buffer_handle_t> mOutstandingBuffers;
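The Camera3Stream hunks above protect mOutstandingBuffers with a dedicated mutable mutex so that the now-const isOutstandingBuffer() can still lock it. A small illustrative sketch of the pattern, with int standing in for buffer_handle_t:

#include <list>
#include <mutex>

class StreamSketch {
public:
    void addOutstanding(int handle) {
        std::lock_guard<std::mutex> lk(mOutstandingBuffersLock);
        mOutstandingBuffers.push_back(handle);
    }

    // const query: possible because the mutex is declared mutable.
    bool isOutstandingBuffer(int handle) const {
        std::lock_guard<std::mutex> lk(mOutstandingBuffersLock);
        for (int b : mOutstandingBuffers) {
            if (b == handle) return true;
        }
        return false;
    }

    void removeOutstanding(int handle) {
        std::lock_guard<std::mutex> lk(mOutstandingBuffersLock);
        for (auto it = mOutstandingBuffers.begin(); it != mOutstandingBuffers.end(); ++it) {
            if (*it == handle) {
                mOutstandingBuffers.erase(it);
                return;
            }
        }
    }

private:
    mutable std::mutex mOutstandingBuffersLock;  // protects mOutstandingBuffers
    std::list<int> mOutstandingBuffers;          // int stands in for buffer_handle_t
};

int main() {
    StreamSketch stream;
    stream.addOutstanding(42);
    bool outstanding = stream.isOutstandingBuffer(42);   // true while dequeued
    stream.removeOutstanding(42);
    return outstanding ? 0 : 1;
}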
diff --git a/services/mediacodec/MediaCodecService.h b/services/mediacodec/MediaCodecService.h
index d64debb..0d2c9d8 100644
--- a/services/mediacodec/MediaCodecService.h
+++ b/services/mediacodec/MediaCodecService.h
@@ -19,7 +19,7 @@
#include <binder/BinderService.h>
#include <media/IMediaCodecService.h>
-#include <include/OMX.h>
+#include <media/stagefright/omx/OMX.h>
namespace android {
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index c59944a..79d6da5 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -32,8 +32,8 @@
#include "minijail.h"
#include <hidl/HidlTransportSupport.h>
-#include <omx/1.0/Omx.h>
-#include <omx/1.0/OmxStore.h>
+#include <media/stagefright/omx/1.0/Omx.h>
+#include <media/stagefright/omx/1.0/OmxStore.h>
using namespace android;
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index 52658d1..cbd7fb9 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -12,6 +12,7 @@
dup: 1
ppoll: 1
mmap2: 1
+getrandom: 1
# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
# parser support for '<' is in this needs to be modified to also prevent
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 43203d4..952aa82 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -18,9 +18,17 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
#include <cstring>
+#include <utils/Trace.h>
+
#include "AAudioMixer.h"
+#ifndef AAUDIO_MIXER_ATRACE_ENABLED
+#define AAUDIO_MIXER_ATRACE_ENABLED 1
+#endif
+
using android::WrappingBuffer;
using android::FifoBuffer;
using android::fifo_frames_t;
@@ -41,13 +49,28 @@
memset(mOutputBuffer, 0, mBufferSizeInBytes);
}
-bool AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+bool AAudioMixer::mix(int trackIndex, FifoBuffer *fifo, float volume) {
WrappingBuffer wrappingBuffer;
float *destination = mOutputBuffer;
fifo_frames_t framesLeft = mFramesPerBurst;
+#if AAUDIO_MIXER_ATRACE_ENABLED
+ ATRACE_BEGIN("aaMix");
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+
// Gather the data from the client. May be in two parts.
- fifo->getFullDataAvailable(&wrappingBuffer);
+ fifo_frames_t fullFrames = fifo->getFullDataAvailable(&wrappingBuffer);
+#if AAUDIO_MIXER_ATRACE_ENABLED
+ if (ATRACE_ENABLED()) {
+ char rdyText[] = "aaMixRdy#";
+ char letter = 'A' + (trackIndex % 26);
+ rdyText[sizeof(rdyText) - 2] = letter;
+ ATRACE_INT(rdyText, fullFrames);
+ }
+#else /* AAUDIO_MIXER_ATRACE_ENABLED */
+ (void) trackIndex;
+ (void) fullFrames;
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
// Mix data in one or two parts.
int partIndex = 0;
@@ -65,11 +88,15 @@
}
partIndex++;
}
- fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst - framesLeft);
- if (framesLeft > 0) {
- //ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
- // framesLeft, mFramesPerBurst);
- }
+ // Always advance by one burst even if we do not have the data.
+ // Otherwise the stream timing will drift whenever there is an underflow.
+ // The resulting underflow can then be detected by the client for XRun counting.
+ fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst);
+
+#if AAUDIO_MIXER_ATRACE_ENABLED
+ ATRACE_END();
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+
return (framesLeft > 0); // did not get all the frames we needed, ie. "underflow"
}
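The AAudioMixer hunk above always advances the read index by a full burst even when fewer frames are available, so an underflow surfaces as missing data rather than as timing drift. A minimal ring-buffer sketch of that policy (plain C++, not the FifoBuffer API; all names here are hypothetical):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

class BurstReaderSketch {
public:
    BurstReaderSketch(int32_t capacity, int32_t framesPerBurst)
        : mBuffer(capacity, 0.0f), mFramesPerBurst(framesPerBurst) {}

    void write(float sample) { mBuffer[mWriteIndex++ % mBuffer.size()] = sample; }

    // Returns true on underflow. The read index always advances by a full burst,
    // so stream timing stays fixed and the caller can count the XRun.
    bool readBurst(std::vector<float>& out) {
        int64_t available = std::max<int64_t>(0, mWriteIndex - mReadIndex);
        int32_t framesToCopy = static_cast<int32_t>(
                std::min<int64_t>(available, mFramesPerBurst));
        out.assign(mFramesPerBurst, 0.0f);          // missing frames come out as silence
        for (int32_t i = 0; i < framesToCopy; i++) {
            out[i] = mBuffer[(mReadIndex + i) % mBuffer.size()];
        }
        mReadIndex += mFramesPerBurst;              // advance by a burst regardless
        return framesToCopy < mFramesPerBurst;      // underflow indication
    }

private:
    std::vector<float> mBuffer;
    int64_t mWriteIndex = 0;
    int64_t mReadIndex = 0;
    int32_t mFramesPerBurst;
};

int main() {
    BurstReaderSketch reader(/*capacity=*/64, /*framesPerBurst=*/8);
    for (int i = 0; i < 5; i++) reader.write(1.0f);   // only 5 of 8 frames available
    std::vector<float> burst;
    printf("underflowed=%d\n", reader.readBurst(burst));
    return 0;
}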
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index 9155fec..a8090bc 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -37,7 +37,7 @@
* @param volume
* @return true if underflowed
*/
- bool mix(android::FifoBuffer *fifo, float volume);
+ bool mix(int trackIndex, android::FifoBuffer *fifo, float volume);
void mixPart(float *destination, float *source, int32_t numFrames, float volume);
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index e609ab5..b83b918 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -74,16 +74,18 @@
// Mix data from each active stream.
mMixer.clear();
{ // use lock guard
+ int index = 0;
std::lock_guard <std::mutex> lock(mLockStreams);
for (sp<AAudioServiceStreamShared> sharedStream : mRegisteredStreams) {
if (sharedStream->isRunning()) {
FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
float volume = 1.0; // to match legacy volume
- bool underflowed = mMixer.mix(fifo, volume);
+ bool underflowed = mMixer.mix(index, fifo, volume);
underflowCount += underflowed ? 1 : 0;
// TODO log underflows in each stream
sharedStream->markTransferTime(AudioClock::getNanoseconds());
}
+ index++;
}
}
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index afa2ff0..ff02c0f 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -154,7 +154,7 @@
status);
return AAUDIO_ERROR_UNAVAILABLE;
} else {
- ALOGD("createMmapBuffer status %d, buffer_size, %d burst_size %d"
+ ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size = %d"
", Sharable FD: %s",
status,
abs(mMmapBufferinfo.buffer_size_frames),