Merge "aaudio: fix SHARED MMAP mode in server plus other bugs" into oc-dev
diff --git a/media/libaaudio/examples/write_sine/src/SineGenerator.h b/media/libaaudio/examples/write_sine/src/SineGenerator.h
index 64b772d..f2eb984 100644
--- a/media/libaaudio/examples/write_sine/src/SineGenerator.h
+++ b/media/libaaudio/examples/write_sine/src/SineGenerator.h
@@ -79,7 +79,7 @@
}
}
- double mAmplitude = 0.05; // unitless scaler
+ double mAmplitude = 0.005; // unitless scaler
double mPhase = 0.0;
double mPhaseIncrement = 440 * M_PI * 2 / 48000;
double mFrameRate = 48000;
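
For context, mAmplitude is a plain per-sample gain. A minimal sketch of the kind of render loop this constant scales (illustrative only; the actual SineGenerator::render() in this tree may differ):

    // Illustrative sketch, not the real SineGenerator code.
    void renderMono(float *frames, int numFrames) {
        for (int i = 0; i < numFrames; i++) {
            frames[i] = (float)(mAmplitude * sin(mPhase));   // scale the unit sine by mAmplitude
            mPhase += mPhaseIncrement;                       // 440 Hz at 48 kHz
            if (mPhase > 2.0 * M_PI) mPhase -= 2.0 * M_PI;   // keep the phase bounded
        }
    }
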
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index d8e5ec1..6525c0a 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -23,11 +23,15 @@
#include "SineGenerator.h"
#define SAMPLE_RATE 48000
-#define NUM_SECONDS 10
+#define NUM_SECONDS 5
#define NANOS_PER_MICROSECOND ((int64_t)1000)
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+#define REQUESTED_FORMAT AAUDIO_FORMAT_PCM_I16
+#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+//#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+
static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
const char *modeText = "unknown";
switch (mode) {
@@ -63,23 +67,21 @@
int actualSamplesPerFrame = 0;
const int requestedSampleRate = SAMPLE_RATE;
int actualSampleRate = 0;
- const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
- aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_I16;
+ aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
- //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
- const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
- int32_t framesPerBurst = 0;
- int32_t framesPerWrite = 0;
- int32_t bufferCapacity = 0;
- int32_t framesToPlay = 0;
- int32_t framesLeft = 0;
- int32_t xRunCount = 0;
- int16_t *data = nullptr;
+ int32_t framesPerBurst = 0;
+ int32_t framesPerWrite = 0;
+ int32_t bufferCapacity = 0;
+ int32_t framesToPlay = 0;
+ int32_t framesLeft = 0;
+ int32_t xRunCount = 0;
+ float *floatData = nullptr;
+ int16_t *shortData = nullptr;
SineGenerator sineOsc1;
SineGenerator sineOsc2;
@@ -88,7 +90,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine wave using AAudio\n", argv[0]);
+ printf("%s - Play a sine wave using AAudio, Z2\n", argv[0]);
// Use an AAudioStreamBuilder to contain requested parameters.
result = AAudio_createStreamBuilder(&aaudioBuilder);
@@ -99,8 +101,8 @@
// Request stream properties.
AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
- AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
- AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+ AAudioStreamBuilder_setFormat(aaudioBuilder, REQUESTED_FORMAT);
+ AAudioStreamBuilder_setSharingMode(aaudioBuilder, REQUESTED_SHARING_MODE);
// Create an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
@@ -124,15 +126,16 @@
actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
printf("SharingMode: requested = %s, actual = %s\n",
- getSharingModeText(requestedSharingMode),
+ getSharingModeText(REQUESTED_SHARING_MODE),
getSharingModeText(actualSharingMode));
// This is the number of frames that are read in one chunk by a DMA controller
// or a DSP or a mixer.
framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
- printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+ printf("Buffer: framesPerBurst = %d\n",framesPerBurst);
+ printf("Buffer: bufferSize = %d\n", AAudioStream_getBufferSizeInFrames(aaudioStream));
bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
- printf("DataFormat: bufferCapacity = %d, remainder = %d\n",
+ printf("Buffer: bufferCapacity = %d, remainder = %d\n",
bufferCapacity, bufferCapacity % framesPerBurst);
// Some DMA might use very short bursts of 16 frames. We don't need to write such small
@@ -144,14 +147,16 @@
printf("DataFormat: framesPerWrite = %d\n",framesPerWrite);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+ printf("DataFormat: requested = %d, actual = %d\n", REQUESTED_FORMAT, actualDataFormat);
// TODO handle other data formats
// Allocate a buffer for the audio data.
- data = new int16_t[framesPerWrite * actualSamplesPerFrame];
- if (data == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = AAUDIO_ERROR_NO_MEMORY;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ floatData = new float[framesPerWrite * actualSamplesPerFrame];
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ shortData = new int16_t[framesPerWrite * actualSamplesPerFrame];
+ } else {
+ printf("ERROR Unsupported data format!\n");
goto finish;
}
@@ -170,26 +175,41 @@
framesToPlay = actualSampleRate * NUM_SECONDS;
framesLeft = framesToPlay;
while (framesLeft > 0) {
- // Render sine waves to left and right channels.
- sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerWrite);
- if (actualSamplesPerFrame > 1) {
- sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerWrite);
+
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ // Render sine waves to left and right channels.
+ sineOsc1.render(&floatData[0], actualSamplesPerFrame, framesPerWrite);
+ if (actualSamplesPerFrame > 1) {
+ sineOsc2.render(&floatData[1], actualSamplesPerFrame, framesPerWrite);
+ }
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ // Render sine waves to left and right channels.
+ sineOsc1.render(&shortData[0], actualSamplesPerFrame, framesPerWrite);
+ if (actualSamplesPerFrame > 1) {
+ sineOsc2.render(&shortData[1], actualSamplesPerFrame, framesPerWrite);
+ }
}
// Write audio data to the stream.
- int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
- int minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
- int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
+ int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
+ int32_t minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
+ int32_t actual = 0;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ actual = AAudioStream_write(aaudioStream, floatData, minFrames, timeoutNanos);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ actual = AAudioStream_write(aaudioStream, shortData, minFrames, timeoutNanos);
+ }
if (actual < 0) {
- fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", actual);
+ fprintf(stderr, "ERROR - AAudioStream_write() returned %d\n", actual);
goto finish;
} else if (actual == 0) {
- fprintf(stderr, "WARNING - AAudioStream_write() returned %zd\n", actual);
+ fprintf(stderr, "WARNING - AAudioStream_write() returned %d\n", actual);
goto finish;
}
framesLeft -= actual;
// Use timestamp to estimate latency.
+ /*
{
int64_t presentationFrame;
int64_t presentationTime;
@@ -208,13 +228,15 @@
printf("estimatedLatencyMillis %d\n", (int)estimatedLatencyMillis);
}
}
+ */
}
xRunCount = AAudioStream_getXRunCount(aaudioStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
finish:
- delete[] data;
+ delete[] floatData;
+ delete[] shortData;
AAudioStream_close(aaudioStream);
AAudioStreamBuilder_delete(aaudioBuilder);
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 9414236..8c1072d 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -31,8 +31,6 @@
//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
-#define CALLBACK_SIZE_FRAMES 128
-
// TODO refactor common code into a single SimpleAAudio class
/**
* Simple wrapper for AAudio that opens a default stream and then calls
@@ -87,8 +85,8 @@
AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
- AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
- // AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, CALLBACK_SIZE_FRAMES * 4);
+ // AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+ AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, 48 * 8);
// Open an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
@@ -136,7 +134,7 @@
aaudio_result_t start() {
aaudio_result_t result = AAudioStream_requestStart(mStream);
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+ printf("ERROR - AAudioStream_requestStart() returned %d %s\n",
result, AAudio_convertResultToText(result));
}
return result;
@@ -146,7 +144,7 @@
aaudio_result_t stop() {
aaudio_result_t result = AAudioStream_requestStop(mStream);
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+ printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
result, AAudio_convertResultToText(result));
}
int32_t xRunCount = AAudioStream_getXRunCount(mStream);
@@ -169,9 +167,6 @@
typedef struct SineThreadedData_s {
SineGenerator sineOsc1;
SineGenerator sineOsc2;
- // Remove these variables used for testing.
- int32_t numFrameCounts;
- int32_t frameCounts[MAX_FRAME_COUNT_RECORDS];
int scheduler;
bool schedulerChecked;
} SineThreadedData_t;
@@ -186,10 +181,6 @@
SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
- if (sineData->numFrameCounts < MAX_FRAME_COUNT_RECORDS) {
- sineData->frameCounts[sineData->numFrameCounts++] = numFrames;
- }
-
if (!sineData->schedulerChecked) {
sineData->scheduler = sched_getscheduler(gettid());
sineData->schedulerChecked = true;
@@ -236,11 +227,10 @@
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
+ printf("%s - Play a sine sweep using an AAudio callback, Z1\n", argv[0]);
player.setSharingMode(SHARING_MODE);
- myData.numFrameCounts = 0;
myData.schedulerChecked = false;
result = player.open(MyDataCallbackProc, &myData);
@@ -291,19 +281,17 @@
}
printf("Woke up now.\n");
+ printf("call stop()\n");
result = player.stop();
if (result != AAUDIO_OK) {
goto error;
}
+ printf("call close()\n");
result = player.close();
if (result != AAUDIO_OK) {
goto error;
}
- // Report data gathered in the callback.
- for (int i = 0; i < myData.numFrameCounts; i++) {
- printf("numFrames[%4d] = %4d\n", i, myData.frameCounts[i]);
- }
if (myData.schedulerChecked) {
printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
myData.scheduler,
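
Because the callback example requests AAUDIO_FORMAT_PCM_FLOAT, its data callback renders straight into the float buffer AAudio hands it. A simplified sketch of such a callback (condensed from MyDataCallbackProc above; the real one also records the scheduler):

    aaudio_data_callback_result_t MyDataCallbackProc(AAudioStream *stream,
                                                     void *userData,
                                                     void *audioData,
                                                     int32_t numFrames) {
        SineThreadedData_t *data = (SineThreadedData_t *) userData;
        int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
        float *output = (float *) audioData;                    // PCM_FLOAT was requested
        data->sineOsc1.render(&output[0], samplesPerFrame, numFrames);
        if (samplesPerFrame > 1) {
            data->sineOsc2.render(&output[1], samplesPerFrame, numFrames);
        }
        return AAUDIO_CALLBACK_RESULT_CONTINUE;                 // keep the stream running
    }
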
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index 8315c40..3f1bba3 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -75,6 +75,10 @@
return gAAudioService;
}
+static void dropAAudioService() {
+ Mutex::Autolock _l(gServiceLock);
+ gAAudioService.clear(); // force a reconnect
+}
AAudioBinderClient::AAudioBinderClient()
: AAudioServiceInterface() {}
@@ -88,14 +92,26 @@
*/
aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
AAudioStreamConfiguration &configurationOutput) {
+ aaudio_handle_t stream;
+ for (int i = 0; i < 2; i++) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) {
+ return AAUDIO_ERROR_NO_SERVICE;
+ }
- const sp<IAAudioService> &service = getAAudioService();
- if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
- return service->openStream(request, configurationOutput);
+ stream = service->openStream(request, configurationOutput);
+
+ if (stream == AAUDIO_ERROR_NO_SERVICE) {
+ ALOGE("AAudioBinderClient: lost connection to AAudioService.");
+ dropAAudioService(); // force a reconnect
+ } else {
+ break;
+ }
+ }
+ return stream;
}
aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
-
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
return service->closeStream(streamHandle);
@@ -106,37 +122,33 @@
*/
aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
AudioEndpointParcelable &parcelable) {
-
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
return service->getStreamDescription(streamHandle, parcelable);
}
-/**
-* Start the flow of data.
-*/
aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
return service->startStream(streamHandle);
}
-/**
-* Stop the flow of data such that start() can resume without loss of data.
-*/
aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
- return service->startStream(streamHandle);
+ return service->pauseStream(streamHandle);
}
-/**
-* Discard any data held by the underlying HAL or Service.
-*/
+aaudio_result_t AAudioBinderClient::stopStream(aaudio_handle_t streamHandle) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->stopStream(streamHandle);
+}
+
aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
- return service->startStream(streamHandle);
+ return service->flushStream(streamHandle);
}
/**
@@ -163,5 +175,3 @@
clientProcessId,
clientThreadId);
}
-
-
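
openStream() above retries exactly once after dropping a dead service connection. The same shape could be factored into a generic helper; this is a hypothetical sketch, not part of the patch, and assumes AAUDIO_ERROR_NO_SERVICE is the only signal of a dead binder:

    // Hypothetical helper built on the getAAudioService()/dropAAudioService() pair above.
    template <typename Op>
    int32_t callWithReconnect(Op op) {                      // int32_t covers both results and handles
        int32_t result = AAUDIO_ERROR_NO_SERVICE;
        for (int attempt = 0; attempt < 2; attempt++) {
            const sp<IAAudioService> &service = getAAudioService();
            if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
            result = op(service);
            if (result != AAUDIO_ERROR_NO_SERVICE) break;   // success, or an unrelated error
            dropAAudioService();                            // binder died, force one reconnect
        }
        return result;
    }
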
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index 1497177..f7f2808 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -66,6 +66,8 @@
*/
aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;
+ aaudio_result_t stopStream(aaudio_handle_t streamHandle) override;
+
/**
* Discard any data held by the underlying HAL or Service.
* This is asynchronous. When complete, the service will send a FLUSHED event.
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index 0d5bae5..2de560b 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -35,6 +35,7 @@
GET_STREAM_DESCRIPTION,
START_STREAM,
PAUSE_STREAM,
+ STOP_STREAM,
FLUSH_STREAM,
REGISTER_AUDIO_THREAD,
UNREGISTER_AUDIO_THREAD
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 62fd894..b565499 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -63,6 +63,11 @@
virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
/**
+ * Stop the flow of data after data currently in the buffer has played.
+ */
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
* Discard any data held by the underlying HAL or Service.
*/
virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index 19d6d52..d75aa32 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -35,6 +35,7 @@
typedef enum aaudio_service_event_e : uint32_t {
AAUDIO_SERVICE_EVENT_STARTED,
AAUDIO_SERVICE_EVENT_PAUSED,
+ AAUDIO_SERVICE_EVENT_STOPPED,
AAUDIO_SERVICE_EVENT_FLUSHED,
AAUDIO_SERVICE_EVENT_CLOSED,
AAUDIO_SERVICE_EVENT_DISCONNECTED,
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index c2bcd05..09eaa42 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -43,7 +43,6 @@
status = parcel->writeInt32(mSamplesPerFrame);
if (status != NO_ERROR) goto error;
status = parcel->writeInt32((int32_t) mSharingMode);
- ALOGD("AAudioStreamConfiguration.writeToParcel(): mSharingMode = %d", mSharingMode);
if (status != NO_ERROR) goto error;
status = parcel->writeInt32((int32_t) mAudioFormat);
if (status != NO_ERROR) goto error;
@@ -66,7 +65,6 @@
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mSharingMode = (aaudio_sharing_mode_t) temp;
- ALOGD("AAudioStreamConfiguration.readFromParcel(): mSharingMode = %d", mSharingMode);
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mAudioFormat = (aaudio_audio_format_t) temp;
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index ec21f8a..a5c27b9 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -49,6 +49,10 @@
if (status != NO_ERROR) goto error;
status = parcel->writeInt32((int32_t) mDirection);
if (status != NO_ERROR) goto error;
+
+ status = parcel->writeBool(mSharingModeMatchRequired);
+ if (status != NO_ERROR) goto error;
+
status = mConfiguration.writeToParcel(parcel);
if (status != NO_ERROR) goto error;
return NO_ERROR;
@@ -63,12 +67,18 @@
status_t status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mUserId = (uid_t) temp;
+
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mProcessId = (pid_t) temp;
+
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mDirection = (aaudio_direction_t) temp;
+
+ status = parcel->readBool(&mSharingModeMatchRequired);
+ if (status != NO_ERROR) goto error;
+
status = mConfiguration.readFromParcel(parcel);
if (status != NO_ERROR) goto error;
return NO_ERROR;
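
The new writeBool()/readBool() pair must sit at the same offset on both sides, or every field after it is misread. A hypothetical round-trip check (not part of the patch) that would catch such a mismatch:

    // Hypothetical test sketch using android::Parcel directly.
    bool sharingFlagRoundTrips(AAudioStreamRequest &in) {
        Parcel parcel;
        in.writeToParcel(&parcel);
        parcel.setDataPosition(0);                  // rewind before reading back
        AAudioStreamRequest out;
        out.readFromParcel(&parcel);
        return out.isSharingModeMatchRequired() == in.isSharingModeMatchRequired();
    }
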
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index 992e978..d4bfbe1 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -60,6 +60,15 @@
mDirection = direction;
}
+ bool isSharingModeMatchRequired() const {
+ return mSharingModeMatchRequired;
+ }
+
+ void setSharingModeMatchRequired(bool required) {
+ mSharingModeMatchRequired = required;
+ }
+
+
const AAudioStreamConfiguration &getConstantConfiguration() const {
return mConfiguration;
}
@@ -81,6 +90,7 @@
uid_t mUserId;
pid_t mProcessId;
aaudio_direction_t mDirection;
+ bool mSharingModeMatchRequired = false;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index 03fc088..b8ef611 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -45,16 +45,25 @@
Parcel data, reply;
// send command
data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- ALOGE("BpAAudioService::client openStream request dump --------------------");
- request.dump();
+ ALOGV("BpAAudioService::client openStream --------------------");
+ // request.dump();
request.writeToParcel(&data);
status_t err = remote()->transact(OPEN_STREAM, data, &reply);
+ ALOGV("BpAAudioService::client openStream returned %d", err);
if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client openStream transact failed %d", err);
return AAudioConvert_androidToAAudioResult(err);
}
// parse reply
aaudio_handle_t stream;
- reply.readInt32(&stream);
+ err = reply.readInt32(&stream);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (stream < 0) {
+ ALOGE("BpAAudioService::client OPEN_STREAM passed stream %d", stream);
+ return stream;
+ }
err = configurationOutput.readFromParcel(&reply);
if (err != NO_ERROR) {
ALOGE("BpAAudioService::client openStream readFromParcel failed %d", err);
@@ -71,6 +80,7 @@
data.writeInt32(streamHandle);
status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client closeStream transact failed %d", err);
return AAudioConvert_androidToAAudioResult(err);
}
// parse reply
@@ -145,6 +155,21 @@
return res;
}
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(STOP_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) override {
Parcel data, reply;
// send command
@@ -226,11 +251,11 @@
case OPEN_STREAM: {
request.readFromParcel(&data);
- ALOGD("BnAAudioService::client openStream request dump --------------------");
- request.dump();
+ //ALOGD("BnAAudioService::client openStream request dump --------------------");
+ //request.dump();
stream = openStream(request, configuration);
- ALOGV("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
+ //ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
reply->writeInt32(stream);
configuration.writeToParcel(reply);
return NO_ERROR;
@@ -238,18 +263,17 @@
case CLOSE_STREAM: {
data.readInt32(&stream);
- ALOGV("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
result = closeStream(stream);
+ //ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X, result = %d",
+ // stream, result);
reply->writeInt32(result);
return NO_ERROR;
} break;
case GET_STREAM_DESCRIPTION: {
data.readInt32(&stream);
- ALOGI("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
aaudio::AudioEndpointParcelable parcelable;
result = getStreamDescription(stream, parcelable);
- ALOGI("BnAAudioService::onTransact getStreamDescription() returns %d", result);
if (result != AAUDIO_OK) {
return AAudioConvert_aaudioToAndroidStatus(result);
}
@@ -277,7 +301,16 @@
data.readInt32(&stream);
result = pauseStream(stream);
ALOGV("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
- stream, result);
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case STOP_STREAM: {
+ data.readInt32(&stream);
+ result = stopStream(stream);
+ ALOGV("BnAAudioService::onTransact STOP_STREAM 0x%08X, result = %d",
+ stream, result);
reply->writeInt32(result);
return NO_ERROR;
} break;
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
index ab7fd1b..2cee651 100644
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -69,6 +69,12 @@
virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
/**
+ * Stop the flow of data such that the data currently in the buffer is played.
+ * This is asynchronous. When complete, the service will send a STOPPED event.
+ */
+ virtual aaudio_result_t stopStream(aaudio::aaudio_handle_t streamHandle) = 0;
+
+ /**
* Discard any data held by the underlying HAL or Service.
* This is asynchronous. When complete, the service will send a FLUSHED event.
*/
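
With stopStream() added, a client can choose between draining and discarding buffered audio. A sketch at the NDK level, assuming the public AAudioStream_* entry points of this release (not code from this patch):

    // Drain: let the frames already written play out, then stop.
    AAudioStream_requestStop(stream);
    aaudio_stream_state_t next = AAUDIO_STREAM_STATE_UNINITIALIZED;
    AAudioStream_waitForStateChange(stream, AAUDIO_STREAM_STATE_STOPPING,
                                    &next, 500 * 1000 * 1000 /* 500 ms */);

    // Discard: pause first, then flush whatever is still queued.
    AAudioStream_requestPause(stream);
    AAudioStream_waitForStateChange(stream, AAUDIO_STREAM_STATE_PAUSING,
                                    &next, 500 * 1000 * 1000);
    AAudioStream_requestFlush(stream);
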
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 649c884..0f501dd 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -61,9 +61,8 @@
return status;
}
if (mSizeInBytes > 0) {
-// FIXME mFd = dup(parcel->readFileDescriptor());
- // Why is the ALSA resource not getting freed?!
- mFd = fcntl(parcel->readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
+ int originalFD = parcel->readFileDescriptor();
+ mFd = fcntl(originalFD, F_DUPFD_CLOEXEC, 0);
if (mFd == -1) {
status = -errno;
ALOGE("SharedMemoryParcelable readFileDescriptor fcntl() failed : %d", status);
@@ -101,11 +100,6 @@
return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mResolvedAddress == nullptr) {
- /* TODO remove
- int fd = fcntl(mFd, F_DUPFD_CLOEXEC, 0);
- ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, (%s)",
- mFd, mSizeInBytes, strerror(errno));
- */
mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
MAP_SHARED, mFd, 0);
if (mResolvedAddress == nullptr) {
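
readFromParcel() now duplicates the parcel's file descriptor with close-on-exec set atomically. The idiom in isolation (plain POSIX, not specific to this file):

    #include <errno.h>
    #include <fcntl.h>

    // Duplicate fd with FD_CLOEXEC already set, unlike a plain dup() followed by fcntl().
    int dupCloexec(int fd) {
        int newFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);   // lowest free descriptor >= 0
        return (newFd == -1) ? -errno : newFd;
    }
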
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index fe049b2..6f87df6 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -59,35 +59,35 @@
ALOGE("AudioEndpoint_validateQueueDescriptor() NULL dataAddress");
return AAUDIO_ERROR_NULL;
}
- ALOGD("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
+ ALOGV("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
type,
descriptor->dataAddress);
- ALOGD("AudioEndpoint_validateQueueDescriptor readCounter at %p, writeCounter at %p",
+ ALOGV("AudioEndpoint_validateQueueDescriptor readCounter at %p, writeCounter at %p",
descriptor->readCounterAddress,
descriptor->writeCounterAddress);
// Try to READ from the data area.
// This code will crash if the mmap failed.
uint8_t value = descriptor->dataAddress[0];
- ALOGD("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
+ ALOGV("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
(int) value);
// Try to WRITE to the data area.
descriptor->dataAddress[0] = value * 3;
- ALOGD("AudioEndpoint_validateQueueDescriptor() wrote successfully");
+ ALOGV("AudioEndpoint_validateQueueDescriptor() wrote successfully");
if (descriptor->readCounterAddress) {
fifo_counter_t counter = *descriptor->readCounterAddress;
- ALOGD("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
+ ALOGV("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
(int) counter);
*descriptor->readCounterAddress = counter;
- ALOGD("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
+ ALOGV("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
}
if (descriptor->writeCounterAddress) {
fifo_counter_t counter = *descriptor->writeCounterAddress;
- ALOGD("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
+ ALOGV("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
(int) counter);
*descriptor->writeCounterAddress = counter;
- ALOGD("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
+ ALOGV("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
}
return AAUDIO_OK;
}
@@ -107,7 +107,7 @@
// TODO maybe remove after debugging
aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
if (result != AAUDIO_OK) {
- ALOGD("AudioEndpoint_validateQueueDescriptor returned %d %s",
+ ALOGE("AudioEndpoint_validateQueueDescriptor returned %d %s",
result, AAudio_convertResultToText(result));
return result;
}
@@ -142,10 +142,10 @@
assert(descriptor->framesPerBurst > 0);
assert(descriptor->framesPerBurst < 8 * 1024); // FIXME just for initial debugging
assert(descriptor->dataAddress != nullptr);
- ALOGD("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
- ALOGD("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
+ ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+ ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
mOutputFreeRunning = descriptor->readCounterAddress == nullptr;
- ALOGD("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
+ ALOGV("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
? &mDataReadCounter
: descriptor->readCounterAddress;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 7304205..af4b93a 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -40,9 +40,6 @@
#define LOG_TIMESTAMPS 0
using android::String16;
-using android::IServiceManager;
-using android::defaultServiceManager;
-using android::interface_cast;
using android::Mutex;
using android::WrappingBuffer;
@@ -53,7 +50,10 @@
// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4
-#define ALOG_CONDITION (mInService == false)
+//static int64_t s_logCounter = 0;
+//#define MYLOG_CONDITION (mInService == true && s_logCounter++ < 500)
+//#define MYLOG_CONDITION (s_logCounter++ < 500000)
+#define MYLOG_CONDITION (1)
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
@@ -62,8 +62,7 @@
, mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
, mFramesPerBurst(16)
, mServiceInterface(serviceInterface)
- , mInService(inService)
-{
+ , mInService(inService) {
}
AudioStreamInternal::~AudioStreamInternal() {
@@ -84,27 +83,26 @@
if (getFormat() == AAUDIO_UNSPECIFIED) {
setFormat(AAUDIO_FORMAT_PCM_FLOAT);
}
+ // Request FLOAT for the shared mixer.
+ request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);
// Build the request to send to the server.
request.setUserId(getuid());
request.setProcessId(getpid());
request.setDirection(getDirection());
+ request.setSharingModeMatchRequired(isSharingModeMatchRequired());
request.getConfiguration().setDeviceId(getDeviceId());
request.getConfiguration().setSampleRate(getSampleRate());
request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
- request.getConfiguration().setAudioFormat(getFormat());
- aaudio_sharing_mode_t sharingMode = getSharingMode();
- ALOGE("AudioStreamInternal.open(): sharingMode %d", sharingMode);
- request.getConfiguration().setSharingMode(sharingMode);
+ request.getConfiguration().setSharingMode(getSharingMode());
+
request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
- (unsigned int)mServiceStreamHandle);
if (mServiceStreamHandle < 0) {
result = mServiceStreamHandle;
- ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
+ ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result);
} else {
result = configuration.validate();
if (result != AAUDIO_OK) {
@@ -120,10 +118,9 @@
mDeviceFormat = configuration.getAudioFormat();
result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): getStreamDescriptor(0x%08X) returns %d",
- mServiceStreamHandle, result);
if (result != AAUDIO_OK) {
- ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
+ ALOGE("AudioStreamInternal.open(): %s getStreamDescriptor returns %d",
+ getLocationName(), result);
mServiceInterface.closeStream(mServiceStreamHandle);
return result;
}
@@ -140,8 +137,19 @@
mAudioEndpoint.configure(&mEndpointDescriptor);
mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
- assert(mFramesPerBurst >= 16);
- assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);
+ int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames;
+
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
+ getLocationName(), mFramesPerBurst, capacity);
+ // Validate result from server.
+ if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
+ ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
+ ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
mClockModel.setSampleRate(getSampleRate());
mClockModel.setFramesPerBurst(mFramesPerBurst);
@@ -149,7 +157,8 @@
if (getDataCallbackProc()) {
mCallbackFrames = builder.getFramesPerDataCallback();
if (mCallbackFrames > getBufferCapacity() / 2) {
- ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
+ ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
+ mCallbackFrames, getBufferCapacity());
mServiceInterface.closeStream(mServiceStreamHandle);
return AAUDIO_ERROR_OUT_OF_RANGE;
@@ -175,7 +184,8 @@
}
aaudio_result_t AudioStreamInternal::close() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
+ mServiceStreamHandle);
if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
@@ -250,7 +260,7 @@
aaudio_result_t AudioStreamInternal::requestStart()
{
int64_t startTime;
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): start()");
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -275,8 +285,10 @@
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
// Wait for at least a second or some number of callbacks to join the thread.
- int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
- / getSampleRate();
+ int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
+ * framesPerOperation
+ * AAUDIO_NANOS_PER_SECOND)
+ / getSampleRate();
if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
timeoutNanoseconds = MIN_TIMEOUT_NANOS;
}
@@ -295,28 +307,34 @@
aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
- ALOGD("AudioStreamInternal(): pause()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
+ mServiceStreamHandle);
return AAUDIO_ERROR_INVALID_STATE;
}
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_PAUSING);
- return mServiceInterface.startStream(mServiceStreamHandle);
+ return mServiceInterface.pauseStream(mServiceStreamHandle);
}
aaudio_result_t AudioStreamInternal::requestPause()
{
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName());
aaudio_result_t result = stopCallback();
if (result != AAUDIO_OK) {
return result;
}
- return requestPauseInternal();
+ result = requestPauseInternal();
+ ALOGD("AudioStreamInternal(): requestPause() returns %d", result);
+ return result;
}
aaudio_result_t AudioStreamInternal::requestFlush() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): flush()");
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
+ mServiceStreamHandle);
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -325,35 +343,45 @@
}
void AudioStreamInternal::onFlushFromServer() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+
// Bump offset so caller does not see the retrograde motion in getFramesRead().
int64_t framesFlushed = writeCounter - readCounter;
mFramesOffsetFromService += framesFlushed;
+
// Flush written frames by forcing writeCounter to readCounter.
// This is because we cannot move the read counter in the hardware.
mAudioEndpoint.setDownDataWriteCounter(readCounter);
}
+aaudio_result_t AudioStreamInternal::requestStopInternal()
+{
+ if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+ mServiceStreamHandle);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ mClockModel.stop(AudioClock::getNanoseconds());
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+ return mServiceInterface.stopStream(mServiceStreamHandle);
+}
+
aaudio_result_t AudioStreamInternal::requestStop()
{
- // TODO better implementation of requestStop()
- aaudio_result_t result = requestPause();
- if (result == AAUDIO_OK) {
- aaudio_stream_state_t state;
- result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
- &state,
- 500 * AAUDIO_NANOS_PER_MILLISECOND);// TODO temporary code
- if (result == AAUDIO_OK) {
- result = requestFlush();
- }
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName());
+ aaudio_result_t result = stopCallback();
+ if (result != AAUDIO_OK) {
+ return result;
}
+ result = requestStopInternal();
+ ALOGD("AudioStreamInternal(): requestStop() returns %d", result);
return result;
}
aaudio_result_t AudioStreamInternal::registerThread() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): registerThread()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -364,7 +392,6 @@
}
aaudio_result_t AudioStreamInternal::unregisterThread() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): unregisterThread()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -394,16 +421,16 @@
static int64_t oldTime = 0;
int64_t framePosition = command.timestamp.position;
int64_t nanoTime = command.timestamp.timestamp;
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
(long long) framePosition,
(long long) nanoTime);
int64_t nanosDelta = nanoTime - oldTime;
if (nanosDelta > 0 && oldTime > 0) {
int64_t framesDelta = framePosition - oldPosition;
int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
}
oldPosition = framePosition;
oldTime = nanoTime;
@@ -422,23 +449,27 @@
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
aaudio_result_t result = AAUDIO_OK;
- ALOGD_IF(ALOG_CONDITION, "processCommands() got event %d", message->event.event);
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event);
switch (message->event.event) {
case AAUDIO_SERVICE_EVENT_STARTED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
setState(AAUDIO_STREAM_STATE_STARTED);
break;
case AAUDIO_SERVICE_EVENT_PAUSED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
setState(AAUDIO_STREAM_STATE_PAUSED);
break;
+ case AAUDIO_SERVICE_EVENT_STOPPED:
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ break;
case AAUDIO_SERVICE_EVENT_FLUSHED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
setState(AAUDIO_STREAM_STATE_FLUSHED);
onFlushFromServer();
break;
case AAUDIO_SERVICE_EVENT_CLOSED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
setState(AAUDIO_STREAM_STATE_CLOSED);
break;
case AAUDIO_SERVICE_EVENT_DISCONNECTED:
@@ -448,7 +479,7 @@
break;
case AAUDIO_SERVICE_EVENT_VOLUME:
mVolume = message->event.dataDouble;
- ALOGD_IF(ALOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
break;
default:
ALOGW("WARNING - processCommands() Unrecognized event = %d",
@@ -463,7 +494,7 @@
aaudio_result_t result = AAUDIO_OK;
while (result == AAUDIO_OK) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
AAudioServiceMessage message;
if (mAudioEndpoint.readUpCommand(&message) != 1) {
break; // no command this time, no problem
@@ -478,7 +509,7 @@
break;
default:
- ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+ ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
(int) message.what);
result = AAUDIO_ERROR_UNEXPECTED_VALUE;
break;
@@ -497,19 +528,13 @@
int64_t currentTimeNanos = AudioClock::getNanoseconds();
int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
int32_t framesLeft = numFrames;
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write(%p, %d) at time %08llu , mState = %s",
- // buffer, numFrames, (unsigned long long) currentTimeNanos,
- // AAudio_convertStreamStateToText(getState()));
// Write until all the data has been written or until a timeout occurs.
while (framesLeft > 0) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesLeft = %d, loopCount = %d =====",
- // framesLeft, loopCount++);
// The call to writeNow() will not block. It will just write as much as it can.
int64_t wakeTimeNanos = 0;
aaudio_result_t framesWritten = writeNow(source, framesLeft,
currentTimeNanos, &wakeTimeNanos);
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesWritten = %d", framesWritten);
if (framesWritten < 0) {
ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
result = framesWritten;
@@ -522,7 +547,6 @@
if (timeoutNanoseconds == 0) {
break; // don't block
} else if (framesLeft > 0) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
// clip the wake time to something reasonable
if (wakeTimeNanos < currentTimeNanos) {
wakeTimeNanos = currentTimeNanos;
@@ -534,16 +558,13 @@
break;
}
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
- // (long long) (wakeTimeNanos - currentTimeNanos));
- AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
+ int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
+ AudioClock::sleepForNanos(sleepForNanos);
currentTimeNanos = AudioClock::getNanoseconds();
}
}
// return error or framesWritten
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() result = %d, framesLeft = %d, #%d",
- // result, framesLeft, loopCount);
(void) loopCount;
return (result < 0) ? result : numFrames - framesLeft;
}
@@ -552,17 +573,15 @@
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
int64_t currentNanoTime, int64_t *wakeTimePtr) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow(%p) - enter", buffer);
{
aaudio_result_t result = processCommands();
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - processCommands() returned %d", result);
if (result != AAUDIO_OK) {
return result;
}
}
if (mAudioEndpoint.isOutputFreeRunning()) {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
// Update data queue based on the timing model.
int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
@@ -575,9 +594,9 @@
}
// Write some data to the buffer.
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
// numFrames, framesWritten);
// Calculate an ideal time to wake up.
@@ -585,7 +604,7 @@
// By default wake up a few milliseconds from now. // TODO review
int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
aaudio_stream_state_t state = getState();
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
// AAudio_convertStreamStateToText(state));
switch (state) {
case AAUDIO_STREAM_STATE_OPEN:
@@ -612,7 +631,7 @@
*wakeTimePtr = wakeTime;
}
-// ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
// (unsigned long long)currentNanoTime,
// (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
@@ -623,9 +642,8 @@
// TODO this function needs a major cleanup.
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
int32_t numFrames) {
- // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
+ // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
WrappingBuffer wrappingBuffer;
- mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);
uint8_t *source = (uint8_t *) buffer;
int32_t framesLeft = numFrames;
@@ -640,18 +658,25 @@
if (framesToWrite > framesAvailable) {
framesToWrite = framesAvailable;
}
- int32_t numBytes = getBytesPerFrame();
+ int32_t numBytes = getBytesPerFrame() * framesToWrite;
// TODO handle volume scaling
if (getFormat() == mDeviceFormat) {
// Copy straight through.
memcpy(wrappingBuffer.data[partIndex], source, numBytes);
} else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
- && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
// Data conversion.
AAudioConvert_floatToPcm16(
(const float *) source,
framesToWrite * getSamplesPerFrame(),
(int16_t *) wrappingBuffer.data[partIndex]);
+ } else if (getFormat() == AAUDIO_FORMAT_PCM_I16
+ && mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ // Data conversion.
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ framesToWrite * getSamplesPerFrame(),
+ (float *) wrappingBuffer.data[partIndex]);
} else {
// TODO handle more conversions
ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
@@ -661,6 +686,8 @@
source += numBytes;
framesLeft -= framesToWrite;
+ } else {
+ break;
}
partIndex++;
}
@@ -670,7 +697,7 @@
if (framesWritten > 0) {
incrementFramesWritten(framesWritten);
}
- // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
+ // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
return framesWritten;
}
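
The new I16-to-float branch mirrors the existing float-to-I16 path. A standalone sketch of the conversion AAudioConvert_pcm16ToFloat() is expected to perform, assuming the conventional 1/32768 full-scale mapping (the real utility lives in AAudioUtilities):

    // Illustrative only; see AAudioConvert_pcm16ToFloat() for the real implementation.
    static void pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
        const float scaler = 1.0f / 32768.0f;        // assumed full-scale mapping
        for (int32_t i = 0; i < numSamples; i++) {
            destination[i] = source[i] * scaler;
        }
    }
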
@@ -680,7 +707,15 @@
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
int32_t actualFrames = 0;
+ // Round to the next highest burst size.
+ if (getFramesPerBurst() > 0) {
+ int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
+ requestedFrames = numBursts * getFramesPerBurst();
+ }
+
aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d",
+ getLocationName(), requestedFrames, actualFrames);
if (result < 0) {
return result;
} else {
@@ -714,7 +749,7 @@
} else {
mLastFramesRead = framesRead;
}
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
return framesRead;
}
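
setBufferSize() now rounds the request up to a whole number of bursts before handing it to the endpoint. The rounding is plain ceiling division: with framesPerBurst = 192, a request of 200 frames becomes (200 + 191) / 192 = 2 bursts, i.e. 384 frames. In isolation:

    // Smallest multiple of framesPerBurst that is >= requestedFrames.
    static int32_t roundUpToBurst(int32_t requestedFrames, int32_t framesPerBurst) {
        int32_t numBursts = (requestedFrames + framesPerBurst - 1) / framesPerBurst;
        return numBursts * framesPerBurst;           // e.g. 200 -> 384 when bursts are 192
    }
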
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 1aa3b0f..8244311 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -94,6 +94,7 @@
aaudio_result_t processCommands();
aaudio_result_t requestPauseInternal();
+ aaudio_result_t requestStopInternal();
aaudio_result_t stopCallback();
@@ -129,6 +130,11 @@
int32_t numFrames);
void processTimestamp(uint64_t position, int64_t time);
+
+ const char *getLocationName() const {
+ return mInService ? "SERVICE" : "CLIENT";
+ }
+
// Adjust timing model based on timestamp from service.
IsochronousClockModel mClockModel; // timing model for chasing the HAL
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index c278c8b..21e3e70 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -101,13 +101,13 @@
// or we may be drifting due to a slow HW clock.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
- ALOGI("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+ ALOGV("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
(int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
} else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
// Later than expected timestamp.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
- ALOGI("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+ ALOGV("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
(int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
(int) (mMaxLatenessInNanos / 1000));
}
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 9d69423..97726e6 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -168,16 +168,15 @@
void *userData)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
streamBuilder->setDataCallbackProc(callback);
streamBuilder->setDataCallbackUserData(userData);
}
+
AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
AAudioStream_errorCallback callback,
void *userData)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
streamBuilder->setErrorCallbackProc(callback);
streamBuilder->setErrorCallbackUserData(userData);
}
@@ -186,10 +185,10 @@
int32_t frames)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- ALOGD("%s: frames = %d", __func__, frames);
streamBuilder->setFramesPerDataCallback(frames);
}
+// TODO merge AAudioInternal_openStream into AAudioStreamBuilder_openStream
static aaudio_result_t AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
AAudioStream** streamPtr)
{
@@ -206,7 +205,7 @@
AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
AAudioStream** streamPtr)
{
- ALOGD("AAudioStreamBuilder_openStream(): builder = %p", builder);
+ ALOGD("AAudioStreamBuilder_openStream() ----------------------------------------------");
AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
return AAudioInternal_openStream(streamBuilder, streamPtr);
}
@@ -228,6 +227,7 @@
if (audioStream != nullptr) {
audioStream->close();
delete audioStream;
+ ALOGD("AAudioStream_close() ----------------------------------------------");
return AAUDIO_OK;
}
return AAUDIO_ERROR_INVALID_HANDLE;
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 7c0b5ae..9690848 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -38,7 +38,6 @@
aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
{
-
// Copy parameters from the Builder because the Builder may be deleted after this call.
mSamplesPerFrame = builder.getSamplesPerFrame();
mSampleRate = builder.getSampleRate();
@@ -46,6 +45,7 @@
mFormat = builder.getFormat();
mDirection = builder.getDirection();
mSharingMode = builder.getSharingMode();
+ mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
// callbacks
mFramesPerDataCallback = builder.getFramesPerDataCallback();
@@ -53,10 +53,19 @@
mErrorCallbackProc = builder.getErrorCallbackProc();
mDataCallbackUserData = builder.getDataCallbackUserData();
- // TODO validate more parameters.
- if (mErrorCallbackProc != nullptr && mDataCallbackProc == nullptr) {
- ALOGE("AudioStream::open(): disconnect callback cannot be used without a data callback.");
- return AAUDIO_ERROR_UNEXPECTED_VALUE;
+ // This is very helpful for debugging in the future.
+ ALOGI("AudioStream.open(): rate = %d, channels = %d, format = %d, sharing = %d",
+ mSampleRate, mSamplesPerFrame, mFormat, mSharingMode);
+
+ // Check for values that are ridiculously out of range to prevent math overflow exploits.
+ // The service will do a better check.
+ if (mSamplesPerFrame < 0 || mSamplesPerFrame > 128) {
+ ALOGE("AudioStream::open(): samplesPerFrame out of range = %d", mSamplesPerFrame);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (mSampleRate < 0 || mSampleRate > 1000000) {
+ ALOGE("AudioStream::open(): mSampleRate out of range = %d", mSampleRate);
+ return AAUDIO_ERROR_INVALID_RATE;
}
if (mDirection != AAUDIO_DIRECTION_INPUT && mDirection != AAUDIO_DIRECTION_OUTPUT) {
ALOGE("AudioStream::open(): illegal direction %d", mDirection);
@@ -70,27 +79,6 @@
close();
}
-aaudio_result_t AudioStream::waitForStateTransition(aaudio_stream_state_t startingState,
- aaudio_stream_state_t endingState,
- int64_t timeoutNanoseconds)
-{
- aaudio_stream_state_t state = getState();
- aaudio_stream_state_t nextState = state;
- if (state == startingState && state != endingState) {
- aaudio_result_t result = waitForStateChange(state, &nextState, timeoutNanoseconds);
- if (result != AAUDIO_OK) {
- return result;
- }
- }
-// It's OK if the expected transition has already occurred.
-// But if we reach an unexpected state then that is an error.
- if (nextState != endingState) {
- return AAUDIO_ERROR_UNEXPECTED_STATE;
- } else {
- return AAUDIO_OK;
- }
-}
-
aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
aaudio_stream_state_t *nextState,
int64_t timeoutNanoseconds)
@@ -123,16 +111,15 @@
return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
}
-// This registers the app's background audio thread with the server before
+// This registers the callback thread with the server before
// passing control to the app. This gives the server an opportunity to boost
// the thread's performance characteristics.
void* AudioStream::wrapUserThread() {
void* procResult = nullptr;
mThreadRegistrationResult = registerThread();
if (mThreadRegistrationResult == AAUDIO_OK) {
- // Call application procedure. This may take a very long time.
+ // Run callback loop. This may take a very long time.
procResult = mThreadProc(mThreadArg);
- ALOGD("AudioStream::mThreadProc() returned");
mThreadRegistrationResult = unregisterThread();
}
return procResult;
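
The range checks added to AudioStream::open() above are meant to reject values so extreme that later frame/byte arithmetic could overflow, before the request ever reaches the service. A rough standalone sketch of that guard, using simplified placeholder result codes rather than the real aaudio_result_t values:

    #include <cstdint>
    #include <cstdio>

    // Placeholder result codes standing in for aaudio_result_t values.
    enum SanityResult { OK = 0, ERROR_OUT_OF_RANGE = -1, ERROR_INVALID_RATE = -2 };

    // Reject parameters that are wildly out of range so later arithmetic cannot overflow.
    // The service is still expected to do the authoritative validation.
    static SanityResult checkStreamParameters(int32_t samplesPerFrame, int32_t sampleRate) {
        if (samplesPerFrame < 0 || samplesPerFrame > 128) {
            std::fprintf(stderr, "samplesPerFrame out of range: %d\n", samplesPerFrame);
            return ERROR_OUT_OF_RANGE;
        }
        if (sampleRate < 0 || sampleRate > 1000000) {
            std::fprintf(stderr, "sampleRate out of range: %d\n", sampleRate);
            return ERROR_INVALID_RATE;
        }
        return OK;
    }

    int main() {
        std::printf("stereo @ 48000 -> %d\n", checkStreamParameters(2, 48000));   // OK
        std::printf("bogus rate     -> %d\n", checkStreamParameters(2, 1 << 30)); // ERROR_INVALID_RATE
        return 0;
    }
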
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 073b9a1..916870b 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -154,6 +154,10 @@
return mSharingMode;
}
+ bool isSharingModeMatchRequired() const {
+ return mSharingModeMatchRequired;
+ }
+
aaudio_direction_t getDirection() const {
return mDirection;
}
@@ -227,16 +231,6 @@
}
/**
- * Wait for a transition from one state to another.
- * @return AAUDIO_OK if the endingState was observed, or AAUDIO_ERROR_UNEXPECTED_STATE
- * if any state that was not the startingState or endingState was observed
- * or AAUDIO_ERROR_TIMEOUT
- */
- virtual aaudio_result_t waitForStateTransition(aaudio_stream_state_t startingState,
- aaudio_stream_state_t endingState,
- int64_t timeoutNanoseconds);
-
- /**
* This should not be called after the open() call.
*/
void setSampleRate(int32_t sampleRate) {
@@ -294,6 +288,7 @@
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ bool mSharingModeMatchRequired = false; // must match sharing mode requested
aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index b135a4b..4e0b8c6 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -30,9 +30,10 @@
#include "legacy/AudioStreamRecord.h"
#include "legacy/AudioStreamTrack.h"
-// Enable a mixer in AAudio service that will mix stream to an ALSA MMAP buffer.
+// Enable a mixer in AAudio service that will mix streams to an ALSA MMAP buffer.
#define MMAP_SHARED_ENABLED 0
-// Enable AAUDIO_SHARING_MODE_EXCLUSIVE that uses an ALSA MMAP buffer.
+
+// Enable AAUDIO_SHARING_MODE_EXCLUSIVE that uses an ALSA MMAP buffer directly.
#define MMAP_EXCLUSIVE_ENABLED 0
using namespace aaudio;
@@ -50,7 +51,7 @@
AudioStream* audioStream = nullptr;
AAudioBinderClient *aaudioClient = nullptr;
const aaudio_sharing_mode_t sharingMode = getSharingMode();
- ALOGD("AudioStreamBuilder.build() sharingMode = %d", sharingMode);
+
switch (getDirection()) {
case AAUDIO_DIRECTION_INPUT:
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index c0ee6fe..25baf4c 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -82,6 +82,15 @@
return this;
}
+ bool isSharingModeMatchRequired() const {
+ return mSharingModeMatchRequired;
+ }
+
+ AudioStreamBuilder* setSharingModeMatchRequired(bool required) {
+ mSharingModeMatchRequired = required;
+ return this;
+ }
+
int32_t getBufferCapacity() const {
return mBufferCapacity;
}
@@ -109,7 +118,6 @@
return this;
}
-
void *getDataCallbackUserData() const {
return mDataCallbackUserData;
}
@@ -153,6 +161,7 @@
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ bool mSharingModeMatchRequired = false; // must match sharing mode requested
aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index 857780c..6b4a772 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -60,14 +60,11 @@
, mFramesUnderrunCount(0)
, mUnderrunCount(0)
{
- // TODO Handle possible failures to allocate. Move out of constructor?
mFifo = new FifoControllerIndirect(capacityInFrames,
capacityInFrames,
readIndexAddress,
writeIndexAddress);
mStorageOwned = false;
- ALOGD("FifoProcessor: capacityInFrames = %d, bytesPerFrame = %d",
- capacityInFrames, bytesPerFrame);
}
FifoBuffer::~FifoBuffer() {
@@ -132,8 +129,6 @@
while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
fifo_frames_t framesToRead = framesLeft;
fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- //ALOGD("FifoProcessor::read() framesAvailable = %d, partIndex = %d",
- // framesAvailable, partIndex);
if (framesAvailable > 0) {
if (framesToRead > framesAvailable) {
framesToRead = framesAvailable;
@@ -143,6 +138,8 @@
destination += numBytes;
framesLeft -= framesToRead;
+ } else {
+ break;
}
partIndex++;
}
@@ -172,6 +169,8 @@
source += numBytes;
framesLeft -= framesToWrite;
+ } else {
+ break;
}
partIndex++;
}
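
The two new "else break" statements above make the FifoBuffer read and write loops stop as soon as one part of the wrapping buffer is empty instead of spinning on the second part. A minimal sketch of that read pattern, assuming mono int16_t frames and a simplified two-part view (not the real WrappingBuffer type):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Simplified two-part view of a ring buffer: part 0 runs to the end of the
    // storage, part 1 wraps around to the beginning. Frames are mono int16_t here.
    struct WrappingView {
        const int16_t *data[2];
        int32_t numFrames[2];
    };

    // Copy up to framesWanted frames out of the two parts, stopping early when a
    // part is empty: if part 0 has nothing left, part 1 cannot have anything either.
    static int32_t readWrapped(const WrappingView &view, int16_t *destination, int32_t framesWanted) {
        int32_t framesLeft = framesWanted;
        for (int part = 0; part < 2 && framesLeft > 0; part++) {
            int32_t available = view.numFrames[part];
            if (available <= 0) {
                break; // nothing more to read
            }
            int32_t framesToRead = std::min(framesLeft, available);
            std::memcpy(destination, view.data[part], framesToRead * sizeof(int16_t));
            destination += framesToRead;
            framesLeft -= framesToRead;
        }
        return framesWanted - framesLeft; // frames actually read
    }

    int main() {
        int16_t tail[3] = {1, 2, 3};
        int16_t head[2] = {4, 5};
        WrappingView view = {{tail, head}, {3, 2}};
        int16_t out[8] = {0};
        int32_t n = readWrapped(view, out, 8);
        std::printf("read %d frames, first = %d, last = %d\n", n, out[0], out[n - 1]);
        return 0;
    }
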
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 87b8b0d..efbbfc5 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -57,7 +57,7 @@
}
}
-void AAudioConvert_pcm16ToFloat(const float *source, int32_t numSamples, int16_t *destination) {
+void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
for (int i = 0; i < numSamples; i++) {
destination[i] = source[i] * (1.0f / 32768.0f);
}
@@ -78,6 +78,8 @@
status = INVALID_OPERATION;
break;
case AAUDIO_ERROR_UNEXPECTED_VALUE: // TODO redundant?
+ case AAUDIO_ERROR_INVALID_RATE:
+ case AAUDIO_ERROR_INVALID_FORMAT:
case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
status = BAD_VALUE;
break;
@@ -103,7 +105,7 @@
result = AAUDIO_ERROR_INVALID_HANDLE;
break;
case DEAD_OBJECT:
- result = AAUDIO_ERROR_DISCONNECTED;
+ result = AAUDIO_ERROR_NO_SERVICE;
break;
case INVALID_OPERATION:
result = AAUDIO_ERROR_INVALID_STATE;
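
The signature fix above makes the PCM16-to-float converter take int16_t input and produce float output, which is what the 1/32768 scaling implies. A small self-contained version of that conversion (names simplified, not the library function itself):

    #include <cstdint>
    #include <cstdio>

    // Convert signed 16-bit PCM to float in approximately [-1.0, 1.0).
    static void pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
        for (int32_t i = 0; i < numSamples; i++) {
            destination[i] = source[i] * (1.0f / 32768.0f);
        }
    }

    int main() {
        const int16_t in[4] = {-32768, -16384, 0, 32767};
        float out[4];
        pcm16ToFloat(in, 4, out);
        for (float f : out) {
            std::printf("%f\n", f); // -1.000000, -0.500000, 0.000000, 0.999969
        }
        return 0;
    }
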
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 84fa227..65b17bc 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
#include <assert.h>
#include <map>
#include <mutex>
@@ -28,13 +32,18 @@
ANDROID_SINGLETON_STATIC_INSTANCE(AAudioEndpointManager);
AAudioEndpointManager::AAudioEndpointManager()
- : Singleton<AAudioEndpointManager>() {
+ : Singleton<AAudioEndpointManager>()
+ , mInputs()
+ , mOutputs() {
}
-AAudioServiceEndpoint *AAudioEndpointManager::findEndpoint(AAudioService &audioService, int32_t deviceId,
+AAudioServiceEndpoint *AAudioEndpointManager::openEndpoint(AAudioService &audioService, int32_t deviceId,
aaudio_direction_t direction) {
AAudioServiceEndpoint *endpoint = nullptr;
std::lock_guard<std::mutex> lock(mLock);
+
+ // Try to find an existing endpoint.
+ ALOGD("AAudioEndpointManager::openEndpoint(), device = %d, dir = %d", deviceId, direction);
switch (direction) {
case AAUDIO_DIRECTION_INPUT:
endpoint = mInputs[deviceId];
@@ -48,11 +57,11 @@
}
// If we can't find an existing one then open one.
- ALOGD("AAudioEndpointManager::findEndpoint(), found %p", endpoint);
+ ALOGD("AAudioEndpointManager::openEndpoint(), found %p", endpoint);
if (endpoint == nullptr) {
endpoint = new AAudioServiceEndpoint(audioService);
if (endpoint->open(deviceId, direction) != AAUDIO_OK) {
- ALOGD("AAudioEndpointManager::findEndpoint(), open failed");
+ ALOGE("AAudioEndpointManager::findEndpoint(), open failed");
delete endpoint;
endpoint = nullptr;
} else {
@@ -66,22 +75,37 @@
}
}
}
+
+ if (endpoint != nullptr) {
+ // Increment the reference count under this lock.
+ endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
+ }
+
return endpoint;
}
-// FIXME add reference counter for serviceEndpoints and removed on last use.
-
-void AAudioEndpointManager::removeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
- aaudio_direction_t direction = serviceEndpoint->getDirection();
- int32_t deviceId = serviceEndpoint->getDeviceId();
-
+void AAudioEndpointManager::closeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
std::lock_guard<std::mutex> lock(mLock);
- switch(direction) {
- case AAUDIO_DIRECTION_INPUT:
- mInputs.erase(deviceId);
- break;
- case AAUDIO_DIRECTION_OUTPUT:
- mOutputs.erase(deviceId);
- break;
+ if (serviceEndpoint == nullptr) {
+ return;
}
-}
\ No newline at end of file
+
+ // Decrement the reference count under this lock.
+ int32_t newRefCount = serviceEndpoint->getReferenceCount() - 1;
+ serviceEndpoint->setReferenceCount(newRefCount);
+ if (newRefCount <= 0) {
+ aaudio_direction_t direction = serviceEndpoint->getDirection();
+ int32_t deviceId = serviceEndpoint->getDeviceId();
+
+ switch (direction) {
+ case AAUDIO_DIRECTION_INPUT:
+ mInputs.erase(deviceId);
+ break;
+ case AAUDIO_DIRECTION_OUTPUT:
+ mOutputs.erase(deviceId);
+ break;
+ }
+ serviceEndpoint->close();
+ delete serviceEndpoint;
+ }
+}
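
The manager now reference-counts each shared endpoint instead of removing it unconditionally: openEndpoint() increments the count under the lock, and closeEndpoint() only closes and deletes the endpoint when the count reaches zero. A toy version of that scheme, with a simplified Endpoint type and a map keyed only by device id (the real manager also distinguishes input from output):

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <mutex>

    // Stand-in for the per-device endpoint object.
    struct Endpoint {
        int32_t deviceId;
        int32_t referenceCount = 0;
    };

    // Reference-counted cache: open reuses or creates an entry and bumps the count,
    // close drops the count and destroys the entry when it hits zero.
    class EndpointCache {
    public:
        Endpoint *openEndpoint(int32_t deviceId) {
            std::lock_guard<std::mutex> lock(mLock);
            Endpoint *&slot = mEndpoints[deviceId];
            if (slot == nullptr) {
                slot = new Endpoint{deviceId};
            }
            slot->referenceCount++;
            return slot;
        }

        void closeEndpoint(Endpoint *endpoint) {
            std::lock_guard<std::mutex> lock(mLock);
            if (endpoint == nullptr) return;
            if (--endpoint->referenceCount <= 0) {
                mEndpoints.erase(endpoint->deviceId);
                delete endpoint; // last user closed it
            }
        }

    private:
        std::mutex mLock;
        std::map<int32_t, Endpoint *> mEndpoints;
    };

    int main() {
        EndpointCache cache;
        Endpoint *a = cache.openEndpoint(3);
        Endpoint *b = cache.openEndpoint(3);  // same device, so the same endpoint
        std::printf("shared = %d, refCount = %d\n", a == b, a->referenceCount);
        cache.closeEndpoint(a);
        cache.closeEndpoint(b);               // freed here, when the count reaches zero
        return 0;
    }
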
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index 48b27f0..bbcfc1d 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -39,11 +39,11 @@
* @param direction
* @return endpoint or nullptr
*/
- AAudioServiceEndpoint *findEndpoint(android::AAudioService &audioService,
+ AAudioServiceEndpoint *openEndpoint(android::AAudioService &audioService,
int32_t deviceId,
aaudio_direction_t direction);
- void removeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
+ void closeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
private:
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 70da339..43203d4 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -41,7 +41,7 @@
memset(mOutputBuffer, 0, mBufferSizeInBytes);
}
-void AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+bool AAudioMixer::mix(FifoBuffer *fifo, float volume) {
WrappingBuffer wrappingBuffer;
float *destination = mOutputBuffer;
fifo_frames_t framesLeft = mFramesPerBurst;
@@ -67,9 +67,10 @@
}
fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst - framesLeft);
if (framesLeft > 0) {
- ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
- framesLeft, mFramesPerBurst);
+ //ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
+ // framesLeft, mFramesPerBurst);
}
+ return (framesLeft > 0); // did not get all the frames we needed, i.e. "underflow"
}
void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames, float volume) {
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index 2191183..9155fec 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -31,7 +31,13 @@
void clear();
- void mix(android::FifoBuffer *fifo, float volume);
+ /**
+ * Mix from this FIFO
+ * @param fifo
+ * @param volume
+ * @return true if underflowed
+ */
+ bool mix(android::FifoBuffer *fifo, float volume);
void mixPart(float *destination, float *source, int32_t numFrames, float volume);
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 723ef63..816d5ab 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -54,8 +54,8 @@
aaudio_result_t result = AAUDIO_OK;
AAudioServiceStreamBase *serviceStream = nullptr;
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+ bool sharingModeMatchRequired = request.isSharingModeMatchRequired();
aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
- ALOGE("AAudioService::openStream(): sharingMode = %d", sharingMode);
if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
ALOGE("AAudioService::openStream(): unrecognized sharing mode = %d", sharingMode);
@@ -77,8 +77,9 @@
}
// if SHARED requested or if EXCLUSIVE failed
- if (serviceStream == nullptr) {
- ALOGD("AAudioService::openStream(), sharingMode = AAUDIO_SHARING_MODE_SHARED");
+ if (sharingMode == AAUDIO_SHARING_MODE_SHARED
+ || (serviceStream == nullptr && !sharingModeMatchRequired)) {
+ ALOGD("AAudioService::openStream(), try AAUDIO_SHARING_MODE_SHARED");
serviceStream = new AAudioServiceStreamShared(*this);
result = serviceStream->open(request, configurationOutput);
configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
@@ -126,9 +127,7 @@
ALOGE("AAudioService::getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- ALOGD("AAudioService::getStreamDescription(), handle = 0x%08x", streamHandle);
aaudio_result_t result = serviceStream->getDescription(parcelable);
- ALOGD("AAudioService::getStreamDescription(), result = %d", result);
// parcelable.dump();
return result;
}
@@ -140,7 +139,6 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->start();
- ALOGD("AAudioService::startStream(), serviceStream->start() returned %d", result);
return result;
}
@@ -154,6 +152,16 @@
return result;
}
+aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::pauseStream(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->stop();
+ return result;
+}
+
aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream == nullptr) {
@@ -168,7 +176,6 @@
pid_t clientThreadId,
int64_t periodNanoseconds) {
AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
if (serviceStream == nullptr) {
ALOGE("AAudioService::registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
@@ -193,7 +200,6 @@
pid_t clientProcessId,
pid_t clientThreadId) {
AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("AAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
if (serviceStream == nullptr) {
ALOGE("AAudioService::unregisterAudioThread(), illegal stream handle = 0x%0x",
streamHandle);
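
The reworked openStream() only falls back from EXCLUSIVE to SHARED when the client did not insist on an exact sharing-mode match; a plain SHARED request goes straight to the shared path. The decision reduces to a small predicate, sketched here with simplified types:

    #include <cstdio>

    enum SharingMode { SHARED, EXCLUSIVE };

    // Should the service (re)try the SHARED path? SHARED requests always use it;
    // a failed EXCLUSIVE open may fall back only if no exact match was required.
    static bool shouldTryShared(SharingMode requested, bool exclusiveOpenSucceeded,
                                bool sharingModeMatchRequired) {
        if (requested == SHARED) {
            return true;
        }
        return !exclusiveOpenSucceeded && !sharingModeMatchRequired;
    }

    int main() {
        std::printf("%d\n", shouldTryShared(SHARED,    false, false)); // 1: shared requested
        std::printf("%d\n", shouldTryShared(EXCLUSIVE, false, false)); // 1: fall back allowed
        std::printf("%d\n", shouldTryShared(EXCLUSIVE, false, true));  // 0: must match, report failure
        std::printf("%d\n", shouldTryShared(EXCLUSIVE, true,  false)); // 0: exclusive already open
        return 0;
    }
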
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index 5a7a2b6..f5a7d2f 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -57,6 +57,8 @@
virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle);
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle);
+
virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 80551c9..b197798 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -14,6 +14,17 @@
* limitations under the License.
*/
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
#include <algorithm>
#include <mutex>
#include <vector>
@@ -30,6 +41,12 @@
// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4
+// This is the maximum buffer capacity in frames. The effective size can be tuned smaller at runtime.
+#define DEFAULT_BUFFER_CAPACITY (48 * 8)
+
+// Use 2 for "double buffered"
+#define BUFFER_SIZE_IN_BURSTS 2
+
// The mStreamInternal will use a service interface that does not go through Binder.
AAudioServiceEndpoint::AAudioServiceEndpoint(AAudioService &audioService)
: mStreamInternal(audioService, true)
@@ -43,11 +60,18 @@
aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId, aaudio_direction_t direction) {
AudioStreamBuilder builder;
builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+ // Don't fall back to SHARED because that would cause recursion.
+ builder.setSharingModeMatchRequired(true);
builder.setDeviceId(deviceId);
builder.setDirection(direction);
+ builder.setBufferCapacity(DEFAULT_BUFFER_CAPACITY);
+
aaudio_result_t result = mStreamInternal.open(builder);
if (result == AAUDIO_OK) {
mMixer.allocate(mStreamInternal.getSamplesPerFrame(), mStreamInternal.getFramesPerBurst());
+
+ int32_t desiredBufferSize = BUFFER_SIZE_IN_BURSTS * mStreamInternal.getFramesPerBurst();
+ mStreamInternal.setBufferSize(desiredBufferSize);
}
return result;
}
@@ -58,15 +82,12 @@
// TODO, maybe use an interface to reduce exposure
aaudio_result_t AAudioServiceEndpoint::registerStream(AAudioServiceStreamShared *sharedStream) {
- ALOGD("AAudioServiceEndpoint::registerStream(%p)", sharedStream);
- // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
std::lock_guard<std::mutex> lock(mLockStreams);
mRegisteredStreams.push_back(sharedStream);
return AAUDIO_OK;
}
aaudio_result_t AAudioServiceEndpoint::unregisterStream(AAudioServiceStreamShared *sharedStream) {
- ALOGD("AAudioServiceEndpoint::unregisterStream(%p)", sharedStream);
std::lock_guard<std::mutex> lock(mLockStreams);
mRegisteredStreams.erase(std::remove(mRegisteredStreams.begin(), mRegisteredStreams.end(), sharedStream),
mRegisteredStreams.end());
@@ -75,7 +96,6 @@
aaudio_result_t AAudioServiceEndpoint::startStream(AAudioServiceStreamShared *sharedStream) {
// TODO use real-time technique to avoid mutex, eg. atomic command FIFO
- ALOGD("AAudioServiceEndpoint(): startStream() entering");
std::lock_guard<std::mutex> lock(mLockStreams);
mRunningStreams.push_back(sharedStream);
if (mRunningStreams.size() == 1) {
@@ -106,13 +126,10 @@
// Render audio in the application callback and then write the data to the stream.
void *AAudioServiceEndpoint::callbackLoop() {
- aaudio_result_t result = AAUDIO_OK;
-
ALOGD("AAudioServiceEndpoint(): callbackLoop() entering");
+ int32_t underflowCount = 0;
- result = mStreamInternal.requestStart();
- ALOGD("AAudioServiceEndpoint(): callbackLoop() after requestStart() %d, isPlaying() = %d",
- result, (int) mStreamInternal.isPlaying());
+ aaudio_result_t result = mStreamInternal.requestStart();
// result might be a frame count
while (mCallbackEnabled.load() && mStreamInternal.isPlaying() && (result >= 0)) {
@@ -123,12 +140,14 @@
for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
float volume = 0.5; // TODO get from system
- mMixer.mix(fifo, volume);
+ bool underflowed = mMixer.mix(fifo, volume);
+ underflowCount += underflowed ? 1 : 0;
+ // TODO log underflows in each stream
+ sharedStream->markTransferTime(AudioClock::getNanoseconds());
}
}
// Write audio data to stream using a blocking write.
- ALOGD("AAudioServiceEndpoint(): callbackLoop() write(%d)", getFramesPerBurst());
int64_t timeoutNanos = calculateReasonableTimeout(mStreamInternal.getFramesPerBurst());
result = mStreamInternal.write(mMixer.getOutputBuffer(), getFramesPerBurst(), timeoutNanos);
if (result == AAUDIO_ERROR_DISCONNECTED) {
@@ -141,11 +160,9 @@
}
}
- ALOGD("AAudioServiceEndpoint(): callbackLoop() exiting, result = %d, isPlaying() = %d",
- result, (int) mStreamInternal.isPlaying());
-
result = mStreamInternal.requestStop();
+ ALOGD("AAudioServiceEndpoint(): callbackLoop() exiting, %d underflows", underflowCount);
return NULL; // TODO review
}
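
The endpoint's callbackLoop() now tallies mixer underflows and stamps each running stream with the time its data was pulled, which feeds the more accurate timestamp model in AAudioServiceStreamShared. A toy model of that per-burst bookkeeping (the real loop blocks in a timed write to the MMAP stream rather than advancing a simulated clock):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Toy stream source: pretends it can supply up to 'available' frames per burst.
    struct ToyStream {
        int32_t available;
        int64_t markedTimeNanos = 0;
    };

    // Mix every running stream each burst, count the bursts that underflowed,
    // and record when each stream's data was consumed (markTransferTime stand-in).
    static int32_t runBursts(std::vector<ToyStream> &streams, int32_t framesPerBurst,
                             int32_t numBursts, int64_t nanosPerBurst) {
        int32_t underflowCount = 0;
        int64_t now = 0;
        for (int32_t burst = 0; burst < numBursts; burst++) {
            for (ToyStream &stream : streams) {
                bool underflowed = stream.available < framesPerBurst; // mix() result stand-in
                underflowCount += underflowed ? 1 : 0;
                stream.markedTimeNanos = now;
            }
            now += nanosPerBurst; // simulated time; a real loop waits on the device
        }
        return underflowCount;
    }

    int main() {
        std::vector<ToyStream> streams = {{96}, {20}}; // the second stream is starved
        int32_t underflows = runBursts(streams, 96, 10, 2000000);
        std::printf("underflows = %d, last transfer time = %lld ns\n",
                    underflows, (long long) streams[1].markedTimeNanos);
        return 0;
    }
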
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 020d38a..a4ceae6 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -56,6 +56,16 @@
void *callbackLoop();
+ // This should only be called from the AAudioEndpointManager under a mutex.
+ int32_t getReferenceCount() const {
+ return mReferenceCount;
+ }
+
+ // This should only be called from the AAudioEndpointManager under a mutex.
+ void setReferenceCount(int32_t count) {
+ mReferenceCount = count;
+ }
+
private:
aaudio_result_t startMixer_l();
aaudio_result_t stopMixer_l();
@@ -64,13 +74,14 @@
AudioStreamInternal mStreamInternal;
AAudioMixer mMixer;
- AAudioServiceStreamMMAP mStreamMMAP;
std::atomic<bool> mCallbackEnabled;
+ int32_t mReferenceCount = 0;
std::mutex mLockStreams;
std::vector<AAudioServiceStreamShared *> mRegisteredStreams;
std::vector<AAudioServiceStreamShared *> mRunningStreams;
+
};
} /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index b15043d..d8882c9 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -63,6 +63,7 @@
}
aaudio_result_t AAudioServiceStreamBase::start() {
+ ALOGD("AAudioServiceStreamBase::start() send AAUDIO_SERVICE_EVENT_STARTED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
mState = AAUDIO_STREAM_STATE_STARTED;
mThreadEnabled.store(true);
@@ -78,14 +79,37 @@
processError();
return result;
}
+ ALOGD("AAudioServiceStreamBase::pause() send AAUDIO_SERVICE_EVENT_PAUSED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
mState = AAUDIO_STREAM_STATE_PAUSED;
return result;
}
+aaudio_result_t AAudioServiceStreamBase::stop() {
+ // TODO wait for data to be played out
+ sendCurrentTimestamp();
+ mThreadEnabled.store(false);
+ aaudio_result_t result = mAAudioThread.stop();
+ if (result != AAUDIO_OK) {
+ processError();
+ return result;
+ }
+ ALOGD("AAudioServiceStreamBase::stop() send AAUDIO_SERVICE_EVENT_STOPPED");
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
+ mState = AAUDIO_STREAM_STATE_STOPPED;
+ return result;
+}
+
+aaudio_result_t AAudioServiceStreamBase::flush() {
+ ALOGD("AAudioServiceStreamBase::flush() send AAUDIO_SERVICE_EVENT_FLUSHED");
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+ mState = AAUDIO_STREAM_STATE_FLUSHED;
+ return AAUDIO_OK;
+}
+
// implement Runnable
void AAudioServiceStreamBase::run() {
- ALOGD("AAudioServiceStreamMMAP::run() entering ----------------");
+ ALOGD("AAudioServiceStreamBase::run() entering ----------------");
TimestampScheduler timestampScheduler;
timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
timestampScheduler.start(AudioClock::getNanoseconds());
@@ -102,7 +126,7 @@
AudioClock::sleepUntilNanoTime(nextTime);
}
}
- ALOGD("AAudioServiceStreamMMAP::run() exiting ----------------");
+ ALOGD("AAudioServiceStreamBase::run() exiting ----------------");
}
void AAudioServiceStreamBase::processError() {
@@ -122,6 +146,10 @@
aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+ if (mUpMessageQueue == nullptr) {
+ ALOGE("writeUpMessageQueue(): mUpMessageQueue null! - stream not open");
+ return AAUDIO_ERROR_NULL;
+ }
int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
if (count != 1) {
ALOGE("writeUpMessageQueue(): Queue full. Did client die?");
@@ -133,9 +161,11 @@
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
AAudioServiceMessage command;
+ //ALOGD("sendCurrentTimestamp() called");
aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
+ //ALOGD("sendCurrentTimestamp(): position %d", (int) command.timestamp.position);
command.what = AAudioServiceMessage::code::TIMESTAMP;
result = writeUpMessageQueue(&command);
}
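
The base class now owns default start/pause/stop/flush implementations whose common shape is: notify the client over the up message queue, then record the new state. A stripped-down model of that ordering (it omits the timestamp-thread handling; toy enums and a printf stand in for the real message queue):

    #include <cstdio>

    // Toy states and service events echoing the new stop()/flush() paths.
    enum State { STARTED, PAUSED, STOPPED, FLUSHED };
    enum Event { EVENT_PAUSED, EVENT_STOPPED, EVENT_FLUSHED };

    struct ToyServiceStream {
        State state = STARTED;

        void sendServiceEvent(Event event) {
            std::printf("sendServiceEvent(%d)\n", (int) event); // up message queue stand-in
        }

        // Notify the client first, then record the new state.
        void stop() {
            sendServiceEvent(EVENT_STOPPED);
            state = STOPPED;
        }

        void flush() {
            sendServiceEvent(EVENT_FLUSHED);
            state = FLUSHED;
        }
    };

    int main() {
        ToyServiceStream stream;
        stream.stop();
        stream.flush();
        std::printf("final state = %d\n", (int) stream.state);
        return 0;
    }
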
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 91eec35..d6b6ee3 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -17,6 +17,7 @@
#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
#define AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+#include <assert.h>
#include <mutex>
#include "fifo/FifoBuffer.h"
@@ -60,17 +61,22 @@
/**
* Start the flow of data.
*/
- virtual aaudio_result_t start() = 0;
+ virtual aaudio_result_t start();
/**
* Stop the flow of data such that start() can resume with loss of data.
*/
- virtual aaudio_result_t pause() = 0;
+ virtual aaudio_result_t pause();
+
+ /**
+ * Stop the flow of data after data in buffer has played.
+ */
+ virtual aaudio_result_t stop();
/**
* Discard any data held by the underlying HAL or Service.
*/
- virtual aaudio_result_t flush() = 0;
+ virtual aaudio_result_t flush();
// -------------------------------------------------------------------
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index b70c625..b2e7fc9 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -55,6 +55,11 @@
aaudio_result_t AAudioServiceStreamMMAP::close() {
ALOGD("AAudioServiceStreamMMAP::close() called, %p", mMmapStream.get());
mMmapStream.clear(); // TODO review. Is that all we have to do?
+ // Apparently the above close is asynchronous. An attempt to open a new device
+ // right after a close can fail. Also some callbacks may still be in flight!
+ // FIXME Make closing synchronous.
+ AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
+
return AAudioServiceStreamBase::close();
}
@@ -79,8 +84,8 @@
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
audio_port_handle_t deviceId = configurationInput.getDeviceId();
- ALOGI("open request dump()");
- request.dump();
+ // ALOGI("open request dump()");
+ // request.dump();
mMmapClient.clientUid = request.getUserId();
mMmapClient.clientPid = request.getProcessId();
@@ -198,16 +203,25 @@
return (result1 != AAUDIO_OK) ? result1 : result2;
}
+aaudio_result_t AAudioServiceStreamMMAP::stop() {
+ if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+
+ aaudio_result_t result1 = AAudioServiceStreamBase::stop();
+ aaudio_result_t result2 = mMmapStream->stop(mPortHandle);
+ mFramesRead.reset32();
+ return (result1 != AAUDIO_OK) ? result1 : result2;
+}
+
/**
* Discard any data held by the underlying HAL or Service.
*/
aaudio_result_t AAudioServiceStreamMMAP::flush() {
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
// TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
- ALOGD("AAudioServiceStreamMMAP::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
+ ALOGD("AAudioServiceStreamMMAP::flush() send AAUDIO_SERVICE_EVENT_FLUSHED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
mState = AAUDIO_STREAM_STATE_FLUSHED;
- return AAUDIO_OK;
+ return AAudioServiceStreamBase::flush();
}
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index f121c5c..a8e63a6 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -66,6 +66,8 @@
*/
aaudio_result_t pause() override;
+ aaudio_result_t stop() override;
+
/**
* Discard any data held by the underlying HAL or Service.
*
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index cd9336b..b5d9927 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -61,7 +61,7 @@
ALOGD("AAudioServiceStreamShared::open(), direction = %d", direction);
AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
- mServiceEndpoint = mEndpointManager.findEndpoint(mAudioService, deviceId, direction);
+ mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService, deviceId, direction);
ALOGD("AAudioServiceStreamShared::open(), mServiceEndPoint = %p", mServiceEndpoint);
if (mServiceEndpoint == nullptr) {
return AAUDIO_ERROR_UNAVAILABLE;
@@ -72,6 +72,7 @@
if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
} else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+ ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need FLOAT", mAudioFormat);
return AAUDIO_ERROR_INVALID_FORMAT;
}
@@ -79,6 +80,8 @@
if (mSampleRate == AAUDIO_FORMAT_UNSPECIFIED) {
mSampleRate = mServiceEndpoint->getSampleRate();
} else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
+ ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need %d",
+ mSampleRate, mServiceEndpoint->getSampleRate());
return AAUDIO_ERROR_INVALID_RATE;
}
@@ -86,17 +89,22 @@
if (mSamplesPerFrame == AAUDIO_FORMAT_UNSPECIFIED) {
mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
} else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
+ ALOGE("AAudioServiceStreamShared::open(), mSamplesPerFrame = %d, need %d",
+ mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
return AAUDIO_ERROR_OUT_OF_RANGE;
}
// Determine this stream's shared memory buffer capacity.
mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
int32_t minCapacityFrames = configurationInput.getBufferCapacity();
- int32_t numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
- if (numBursts < MIN_BURSTS_PER_BUFFER) {
- numBursts = MIN_BURSTS_PER_BUFFER;
- } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
- numBursts = MAX_BURSTS_PER_BUFFER;
+ int32_t numBursts = MAX_BURSTS_PER_BUFFER;
+ if (minCapacityFrames != AAUDIO_UNSPECIFIED) {
+ numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
+ if (numBursts < MIN_BURSTS_PER_BUFFER) {
+ numBursts = MIN_BURSTS_PER_BUFFER;
+ } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
+ numBursts = MAX_BURSTS_PER_BUFFER;
+ }
}
mCapacityInFrames = numBursts * mFramesPerBurst;
ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
@@ -122,8 +130,12 @@
* An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
*/
aaudio_result_t AAudioServiceStreamShared::start() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
// Add this stream to the mixer.
- aaudio_result_t result = mServiceEndpoint->startStream(this);
+ aaudio_result_t result = endpoint->startStream(this);
if (result != AAUDIO_OK) {
ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
processError();
@@ -139,15 +151,31 @@
* An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
*/
aaudio_result_t AAudioServiceStreamShared::pause() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
// Add this stream to the mixer.
- aaudio_result_t result = mServiceEndpoint->stopStream(this);
+ aaudio_result_t result = endpoint->stopStream(this);
+ if (result != AAUDIO_OK) {
+ ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
+ processError();
+ }
+ return AAudioServiceStreamBase::pause();
+}
+
+aaudio_result_t AAudioServiceStreamShared::stop() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Remove this stream from the mixer.
+ aaudio_result_t result = endpoint->stopStream(this);
if (result != AAUDIO_OK) {
ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
processError();
- } else {
- result = AAudioServiceStreamBase::start();
}
- return AAUDIO_OK;
+ return AAudioServiceStreamBase::stop();
}
/**
@@ -157,15 +185,21 @@
*/
aaudio_result_t AAudioServiceStreamShared::flush() {
// TODO make sure we are paused
- return AAUDIO_OK;
+ // TODO actually flush the data
+ return AAudioServiceStreamBase::flush();
}
aaudio_result_t AAudioServiceStreamShared::close() {
pause();
// TODO wait for pause() to synchronize
- mServiceEndpoint->unregisterStream(this);
- mServiceEndpoint->close();
- mServiceEndpoint = nullptr;
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint != nullptr) {
+ endpoint->unregisterStream(this);
+
+ AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
+ mEndpointManager.closeEndpoint(endpoint);
+ mServiceEndpoint = nullptr;
+ }
return AAudioServiceStreamBase::close();
}
@@ -189,10 +223,15 @@
mServiceEndpoint = nullptr;
}
+void AAudioServiceStreamShared::markTransferTime(int64_t nanoseconds) {
+ mMarkedPosition = mAudioDataQueue->getFifoBuffer()->getReadCounter();
+ mMarkedTime = nanoseconds;
+}
aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
int64_t *timeNanos) {
- *positionFrames = mAudioDataQueue->getFifoBuffer()->getReadCounter();
- *timeNanos = AudioClock::getNanoseconds();
+ // TODO get these two numbers as an atomic pair
+ *positionFrames = mMarkedPosition;
+ *timeNanos = mMarkedTime;
return AAUDIO_OK;
}
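
The capacity logic above now treats an unspecified bufferCapacity as "use the maximum number of bursts" and otherwise rounds the request up to whole bursts and clamps it. That arithmetic in isolation, with made-up limit values (the real MIN/MAX_BURSTS_PER_BUFFER constants live in the service source):

    #include <cstdint>
    #include <cstdio>

    // Made-up stand-ins for the burst-count limits and the "unspecified" sentinel.
    static const int32_t kMinBurstsPerBuffer = 2;
    static const int32_t kMaxBurstsPerBuffer = 8;
    static const int32_t kUnspecified = 0;

    // Convert a requested minimum capacity (frames) into a whole number of bursts:
    // default to the maximum when unspecified, otherwise round up and clamp.
    static int32_t calculateCapacityInFrames(int32_t minCapacityFrames, int32_t framesPerBurst) {
        int32_t numBursts = kMaxBurstsPerBuffer;
        if (minCapacityFrames != kUnspecified) {
            numBursts = (minCapacityFrames + framesPerBurst - 1) / framesPerBurst; // round up
            if (numBursts < kMinBurstsPerBuffer) {
                numBursts = kMinBurstsPerBuffer;
            } else if (numBursts > kMaxBurstsPerBuffer) {
                numBursts = kMaxBurstsPerBuffer;
            }
        }
        return numBursts * framesPerBurst;
    }

    int main() {
        std::printf("%d\n", calculateCapacityInFrames(kUnspecified, 96)); // 768: default of 8 bursts
        std::printf("%d\n", calculateCapacityInFrames(100, 96));          // 192: rounded up to 2 bursts
        std::printf("%d\n", calculateCapacityInFrames(10000, 96));        // 768: clamped to the maximum
        return 0;
    }
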
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index f6df7ce..b981387 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -66,6 +66,11 @@
aaudio_result_t pause() override;
/**
+ * Stop the flow of data after data in buffer has played.
+ */
+ aaudio_result_t stop() override;
+
+ /**
* Discard any data held by the underlying HAL or Service.
*
* This is not guaranteed to be synchronous but it currently is.
@@ -77,6 +82,11 @@
android::FifoBuffer *getDataFifoBuffer() { return mAudioDataQueue->getFifoBuffer(); }
+ /* Keep a record of when a buffer transfer completed.
+ * This allows for a more accurate timing model.
+ */
+ void markTransferTime(int64_t nanoseconds);
+
void onStop();
void onDisconnect();
@@ -91,6 +101,9 @@
android::AAudioService &mAudioService;
AAudioServiceEndpoint *mServiceEndpoint = nullptr;
SharedRingBuffer *mAudioDataQueue;
+
+ int64_t mMarkedPosition = 0;
+ int64_t mMarkedTime = 0;
};
} /* namespace aaudio */