Merge "OMXNodeInstance: use a lock around OMX::freeNode" into oc-mr1-dev
diff --git a/drm/libmediadrm/PluginMetricsReporting.cpp b/drm/libmediadrm/PluginMetricsReporting.cpp
index a9302ea..57ff5b8 100644
--- a/drm/libmediadrm/PluginMetricsReporting.cpp
+++ b/drm/libmediadrm/PluginMetricsReporting.cpp
@@ -44,6 +44,13 @@
analyticsItem.setInt64(kParentAttribute, *parentId);
}
+ // Report the package name.
+ if (metricsGroup.has_app_package_name()) {
+ AString app_package_name(metricsGroup.app_package_name().c_str(),
+ metricsGroup.app_package_name().size());
+ analyticsItem.setPkgName(app_package_name);
+ }
+
for (int i = 0; i < metricsGroup.metric_size(); ++i) {
const MetricsGroup_Metric& metric = metricsGroup.metric(i);
if (!metric.has_name()) {
@@ -73,7 +80,12 @@
}
analyticsItem.setFinalized(true);
- analyticsItem.selfrecord();
+ if (!analyticsItem.selfrecord()) {
+ // The cast to int is needed because we build for both 32-bit and 64-bit,
+ // and no single printf format specifier satisfies both without it.
+ ALOGE("selfrecord() returned false. sessionId %d", (int) sessionId);
+ }
for (int i = 0; i < metricsGroup.metric_sub_group_size(); ++i) {
const MetricsGroup& subGroup = metricsGroup.metric_sub_group(i);
diff --git a/drm/libmediadrm/protos/plugin_metrics.proto b/drm/libmediadrm/protos/plugin_metrics.proto
index 2d26f14..7e3bcf5 100644
--- a/drm/libmediadrm/protos/plugin_metrics.proto
+++ b/drm/libmediadrm/protos/plugin_metrics.proto
@@ -44,4 +44,7 @@
// Allow multiple sub groups of metrics.
repeated MetricsGroup metric_sub_group = 2;
+
+ // Name of the application package associated with the metrics.
+ optional string app_package_name = 3;
}
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index 5fdac5c..ec07d87 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -25,10 +25,28 @@
#include "Session.h"
+namespace {
+const android::String8 kStreaming("Streaming");
+const android::String8 kOffline("Offline");
+const android::String8 kTrue("True");
+
+const android::String8 kQueryKeyLicenseType("LicenseType");
+ // Value: "Streaming" or "Offline"
+const android::String8 kQueryKeyPlayAllowed("PlayAllowed");
+ // Value: "True" or "False"
+const android::String8 kQueryKeyRenewAllowed("RenewAllowed");
+ // Value: "True" or "False"
+} // anonymous namespace
+
namespace clearkeydrm {
using android::sp;
+DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
+ : mSessionLibrary(sessionLibrary) {
+ mPlayPolicy.clear();
+}
+
status_t DrmPlugin::openSession(Vector<uint8_t>& sessionId) {
sp<Session> session = mSessionLibrary->createSession();
sessionId = session->sessionId();
@@ -60,18 +78,28 @@
if (scope.size() == 0) {
return android::BAD_VALUE;
}
+
if (keyType != kKeyType_Streaming) {
return android::ERROR_DRM_CANNOT_HANDLE;
}
+
*keyRequestType = DrmPlugin::kKeyRequestType_Initial;
defaultUrl.clear();
sp<Session> session = mSessionLibrary->findSession(scope);
if (!session.get()) {
return android::ERROR_DRM_SESSION_NOT_OPENED;
}
+
return session->getKeyRequest(initData, mimeType, &request);
}
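+// Populate mPlayPolicy with the fixed values reported by queryKeyStatus():
+// a streaming license where playback and renewal are allowed.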
+void DrmPlugin::setPlayPolicy() {
+ mPlayPolicy.clear();
+ mPlayPolicy.add(kQueryKeyLicenseType, kStreaming);
+ mPlayPolicy.add(kQueryKeyPlayAllowed, kTrue);
+ mPlayPolicy.add(kQueryKeyRenewAllowed, kTrue);
+}
+
status_t DrmPlugin::provideKeyResponse(
const Vector<uint8_t>& scope,
const Vector<uint8_t>& response,
@@ -83,6 +111,8 @@
if (!session.get()) {
return android::ERROR_DRM_SESSION_NOT_OPENED;
}
+
+ setPlayPolicy();
status_t res = session->provideKeyResponse(response);
if (res == android::OK) {
// This is for testing AMediaDrm_setOnEventListener only.
@@ -111,4 +141,18 @@
return android::OK;
}
+status_t DrmPlugin::queryKeyStatus(
+ const Vector<uint8_t>& sessionId,
+ KeyedVector<String8, String8>& infoMap) const {
+
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
+ infoMap.clear();
+ for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
+ infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
+ }
+ return android::OK;
+}
} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index 58421b9..f37a706 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -39,8 +39,8 @@
class DrmPlugin : public android::DrmPlugin {
public:
- explicit DrmPlugin(SessionLibrary* sessionLibrary)
- : mSessionLibrary(sessionLibrary) {}
+ explicit DrmPlugin(SessionLibrary* sessionLibrary);
+
virtual ~DrmPlugin() {}
virtual status_t openSession(Vector<uint8_t>& sessionId);
@@ -81,13 +81,7 @@
virtual status_t queryKeyStatus(
const Vector<uint8_t>& sessionId,
- KeyedVector<String8, String8>& infoMap) const {
- if (sessionId.size() == 0) {
- return android::BAD_VALUE;
- }
- UNUSED(infoMap);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
+ KeyedVector<String8, String8>& infoMap) const;
virtual status_t getProvisionRequest(
const String8& cert_type,
@@ -248,9 +242,12 @@
}
private:
- DISALLOW_EVIL_CONSTRUCTORS(DrmPlugin);
+ void setPlayPolicy();
+ android::KeyedVector<android::String8, android::String8> mPlayPolicy;
SessionLibrary* mSessionLibrary;
+
+ DISALLOW_EVIL_CONSTRUCTORS(DrmPlugin);
};
} // namespace clearkeydrm
diff --git a/media/libaaudio/examples/utils/AAudioExampleUtils.h b/media/libaaudio/examples/utils/AAudioExampleUtils.h
index 9ef62c9..c179ce6 100644
--- a/media/libaaudio/examples/utils/AAudioExampleUtils.h
+++ b/media/libaaudio/examples/utils/AAudioExampleUtils.h
@@ -31,18 +31,51 @@
#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
const char *getSharingModeText(aaudio_sharing_mode_t mode) {
- const char *modeText = "unknown";
+ const char *text = "unknown";
switch (mode) {
- case AAUDIO_SHARING_MODE_EXCLUSIVE:
- modeText = "EXCLUSIVE";
- break;
- case AAUDIO_SHARING_MODE_SHARED:
- modeText = "SHARED";
- break;
- default:
- break;
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ text = "EXCLUSIVE";
+ break;
+ case AAUDIO_SHARING_MODE_SHARED:
+ text = "SHARED";
+ break;
+ default:
+ break;
}
- return modeText;
+ return text;
+}
+
+const char *getPerformanceModeText(aaudio_performance_mode_t mode) {
+ const char *text = "unknown";
+ switch (mode) {
+ case AAUDIO_PERFORMANCE_MODE_NONE:
+ text = "NONE";
+ break;
+ case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+ text = "LOW_LATENCY";
+ break;
+ case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+ text = "POWER_SAVING";
+ break;
+ default:
+ break;
+ }
+ return text;
+}
+
+const char *getDirectionText(aaudio_direction_t direction) {
+ const char *text = "unknown";
+ switch (direction) {
+ case AAUDIO_DIRECTION_INPUT:
+ text = "INPUT";
+ break;
+ case AAUDIO_DIRECTION_OUTPUT:
+ text = "OUTPUT";
+ break;
+ default:
+ break;
+ }
+ return text;
}
static void convertNanosecondsToTimespec(int64_t nanoseconds, struct timespec *time) {
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index d2e7f23..606c4ba 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -36,6 +36,13 @@
// How long to sleep in a callback to cause an intentional glitch. For testing.
#define FORCED_UNDERRUN_SLEEP_MICROS (10 * 1000)
+#define MAX_TIMESTAMPS 16
+
+typedef struct Timestamp {
+ int64_t position;
+ int64_t nanoseconds;
+} Timestamp;
+
/**
* Simple wrapper for AAudio that opens an output stream either in callback or blocking write mode.
*/
@@ -227,10 +234,12 @@
SineGenerator sineOsc1;
SineGenerator sineOsc2;
+ Timestamp timestamps[MAX_TIMESTAMPS];
int64_t framesTotal = 0;
int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
int32_t minNumFrames = INT32_MAX;
int32_t maxNumFrames = 0;
+ int32_t timestampCount = 0; // number of valid entries in timestamps[]
int scheduler = 0;
bool schedulerChecked = false;
@@ -273,6 +282,17 @@
sineData->schedulerChecked = true;
}
+ if (sineData->timestampCount < MAX_TIMESTAMPS) {
+ Timestamp *timestamp = &sineData->timestamps[sineData->timestampCount];
+ aaudio_result_t result = AAudioStream_getTimestamp(stream,
+ CLOCK_MONOTONIC, &timestamp->position, &timestamp->nanoseconds);
+ if (result == AAUDIO_OK && // valid?
+ (sineData->timestampCount == 0 || // first one?
+ (timestamp->position != (timestamp - 1)->position))) { // advanced position?
+ sineData->timestampCount++; // keep this one
+ }
+ }
+
if (numFrames > sineData->maxNumFrames) {
sineData->maxNumFrames = numFrames;
}
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 2280b72..4f9cde6 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -120,6 +120,18 @@
goto error;
}
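+ // Print the timestamps captured by the callback and flag any retrograde motion.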
+ for (int i = 0; i < myData.timestampCount; i++) {
+ Timestamp *timestamp = &myData.timestamps[i];
+ bool retro = (i > 0 &&
+ ((timestamp->position < (timestamp - 1)->position)
+ || ((timestamp->nanoseconds < (timestamp - 1)->nanoseconds))));
+ const char *message = retro ? " <= RETROGRADE!" : "";
+ printf("Timestamp %3d : %8lld, %8lld %s\n", i,
+ (long long) timestamp->position,
+ (long long) timestamp->nanoseconds,
+ message);
+ }
+
if (myData.schedulerChecked) {
printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
myData.scheduler,
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 7f2e495..bbbd439 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -55,7 +55,7 @@
// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4
-#define LOG_TIMESTAMPS 0
+#define LOG_TIMESTAMPS 0
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
@@ -66,9 +66,9 @@
, mStreamVolume(1.0f)
, mInService(inService)
, mServiceInterface(serviceInterface)
+ , mAtomicTimestamp()
, mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
, mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
- , mAtomicTimestamp()
{
ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
mWakeupDelayNanos, mMinimumSleepNanos);
@@ -250,25 +250,45 @@
}
}
+/*
+ * It normally takes about 20-30 msec to start a stream on the server.
+ * But the first time can take as much as 200-300 msec. The HW
+ * starts right away so by the time the client gets a chance to write into
+ * the buffer, it is already in a deep underflow state. That can cause the
+ * XRunCount to be non-zero, which could lead an app to tune its latency higher.
+ * To avoid this problem, we set a request for the processing code to start the
+ * client stream at the same position as the server stream.
+ * The processing code will then save the current offset
+ * between client and server and apply that to any position given to the app.
+ */
aaudio_result_t AudioStreamInternal::requestStart()
{
int64_t startTime;
- ALOGD("AudioStreamInternal()::requestStart()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::requestStart() mServiceStreamHandle invalid");
+ ALOGE("requestStart() mServiceStreamHandle invalid");
return AAUDIO_ERROR_INVALID_STATE;
}
if (isActive()) {
- ALOGE("AudioStreamInternal::requestStart() already active");
+ ALOGE("requestStart() already active");
return AAUDIO_ERROR_INVALID_STATE;
}
- aaudio_stream_state_t originalState = getState();
+ aaudio_stream_state_t originalState = getState();
+ if (originalState == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ ALOGE("requestStart() but DISCONNECTED");
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
setState(AAUDIO_STREAM_STATE_STARTING);
- aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());
+
+ // Clear any stale timestamps from the previous run.
+ drainTimestampsFromService();
+
+ status_t status = startWithStatus(); // Call PlayerBase, which will start the device stream.
+ aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
startTime = AudioClock::getNanoseconds();
mClockModel.start(startTime);
+ mNeedCatchUp.request(); // Ask data processing code to catch up when first timestamp received.
if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
// Launch the callback loop thread.
@@ -314,13 +334,14 @@
aaudio_result_t AudioStreamInternal::requestStopInternal()
{
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+ ALOGE("requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
mServiceStreamHandle);
return AAUDIO_ERROR_INVALID_STATE;
}
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_STOPPING);
+ mAtomicTimestamp.clear();
return AAudioConvert_androidToAAudioResult(stopWithStatus());
}
@@ -336,7 +357,7 @@
aaudio_result_t AudioStreamInternal::registerThread() {
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::registerThread() mServiceStreamHandle invalid");
+ ALOGE("registerThread() mServiceStreamHandle invalid");
return AAUDIO_ERROR_INVALID_STATE;
}
return mServiceInterface.registerAudioThread(mServiceStreamHandle,
@@ -346,7 +367,7 @@
aaudio_result_t AudioStreamInternal::unregisterThread() {
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::unregisterThread() mServiceStreamHandle invalid");
+ ALOGE("unregisterThread() mServiceStreamHandle invalid");
return AAUDIO_ERROR_INVALID_STATE;
}
return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, gettid());
@@ -374,12 +395,14 @@
// Generated in server and passed to client. Return latest.
if (mAtomicTimestamp.isValid()) {
Timestamp timestamp = mAtomicTimestamp.read();
- *framePosition = timestamp.getPosition();
- *timeNanoseconds = timestamp.getNanoseconds();
- return AAUDIO_OK;
- } else {
- return AAUDIO_ERROR_UNAVAILABLE;
+ int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
+ if (position >= 0) {
+ *framePosition = position;
+ *timeNanoseconds = timestamp.getNanoseconds();
+ return AAUDIO_OK;
+ }
}
+ return AAUDIO_ERROR_UNAVAILABLE;
}
aaudio_result_t AudioStreamInternal::updateStateMachine() {
@@ -394,14 +417,14 @@
static int64_t oldTime = 0;
int64_t framePosition = command.timestamp.position;
int64_t nanoTime = command.timestamp.timestamp;
- ALOGD("AudioStreamInternal: timestamp says framePosition = %08lld at nanoTime %lld",
+ ALOGD("logTimestamp: timestamp says framePosition = %8lld at nanoTime %lld",
(long long) framePosition,
(long long) nanoTime);
int64_t nanosDelta = nanoTime - oldTime;
if (nanosDelta > 0 && oldTime > 0) {
int64_t framesDelta = framePosition - oldPosition;
int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
- ALOGD("AudioStreamInternal: framesDelta = %08lld, nanosDelta = %08lld, rate = %lld",
+ ALOGD("logTimestamp: framesDelta = %8lld, nanosDelta = %8lld, rate = %lld",
(long long) framesDelta, (long long) nanosDelta, (long long) rate);
}
oldPosition = framePosition;
@@ -478,6 +501,34 @@
return result;
}
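+// Read and discard any stale timestamp messages left over from a previous run,
+// while still handling EVENT messages so state changes are not lost.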
+aaudio_result_t AudioStreamInternal::drainTimestampsFromService() {
+ aaudio_result_t result = AAUDIO_OK;
+
+ while (result == AAUDIO_OK) {
+ AAudioServiceMessage message;
+ if (mAudioEndpoint.readUpCommand(&message) != 1) {
+ break; // no command this time, no problem
+ }
+ switch (message.what) {
+ // ignore most messages
+ case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
+ case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
+ break;
+
+ case AAudioServiceMessage::code::EVENT:
+ result = onEventFromServer(&message);
+ break;
+
+ default:
+ ALOGE("WARNING - drainTimestampsFromService() Unrecognized what = %d",
+ (int) message.what);
+ result = AAUDIO_ERROR_INTERNAL;
+ break;
+ }
+ }
+ return result;
+}
+
// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
aaudio_result_t result = AAUDIO_OK;
@@ -502,7 +553,7 @@
break;
default:
- ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+ ALOGE("WARNING - processCommands() Unrecognized what = %d",
(int) message.what);
result = AAUDIO_ERROR_INTERNAL;
break;
@@ -613,7 +664,7 @@
}
aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
- ALOGD("AudioStreamInternal::setBufferSize() req = %d => %d", requestedFrames, actualFrames);
+ ALOGD("setBufferSize() req = %d => %d", requestedFrames, actualFrames);
if (result < 0) {
return result;
} else {
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 3523294..899d455 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -115,12 +115,15 @@
int64_t currentTimeNanos,
int64_t *wakeTimePtr) = 0;
+ aaudio_result_t drainTimestampsFromService();
+
aaudio_result_t processCommands();
aaudio_result_t requestStopInternal();
aaudio_result_t stopCallback();
+ virtual void advanceClientToMatchServerPosition() = 0;
virtual void onFlushFromServer() {}
@@ -167,6 +170,10 @@
AAudioServiceInterface &mServiceInterface; // abstract interface to the service
+ SimpleDoubleBuffer<Timestamp> mAtomicTimestamp;
+
+ AtomicRequestor mNeedCatchUp; // Ask read() or write() to sync on first timestamp.
+
private:
/*
* Asynchronous write with data conversion.
@@ -188,8 +195,6 @@
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
- SimpleDoubleBuffer<Timestamp> mAtomicTimestamp;
-
int64_t mServiceLatencyNanos = 0;
};
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 7b1e53e..b792ecd 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -39,6 +39,21 @@
AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
+void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
+ int64_t readCounter = mAudioEndpoint.getDataReadCounter();
+ int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
+
+ // Bump offset so caller does not see the retrograde motion in getFramesRead().
+ int64_t offset = readCounter - writeCounter;
+ mFramesOffsetFromService += offset;
+ ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
+ (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
+
+ // Force readCounter to match writeCounter.
+ // This is because we cannot change the write counter in the hardware.
+ mAudioEndpoint.setDataReadCounter(writeCounter);
+}
+
// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
@@ -57,6 +72,18 @@
const char *traceName = "aaRdNow";
ATRACE_BEGIN(traceName);
+ if (mClockModel.isStarting()) {
+ // Still haven't got any timestamps from server.
+ // Keep waiting until we get some valid timestamps, then start reading from the
+ // current buffer position.
+ ALOGD("processDataNow() wait for valid timestamps");
+ // Sleep very briefly and hope we get a timestamp soon.
+ *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
+ ATRACE_END();
+ return 0;
+ }
+ // If we have gotten this far then we have at least one timestamp from server.
+
if (mAudioEndpoint.isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
@@ -65,6 +92,14 @@
mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
}
+ // This code assumes that we have already received valid timestamps.
+ if (mNeedCatchUp.isRequested()) {
+ // Catch an MMAP pointer that is already advancing.
+ // This will avoid initial underruns caused by a slow cold start.
+ advanceClientToMatchServerPosition();
+ mNeedCatchUp.acknowledge();
+ }
+
// If the write index passed the read index then consider it an overrun.
if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
mXRunCount++;
@@ -100,8 +135,8 @@
// Calculate frame position based off of the readCounter because
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
- int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
- wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
+ wakeTime = mClockModel.convertPositionToTime(nextPosition);
}
break;
default:
@@ -186,8 +221,7 @@
}
int64_t AudioStreamInternalCapture::getFramesRead() {
- int64_t frames = mAudioEndpoint.getDataWriteCounter()
- + mFramesOffsetFromService;
+ int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
//ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
return frames;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 17f37e8..294dbaf 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -46,6 +46,8 @@
}
protected:
+ void advanceClientToMatchServerPosition() override;
+
/**
* Low level data processing that will not block. It will just read or write as much as it can.
*
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 31e0a40..f2e40a2 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -48,6 +48,7 @@
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_PAUSING);
+ mAtomicTimestamp.clear();
return AAudioConvert_androidToAAudioResult(pauseWithStatus());
}
@@ -72,21 +73,25 @@
return mServiceInterface.flushStream(mServiceStreamHandle);
}
-void AudioStreamInternalPlay::onFlushFromServer() {
+void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
int64_t readCounter = mAudioEndpoint.getDataReadCounter();
int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
// Bump offset so caller does not see the retrograde motion in getFramesRead().
- int64_t framesFlushed = writeCounter - readCounter;
- mFramesOffsetFromService += framesFlushed;
- ALOGD("AudioStreamInternal::onFlushFromServer() readN = %lld, writeN = %lld, offset = %lld",
+ int64_t offset = writeCounter - readCounter;
+ mFramesOffsetFromService += offset;
+ ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
(long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
- // Flush written frames by forcing writeCounter to readCounter.
- // This is because we cannot move the read counter in the hardware.
+ // Force writeCounter to match readCounter.
+ // This is because we cannot change the read counter in the hardware.
mAudioEndpoint.setDataWriteCounter(readCounter);
}
+void AudioStreamInternalPlay::onFlushFromServer() {
+ advanceClientToMatchServerPosition();
+}
+
// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
@@ -106,6 +111,18 @@
const char *traceName = "aaWrNow";
ATRACE_BEGIN(traceName);
+ if (mClockModel.isStarting()) {
+ // Still haven't got any timestamps from server.
+ // Keep waiting until we get some valid timestamps then start writing to the
+ // current buffer position.
+ ALOGD("processDataNow() wait for valid timestamps");
+ // Sleep very briefly and hope we get a timestamp soon.
+ *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
+ ATRACE_END();
+ return 0;
+ }
+ // If we have gotten this far then we have at least one timestamp from server.
+
// If a DMA channel or DSP is reading the other end then we have to update the readCounter.
if (mAudioEndpoint.isFreeRunning()) {
// Update data queue based on the timing model.
@@ -114,6 +131,13 @@
mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
}
+ if (mNeedCatchUp.isRequested()) {
+ // Catch an MMAP pointer that is already advancing.
+ // This will avoid initial underruns caused by a slow cold start.
+ advanceClientToMatchServerPosition();
+ mNeedCatchUp.acknowledge();
+ }
+
// If the read index passed the write index then consider it an underrun.
if (mAudioEndpoint.getFullFramesAvailable() < 0) {
mXRunCount++;
@@ -153,9 +177,9 @@
// Calculate frame position based off of the writeCounter because
// the readCounter might have just advanced in the background,
// causing us to sleep until a later burst.
- int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
+ int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
- mAudioEndpoint.getBufferSizeInFrames();
- wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ wakeTime = mClockModel.convertPositionToTime(nextPosition);
}
break;
default:
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index e59d02c..fdb1fd7 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -54,6 +54,8 @@
aaudio_result_t requestPauseInternal();
+ void advanceClientToMatchServerPosition() override;
+
void onFlushFromServer() override;
/**
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index c06c8a9..bac69f1 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -48,19 +48,26 @@
}
void IsochronousClockModel::start(int64_t nanoTime) {
- ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
+ ALOGV("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mState = STATE_STARTING;
}
void IsochronousClockModel::stop(int64_t nanoTime) {
- ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
+ ALOGV("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
// TODO should we set position?
mState = STATE_STOPPED;
}
+bool IsochronousClockModel::isStarting() {
+ return mState == STATE_STARTING;
+}
+
void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
+// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
+// (long long)framePosition,
+// (long long)nanoTime);
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
@@ -70,9 +77,6 @@
// ALOGD("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
// (long long)mMarkerFramePosition,
// (long long)mMarkerNanoTime);
-// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
-// (long long)framePosition,
-// (long long)nanoTime);
int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
// ALOGD("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
@@ -116,6 +120,8 @@
default:
break;
}
+
+// ALOGD("processTimestamp() - mState = %d", mState);
}
void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 585f53a..7182376 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -36,6 +36,8 @@
void start(int64_t nanoTime);
void stop(int64_t nanoTime);
+ bool isStarting();
+
void processTimestamp(int64_t framePosition, int64_t nanoTime);
/**
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index 2816bac..ee29177 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -21,6 +21,7 @@
#include <stdint.h>
#include <utils/String16.h>
#include <media/AudioTrack.h>
+#include <media/AudioTimestamp.h>
#include <aaudio/AAudio.h>
#include "core/AudioStream.h"
@@ -46,16 +47,32 @@
return AudioStreamLegacy_callback;
}
-// Implement FixedBlockProcessor
-int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
- int32_t frameCount = numBytes / getBytesPerFrame();
+int32_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer, int32_t numFrames) {
+ if (getDirection() == AAUDIO_DIRECTION_INPUT) {
+ // Increment before because we already got the data from the device.
+ incrementFramesRead(numFrames);
+ }
+
// Call using the AAudio callback interface.
AAudioStream_dataCallback appCallback = getDataCallbackProc();
- return (*appCallback)(
+ aaudio_data_callback_result_t callbackResult = (*appCallback)(
(AAudioStream *) this,
getDataCallbackUserData(),
buffer,
- frameCount);
+ numFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE
+ && getDirection() == AAUDIO_DIRECTION_OUTPUT) {
+ // Increment after because we are going to write the data to the device.
+ incrementFramesWritten(numFrames);
+ }
+ return callbackResult;
+}
+
+// Implement FixedBlockProcessor
+int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
+ int32_t numFrames = numBytes / getBytesPerFrame();
+ return callDataCallbackFrames(buffer, numFrames);
}
void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
@@ -81,16 +98,11 @@
(uint8_t *) audioBuffer->raw, byteCount);
} else {
// Call using the AAudio callback interface.
- callbackResult = (*getDataCallbackProc())(
- (AAudioStream *) this,
- getDataCallbackUserData(),
- audioBuffer->raw,
- audioBuffer->frameCount
- );
+ callbackResult = callDataCallbackFrames((uint8_t *)audioBuffer->raw,
+ audioBuffer->frameCount);
}
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
- incrementClientFrameCounter(audioBuffer->frameCount);
} else {
audioBuffer->size = 0;
}
@@ -139,7 +151,18 @@
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
break;
}
- status_t status = extendedTimestamp->getBestTimestamp(framePosition, timeNanoseconds, timebase);
+ ExtendedTimestamp::Location location = ExtendedTimestamp::Location::LOCATION_INVALID;
+ int64_t localPosition;
+ status_t status = extendedTimestamp->getBestTimestamp(&localPosition, timeNanoseconds,
+ timebase, &location);
+ // use MonotonicCounter to prevent retrograde motion.
+ mTimestampPosition.update32((int32_t)localPosition);
+ *framePosition = mTimestampPosition.get();
+
+// ALOGD("getBestTimestamp() fposition: server = %6lld, kernel = %6lld, location = %d",
+// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_SERVER],
+// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_KERNEL],
+// (int)location);
return AAudioConvert_androidToAAudioResult(status);
}
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index d2ef3c7..66c216c 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -63,6 +63,8 @@
aaudio_legacy_callback_t getLegacyCallback();
+ int32_t callDataCallbackFrames(uint8_t *buffer, int32_t numFrames);
+
// This is public so it can be called from the C callback function.
// This is called from the AudioTrack/AudioRecord client.
virtual void processCallback(int event, void *info) = 0;
@@ -122,6 +124,7 @@
MonotonicCounter mFramesWritten;
MonotonicCounter mFramesRead;
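+ // Timestamp frame position, kept monotonic to prevent retrograde motion
+ // in getTimestamp().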
+ MonotonicCounter mTimestampPosition;
FixedBlockAdapter *mBlockAdapter = nullptr;
aaudio_wrapping_frames_t mPositionWhenStarting = 0;
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 041280d..c8b94ae 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -233,8 +233,10 @@
onStop();
setState(AAUDIO_STREAM_STATE_STOPPING);
incrementFramesWritten(getFramesRead() - getFramesWritten()); // TODO review
+ mTimestampPosition.set(getFramesRead());
mAudioRecord->stop();
mFramesRead.reset32();
+ mTimestampPosition.reset32();
return AAUDIO_OK;
}
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 155362c..702b12a 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -285,6 +285,7 @@
incrementFramesRead(getFramesWritten() - getFramesRead());
mAudioTrack->flush();
mFramesWritten.reset32();
+ mTimestampPosition.reset32();
return AAUDIO_OK;
}
@@ -298,8 +299,10 @@
onStop();
setState(AAUDIO_STREAM_STATE_STOPPING);
incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
+ mTimestampPosition.set(getFramesWritten());
stop();
mFramesWritten.reset32();
+ mTimestampPosition.reset32();
return AAUDIO_OK;
}
@@ -447,5 +450,18 @@
if (status != NO_ERROR) {
return AAudioConvert_androidToAAudioResult(status);
}
- return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
+ int64_t position = 0;
+ int64_t nanoseconds = 0;
+ aaudio_result_t result = getBestTimestamp(clockId, &position,
+ &nanoseconds, &extendedTimestamp);
+ if (result == AAUDIO_OK) {
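+ // The device cannot have presented more frames than the app has written,
+ // so a position at or beyond getFramesWritten() is not yet valid.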
+ if (position < getFramesWritten()) {
+ *framePosition = position;
+ *timeNanoseconds = nanoseconds;
+ return result;
+ } else {
+ return AAUDIO_ERROR_INVALID_STATE; // TODO review, documented but not consistent
+ }
+ }
+ return result;
}
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index b0c6c94..f56be32 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -272,8 +272,7 @@
class SimpleDoubleBuffer {
public:
SimpleDoubleBuffer()
- : mValues()
- , mCounter(0) {}
+ : mValues() {}
__attribute__((no_sanitize("integer")))
void write(T value) {
@@ -282,6 +281,14 @@
mCounter++; // Increment AFTER updating storage, OK if it wraps.
}
+ /**
+ * This should only be called by the same thread that calls write() or when
+ * no other thread is calling write.
+ */
+ void clear() {
+ mCounter.store(0);
+ }
+
T read() const {
T result;
int before;
@@ -293,7 +300,7 @@
int index = (before & 1) ^ 1;
result = mValues[index];
after = mCounter.load();
- } while ((after != before) && --timeout > 0);
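+ // Also give up if the counter was cleared (after == 0) so read() does not
+ // spin on a buffer that clear() just reset.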
+ } while ((after != before) && (after > 0) && (--timeout > 0));
return result;
}
@@ -306,7 +313,7 @@
private:
T mValues[2];
- std::atomic<int> mCounter;
+ std::atomic<int> mCounter{0};
};
class Timestamp {
@@ -328,4 +335,32 @@
int64_t mNanoseconds;
};
+
+/**
+ * Pass a request to another thread.
+ * This is used when one thread, A, wants another thread, B, to do something.
+ * A naive approach would be for A to set a flag and for B to clear it when done.
+ * But that creates a race condition. This technique avoids the race condition.
+ *
+ * Assumes only one requester and one acknowledger.
+ */
+class AtomicRequestor {
+public:
+ void request() {
+ // TODO handle overflows, very unlikely
+ mRequested++;
+ }
+
+ bool isRequested() {
+ return mRequested.load() > mAcknowledged.load();
+ }
+
+ void acknowledge() {
+ mAcknowledged++;
+ }
+
+private:
+ std::atomic<int> mRequested{0};
+ std::atomic<int> mAcknowledged{0};
+};
#endif //UTILITY_AAUDIO_UTILITIES_H
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 81d7f89..13c92a2 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -41,6 +41,13 @@
}
/**
+ * Set the current value of the counter.
+ */
+ void set(int64_t counter) {
+ mCounter64 = counter;
+ }
+
+ /**
* Advance the counter if delta is positive.
* @return current value of the counter
*/
diff --git a/media/libaaudio/tests/test_timestamps.cpp b/media/libaaudio/tests/test_timestamps.cpp
index d9ca391..49de05a 100644
--- a/media/libaaudio/tests/test_timestamps.cpp
+++ b/media/libaaudio/tests/test_timestamps.cpp
@@ -17,23 +17,99 @@
// Play silence and recover from dead servers or disconnected devices.
#include <stdio.h>
+#include <stdlib.h>
#include <unistd.h>
#include <aaudio/AAudio.h>
#include <aaudio/AAudioTesting.h>
-
#include "utils/AAudioExampleUtils.h"
+#include "../examples/utils/AAudioExampleUtils.h"
-#define DEFAULT_TIMEOUT_NANOS ((int64_t)1000000000)
+// Arbitrary period for glitches, once per second at 48000 Hz.
+#define FORCED_UNDERRUN_PERIOD_FRAMES 48000
+// How long to sleep in a callback to cause an intentional glitch. For testing.
+#define FORCED_UNDERRUN_SLEEP_MICROS (10 * 1000)
-int main(int argc, char **argv) {
- (void) argc;
- (void *)argv;
+#define MAX_TIMESTAMPS 1000
+#define DEFAULT_TIMEOUT_NANOS ((int64_t)1000000000)
+
+#define NUM_SECONDS 1
+#define NUM_LOOPS 4
+
+typedef struct TimestampInfo {
+ int64_t framesTotal;
+ int64_t appPosition; // frames
+ int64_t appNanoseconds;
+ int64_t timestampPosition; // frames
+ int64_t timestampNanos;
+ aaudio_result_t result;
+} TimestampInfo;
+
+typedef struct TimestampCallbackData_s {
+ TimestampInfo timestamps[MAX_TIMESTAMPS];
+ int64_t framesTotal = 0;
+ int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
+ int32_t timestampCount = 0; // number of valid entries in timestamps[]
+ bool forceUnderruns = false;
+} TimestampCallbackData_t;
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t timestampDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData __unused,
+ int32_t numFrames
+) {
+
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ printf("ERROR - SimplePlayerDataCallbackProc needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ TimestampCallbackData_t *timestampData = (TimestampCallbackData_t *) userData;
+
+ aaudio_direction_t direction = AAudioStream_getDirection(stream);
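+ // For input, the frames were already transferred from the device, so count
+ // them before the timestamp query; output frames are counted afterwards, below.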
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ timestampData->framesTotal += numFrames;
+ }
+
+ if (timestampData->forceUnderruns) {
+ if (timestampData->framesTotal > timestampData->nextFrameToGlitch) {
+ usleep(FORCED_UNDERRUN_SLEEP_MICROS);
+ printf("Simulate glitch at %lld\n", (long long) timestampData->framesTotal);
+ timestampData->nextFrameToGlitch += FORCED_UNDERRUN_PERIOD_FRAMES;
+ }
+ }
+
+ if (timestampData->timestampCount < MAX_TIMESTAMPS) {
+ TimestampInfo *timestamp = &timestampData->timestamps[timestampData->timestampCount];
+ timestamp->result = AAudioStream_getTimestamp(stream,
+ CLOCK_MONOTONIC,
+ &timestamp->timestampPosition,
+ &timestamp->timestampNanos);
+ timestamp->framesTotal = timestampData->framesTotal;
+ timestamp->appPosition = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? AAudioStream_getFramesWritten(stream)
+ : AAudioStream_getFramesRead(stream);
+ timestamp->appNanoseconds = getNanoseconds();
+ timestampData->timestampCount++;
+ }
+
+ if (direction == AAUDIO_DIRECTION_OUTPUT) {
+ timestampData->framesTotal += numFrames;
+ }
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+static TimestampCallbackData_t sTimestampData;
+
+static aaudio_result_t testTimeStamps(aaudio_policy_t mmapPolicy,
+ aaudio_sharing_mode_t sharingMode,
+ aaudio_performance_mode_t performanceMode,
+ aaudio_direction_t direction) {
aaudio_result_t result = AAUDIO_OK;
- int32_t triesLeft = 3;
- int32_t bufferCapacity;
int32_t framesPerBurst = 0;
float *buffer = nullptr;
@@ -44,22 +120,20 @@
int32_t finalBufferSize = 0;
aaudio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_FLOAT;
aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
- int32_t framesMax;
- int64_t framesTotal;
- int64_t printAt;
- int samplesPerBurst;
- int64_t previousFramePosition = -1;
+ aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+ memset(&sTimestampData, 0, sizeof(sTimestampData));
- printf("Test Timestamps V0.1.1\n");
+ printf("------------ testTimeStamps(policy = %d, sharing = %s, perf = %s, dir = %s) -----------\n",
+ mmapPolicy,
+ getSharingModeText(sharingMode),
+ getPerformanceModeText(performanceMode),
+ getDirectionText(direction));
- AAudio_setMMapPolicy(AAUDIO_POLICY_AUTO);
+ AAudio_setMMapPolicy(mmapPolicy);
// Use an AAudioStreamBuilder to contain requested parameters.
result = AAudio_createStreamBuilder(&aaudioBuilder);
@@ -70,9 +144,11 @@
}
// Request stream properties.
- AAudioStreamBuilder_setFormat(aaudioBuilder, AAUDIO_FORMAT_PCM_FLOAT);
- //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
- AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+ AAudioStreamBuilder_setFormat(aaudioBuilder, AAUDIO_FORMAT_PCM_I16);
+ AAudioStreamBuilder_setSharingMode(aaudioBuilder, sharingMode);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, performanceMode);
+ AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, timestampDataCallbackProc, &sTimestampData);
// Create an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
@@ -87,10 +163,25 @@
actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("-------- chans = %3d, rate = %6d format = %d\n",
- actualChannelCount, actualSampleRate, actualDataFormat);
+ actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
+ if (actualSharingMode != sharingMode) {
+ printf("did not get expected sharingMode, got %3d, skipping test\n",
+ actualSharingMode);
+ result = AAUDIO_OK;
+ goto finish;
+ }
+ actualPerformanceMode = AAudioStream_getPerformanceMode(aaudioStream);
+ if (actualPerformanceMode != performanceMode) {
+ printf("did not get expected performanceMode, got %3d, skipping test\n",
+ actualPerformanceMode);
+ result = AAUDIO_OK;
+ goto finish;
+ }
+
+ printf(" chans = %3d, rate = %6d format = %d\n",
+ actualChannelCount, actualSampleRate, actualDataFormat);
printf(" Is MMAP used? %s\n", AAudioStream_isMMapUsed(aaudioStream)
- ? "yes" : "no");
+ ? "yes" : "no");
// This is the number of frames that are read in one chunk by a DMA controller
// or a DSP or a mixer.
@@ -98,91 +189,143 @@
printf(" framesPerBurst = %3d\n", framesPerBurst);
originalBufferSize = AAudioStream_getBufferSizeInFrames(aaudioStream);
- requestedBufferSize = 2 * framesPerBurst;
+ requestedBufferSize = 4 * framesPerBurst;
finalBufferSize = AAudioStream_setBufferSizeInFrames(aaudioStream, requestedBufferSize);
printf(" BufferSize: original = %4d, requested = %4d, final = %4d\n",
originalBufferSize, requestedBufferSize, finalBufferSize);
- samplesPerBurst = framesPerBurst * actualChannelCount;
- buffer = new float[samplesPerBurst];
-
- result = AAudioStream_requestStart(aaudioStream);
- if (result != AAUDIO_OK) {
- printf("AAudioStream_requestStart returned %s",
+ {
+ int64_t position;
+ int64_t nanoseconds;
+ result = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC, &position, &nanoseconds);
+ printf("before start, AAudioStream_getTimestamp() returns %s\n",
AAudio_convertResultToText(result));
- goto finish;
}
- // Play silence very briefly.
- framesMax = actualSampleRate * 4;
- framesTotal = 0;
- printAt = actualSampleRate;
- while (result == AAUDIO_OK && framesTotal < framesMax) {
- int32_t framesWritten = AAudioStream_write(aaudioStream,
- buffer, framesPerBurst,
- DEFAULT_TIMEOUT_NANOS);
- if (framesWritten < 0) {
- result = framesWritten;
- printf("write() returned %s, frames = %d\n",
- AAudio_convertResultToText(result), (int)framesTotal);
- printf(" frames = %d\n", (int)framesTotal);
- } else if (framesWritten != framesPerBurst) {
- printf("write() returned %d, frames = %d\n", framesWritten, (int)framesTotal);
- result = AAUDIO_ERROR_TIMEOUT;
- } else {
- framesTotal += framesWritten;
- if (framesTotal >= printAt) {
- printf("frames = %d\n", (int)framesTotal);
- printAt += actualSampleRate;
+ for (int runs = 0; runs < NUM_LOOPS; runs++) {
+ printf("------------------ loop #%d\n", runs);
+
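+ // Reset the per-loop callback data but preserve the running frame total.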
+ int64_t temp = sTimestampData.framesTotal;
+ memset(&sTimestampData, 0, sizeof(sTimestampData));
+ sTimestampData.framesTotal = temp;
+
+ sTimestampData.forceUnderruns = false;
+
+ result = AAudioStream_requestStart(aaudioStream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_requestStart returned %s",
+ AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ for (int second = 0; second < NUM_SECONDS; second++) {
+ // Give AAudio callback time to run in the background.
+ sleep(1);
+
+ // Periodically print the progress so we know it hasn't died.
+ printf("framesWritten = %d, XRuns = %d\n",
+ (int) AAudioStream_getFramesWritten(aaudioStream),
+ (int) AAudioStream_getXRunCount(aaudioStream)
+ );
+ }
+
+ result = AAudioStream_requestStop(aaudioStream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_requestStop returned %s\n",
+ AAudio_convertResultToText(result));
+ }
+
+ printf("timestampCount = %d\n", sTimestampData.timestampCount);
+ int printed = 0;
+ for (int i = 0; i < sTimestampData.timestampCount; i++) {
+ TimestampInfo *timestamp = &sTimestampData.timestamps[i];
+ bool posChanged = (i > 0) && (timestamp->timestampPosition != (timestamp - 1)->timestampPosition);
+ bool timeChanged = (i > 0) && (timestamp->timestampNanos != (timestamp - 1)->timestampNanos);
+ if ((printed < 20) && ((i < 10) || posChanged || timeChanged)) {
+ printf(" %3d : frames %8lld, xferd %8lld", i,
+ (long long) timestamp->framesTotal,
+ (long long) timestamp->appPosition);
+ if (timestamp->result != AAUDIO_OK) {
+ printf(", result = %s\n", AAudio_convertResultToText(timestamp->result));
+ } else {
+ bool negative = timestamp->timestampPosition < 0;
+ bool retro = (i > 0 && (timestamp->timestampPosition <
+ (timestamp - 1)->timestampPosition));
+ const char *message = negative ? " <=NEGATIVE!"
+ : (retro ? " <= RETROGRADE!" : "");
+
+ double latency = calculateLatencyMillis(timestamp->timestampPosition,
+ timestamp->timestampNanos,
+ timestamp->appPosition,
+ timestamp->appNanoseconds,
+ actualSampleRate);
+ printf(", STAMP: pos = %8lld, nanos = %8lld, lat = %7.1f msec %s\n",
+ (long long) timestamp->timestampPosition,
+ (long long) timestamp->timestampNanos,
+ latency,
+ message);
+ }
+ printed++;
}
}
- // Print timestamps.
- int64_t framePosition = 0;
- int64_t frameTime = 0;
- aaudio_result_t timeResult;
- timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
- &framePosition, &frameTime);
-
- if (timeResult == AAUDIO_OK) {
- if (framePosition > (previousFramePosition + 5000)) {
- int64_t realTime = getNanoseconds();
- int64_t framesWritten = AAudioStream_getFramesWritten(aaudioStream);
-
- double latencyMillis = calculateLatencyMillis(framePosition, frameTime,
- framesWritten, realTime,
- actualSampleRate);
-
- printf("--- timestamp: result = %4d, position = %lld, at %lld nanos"
- ", latency = %7.2f msec\n",
- timeResult,
- (long long) framePosition,
- (long long) frameTime,
- latencyMillis);
- previousFramePosition = framePosition;
- }
- }
+ // Avoid race conditions in AudioFlinger.
+ // There is normally a delay between a real user stopping and restarting a stream.
+ sleep(1);
}
- result = AAudioStream_requestStop(aaudioStream);
- if (result != AAUDIO_OK) {
- printf("AAudioStream_requestStop returned %s\n",
- AAudio_convertResultToText(result));
- }
- result = AAudioStream_close(aaudioStream);
- if (result != AAUDIO_OK) {
- printf("AAudioStream_close returned %s\n",
- AAudio_convertResultToText(result));
- }
- aaudioStream = nullptr;
-
-
finish:
if (aaudioStream != nullptr) {
AAudioStream_close(aaudioStream);
}
AAudioStreamBuilder_delete(aaudioBuilder);
- delete[] buffer;
printf("result = %d = %s\n", result, AAudio_convertResultToText(result));
+
+ return result;
+}
+
+int main(int argc, char **argv) {
+ (void) argc;
+ (void) argv;
+
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("Test Timestamps V0.1.2\n");
+ // Legacy
+// result = testTimeStamps(AAUDIO_POLICY_NEVER,
+// AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_NONE,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_NEVER,
+// AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_NEVER, AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_NONE,
+// AAUDIO_DIRECTION_OUTPUT);
+ result = testTimeStamps(AAUDIO_POLICY_NEVER, AAUDIO_SHARING_MODE_SHARED,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_DIRECTION_OUTPUT);
+ // MMAP
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS,
+// AAUDIO_SHARING_MODE_EXCLUSIVE,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS,
+// AAUDIO_SHARING_MODE_EXCLUSIVE,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_OUTPUT);
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS, AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS, AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_OUTPUT);
+
+ return (result == AAUDIO_OK) ? EXIT_SUCCESS : EXIT_FAILURE;
}
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index c212112..b99eaff 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -615,7 +615,8 @@
+ mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
(long long)mStartEts.mFlushed,
(long long)mFramesWritten);
- mFramesWrittenServerOffset = -mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
+ // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
+ mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
}
mFramesWritten = 0;
mProxy->clearTimestamp(); // need new server push for valid timestamp
@@ -2096,7 +2097,14 @@
// Convert frame units to time units
nsecs_t ns = NS_WHENEVER;
if (minFrames != (uint32_t) ~0) {
- ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
+ // AudioFlinger consumption of client data may be irregular when coming out of device
+ // standby since the kernel buffers require filling. This is throttled to no more than 2x
+ // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
+ // half (but no more than half a second) to improve callback accuracy during these temporary
+ // data surges.
+ const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
+ constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
+ ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time
// TODO: Should we warn if the callback time is too long?
if (ns < 0) ns = 0;
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index bbd1807..ffd30ea 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -986,6 +986,12 @@
for (const auto& type : typeMap) {
const auto& typeName = type.first;
const char* roleName = GetComponentRole(isEncoder, typeName.data());
+ if (roleName == nullptr) {
+ ALOGE("Cannot find the role for %s of type %s",
+ isEncoder ? "an encoder" : "a decoder",
+ typeName.data());
+ continue;
+ }
const auto& typeAttributeMap = type.second;
auto roleIterator = mRoleMap.find(roleName);
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 4132fed..ad1105a 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -542,6 +542,7 @@
size_t length;
bool read = false;
bool write = false;
+ bool short_packet = false;
posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
@@ -586,6 +587,7 @@
// For larger files, receive until a short packet is received.
if (static_cast<size_t>(ret) < length) {
file_length = 0;
+ short_packet = true;
}
} else {
// Receive an empty packet if size is a multiple of the endpoint size.
@@ -605,7 +607,7 @@
read = false;
}
}
- if (ret % packet_size == 0 || zero_packet) {
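+ // When a short packet already terminated the transfer, no trailing
+ // zero-length packet is expected.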
+ if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
if (TEMP_FAILURE_RETRY(::read(mBulkOut, data, packet_size)) != 0) {
return -1;
}
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 51143ac..eecc858 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -421,7 +421,7 @@
for (size_t i = 0; i < mObj->mQueryResults.size(); i++) {
keyValuePairs[i].mKey = mObj->mQueryResults.keyAt(i).string();
- keyValuePairs[i].mValue = mObj->mQueryResults.keyAt(i).string();
+ keyValuePairs[i].mValue = mObj->mQueryResults.valueAt(i).string();
}
*numPairs = mObj->mQueryResults.size();
return AMEDIA_OK;
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 3f4017f..0d2dba1 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -121,18 +121,17 @@
if (mCallbackStreamId != NO_STREAM) {
// Check if stream parameters have to change
- uint32_t currentWidth, currentHeight, currentFormat;
- res = device->getStreamInfo(mCallbackStreamId,
- &currentWidth, &currentHeight, &currentFormat, 0);
+ CameraDeviceBase::StreamInfo streamInfo;
+ res = device->getStreamInfo(mCallbackStreamId, &streamInfo);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying callback output stream info: "
"%s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
- if (currentWidth != (uint32_t)params.previewWidth ||
- currentHeight != (uint32_t)params.previewHeight ||
- currentFormat != (uint32_t)callbackFormat) {
+ if (streamInfo.width != (uint32_t)params.previewWidth ||
+ streamInfo.height != (uint32_t)params.previewHeight ||
+ !streamInfo.matchFormat((uint32_t)callbackFormat)) {
// Since size should only change while preview is not running,
// assuming that all existing use of old callback stream is
// completed.
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index d6d8dde..d8b7af2 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -136,17 +136,16 @@
if (mCaptureStreamId != NO_STREAM) {
// Check if stream parameters have to change
- uint32_t currentWidth, currentHeight;
- res = device->getStreamInfo(mCaptureStreamId,
- &currentWidth, &currentHeight, 0, 0);
+ CameraDeviceBase::StreamInfo streamInfo;
+ res = device->getStreamInfo(mCaptureStreamId, &streamInfo);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying capture output stream info: "
"%s (%d)", __FUNCTION__,
mId, strerror(-res), res);
return res;
}
- if (currentWidth != (uint32_t)params.pictureWidth ||
- currentHeight != (uint32_t)params.pictureHeight) {
+ if (streamInfo.width != (uint32_t)params.pictureWidth ||
+ streamInfo.height != (uint32_t)params.pictureHeight) {
ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
__FUNCTION__, mId, mCaptureStreamId);
res = device->deleteStream(mCaptureStreamId);
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index d79e430..b6f443a 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -161,18 +161,17 @@
if (mPreviewStreamId != NO_STREAM) {
// Check if stream parameters have to change
- uint32_t currentWidth, currentHeight;
- res = device->getStreamInfo(mPreviewStreamId,
- &currentWidth, &currentHeight, 0, 0);
+ CameraDeviceBase::StreamInfo streamInfo;
+ res = device->getStreamInfo(mPreviewStreamId, &streamInfo);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying preview stream info: "
"%s (%d)", __FUNCTION__, mId, strerror(-res), res);
return res;
}
- if (currentWidth != (uint32_t)params.previewWidth ||
- currentHeight != (uint32_t)params.previewHeight) {
+ if (streamInfo.width != (uint32_t)params.previewWidth ||
+ streamInfo.height != (uint32_t)params.previewHeight) {
ALOGV("%s: Camera %d: Preview size switch: %d x %d -> %d x %d",
- __FUNCTION__, mId, currentWidth, currentHeight,
+ __FUNCTION__, mId, streamInfo.width, streamInfo.height,
params.previewWidth, params.previewHeight);
res = device->waitUntilDrained();
if (res != OK) {
@@ -312,10 +311,8 @@
return INVALID_OPERATION;
}
- uint32_t currentWidth, currentHeight, currentFormat;
- android_dataspace currentDataSpace;
- res = device->getStreamInfo(mRecordingStreamId,
- &currentWidth, &currentHeight, &currentFormat, &currentDataSpace);
+ CameraDeviceBase::StreamInfo streamInfo;
+ res = device->getStreamInfo(mRecordingStreamId, &streamInfo);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying recording output stream info: "
"%s (%d)", __FUNCTION__, mId,
@@ -324,10 +321,10 @@
}
if (mRecordingWindow == nullptr ||
- currentWidth != (uint32_t)params.videoWidth ||
- currentHeight != (uint32_t)params.videoHeight ||
- currentFormat != (uint32_t)params.videoFormat ||
- currentDataSpace != params.videoDataSpace) {
+ streamInfo.width != (uint32_t)params.videoWidth ||
+ streamInfo.height != (uint32_t)params.videoHeight ||
+ !streamInfo.matchFormat((uint32_t)params.videoFormat) ||
+ streamInfo.dataSpace != params.videoDataSpace) {
*needsUpdate = true;
return res;
}
@@ -348,22 +345,18 @@
if (mRecordingStreamId != NO_STREAM) {
// Check if stream parameters have to change
- uint32_t currentWidth, currentHeight;
- uint32_t currentFormat;
- android_dataspace currentDataSpace;
- res = device->getStreamInfo(mRecordingStreamId,
- &currentWidth, &currentHeight,
- &currentFormat, &currentDataSpace);
+ CameraDeviceBase::StreamInfo streamInfo;
+ res = device->getStreamInfo(mRecordingStreamId, &streamInfo);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying recording output stream info: "
"%s (%d)", __FUNCTION__, mId,
strerror(-res), res);
return res;
}
- if (currentWidth != (uint32_t)params.videoWidth ||
- currentHeight != (uint32_t)params.videoHeight ||
- currentFormat != (uint32_t)params.videoFormat ||
- currentDataSpace != params.videoDataSpace) {
+ if (streamInfo.width != (uint32_t)params.videoWidth ||
+ streamInfo.height != (uint32_t)params.videoHeight ||
+ !streamInfo.matchFormat((uint32_t)params.videoFormat) ||
+ streamInfo.dataSpace != params.videoDataSpace) {
// TODO: Should wait to be sure previous recording has finished
res = device->deleteStream(mRecordingStreamId);
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 9bc31b9..b0607fb 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -233,17 +233,16 @@
if ((mZslStreamId != NO_STREAM) || (mInputStreamId != NO_STREAM)) {
// Check if stream parameters have to change
- uint32_t currentWidth, currentHeight;
- res = device->getStreamInfo(mZslStreamId,
- &currentWidth, &currentHeight, 0, 0);
+ CameraDeviceBase::StreamInfo streamInfo;
+ res = device->getStreamInfo(mZslStreamId, &streamInfo);
if (res != OK) {
ALOGE("%s: Camera %d: Error querying capture output stream info: "
"%s (%d)", __FUNCTION__,
client->getCameraId(), strerror(-res), res);
return res;
}
- if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
- currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
+ if (streamInfo.width != (uint32_t)params.fastInfo.arrayWidth ||
+ streamInfo.height != (uint32_t)params.fastInfo.arrayHeight) {
if (mZslStreamId != NO_STREAM) {
ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
"dimensions changed",
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 54fcb0a..fe4c8d7 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -142,12 +142,32 @@
virtual status_t createInputStream(uint32_t width, uint32_t height,
int32_t format, /*out*/ int32_t *id) = 0;
+ struct StreamInfo {
+ uint32_t width;
+ uint32_t height;
+ uint32_t format;
+ bool formatOverridden;
+ uint32_t originalFormat;
+ android_dataspace dataSpace;
+ StreamInfo() : width(0), height(0), format(0), formatOverridden(false), originalFormat(0),
+ dataSpace(HAL_DATASPACE_UNKNOWN) {}
+ /**
+ * Check whether the format matches the current or the original one in case
+ * it got overridden.
+ */
+ bool matchFormat(uint32_t clientFormat) {
+ if ((formatOverridden && (originalFormat == clientFormat)) ||
+ (format == clientFormat)) {
+ return true;
+ }
+ return false;
+ }
+ };
+
/**
* Get information about a given stream.
*/
- virtual status_t getStreamInfo(int id,
- uint32_t *width, uint32_t *height,
- uint32_t *format, android_dataspace *dataSpace) = 0;
+ virtual status_t getStreamInfo(int id, StreamInfo *streamInfo) = 0;
/**
* Set stream gralloc buffer transform
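The new CameraDeviceBase::StreamInfo struct above bundles the per-stream attributes that the api1 processors previously fetched through individual out-parameters, and matchFormat() accepts either the current format or the pre-override one. A compilable approximation of how the callers use it; the device class, constants, and values below are hypothetical stand-ins, and only the control flow mirrors the real processors:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for CameraDeviceBase::StreamInfo.
struct StreamInfo {
    uint32_t width = 0;
    uint32_t height = 0;
    uint32_t format = 0;
    bool formatOverridden = false;
    uint32_t originalFormat = 0;
    int dataSpace = 0;  // HAL_DATASPACE_UNKNOWN in the real struct

    // Match against the current format, or the original one if it was overridden.
    bool matchFormat(uint32_t clientFormat) const {
        return (formatOverridden && originalFormat == clientFormat) ||
               (format == clientFormat);
    }
};

// Hypothetical device returning fixed values, standing in for Camera3Device.
struct FakeDevice {
    int getStreamInfo(int /*id*/, StreamInfo* info) {
        if (info == nullptr) return -1;  // BAD_VALUE in the real code
        info->width = 1920;
        info->height = 1080;
        info->format = 0x23;             // value reported by the HAL (hypothetical)
        info->formatOverridden = true;
        info->originalFormat = 0x22;     // original client request (hypothetical)
        return 0;
    }
};

int main() {
    FakeDevice device;
    StreamInfo info;
    if (device.getStreamInfo(/*id=*/1, &info) != 0) return 1;

    // Mirrors the checks in CallbackProcessor/StreamingProcessor: recreate the
    // stream only when size or (possibly overridden) format no longer matches.
    uint32_t wantedWidth = 1920, wantedHeight = 1080, wantedFormat = 0x22;
    bool needsRecreate = info.width != wantedWidth ||
                         info.height != wantedHeight ||
                         !info.matchFormat(wantedFormat);
    std::printf("needsRecreate=%d\n", needsRecreate ? 1 : 0);
    return 0;
}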
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 94e8f3b..669f763 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1345,10 +1345,11 @@
return OK;
}
-status_t Camera3Device::getStreamInfo(int id,
- uint32_t *width, uint32_t *height,
- uint32_t *format, android_dataspace *dataSpace) {
+status_t Camera3Device::getStreamInfo(int id, StreamInfo *streamInfo) {
ATRACE_CALL();
+ if (nullptr == streamInfo) {
+ return BAD_VALUE;
+ }
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -1375,10 +1376,12 @@
return idx;
}
- if (width) *width = mOutputStreams[idx]->getWidth();
- if (height) *height = mOutputStreams[idx]->getHeight();
- if (format) *format = mOutputStreams[idx]->getFormat();
- if (dataSpace) *dataSpace = mOutputStreams[idx]->getDataSpace();
+ streamInfo->width = mOutputStreams[idx]->getWidth();
+ streamInfo->height = mOutputStreams[idx]->getHeight();
+ streamInfo->format = mOutputStreams[idx]->getFormat();
+ streamInfo->dataSpace = mOutputStreams[idx]->getDataSpace();
+ streamInfo->formatOverridden = mOutputStreams[idx]->isFormatOverridden();
+ streamInfo->originalFormat = mOutputStreams[idx]->getOriginalFormat();
return OK;
}
@@ -3233,6 +3236,8 @@
}
HalStream &src = finalConfiguration.streams[realIdx];
+ Camera3Stream* dstStream = Camera3Stream::cast(dst);
+ dstStream->setFormatOverride(false);
int overrideFormat = mapToFrameworkFormat(src.overrideFormat);
if (dst->format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
if (dst->format != overrideFormat) {
@@ -3240,6 +3245,8 @@
streamId, dst->format);
}
} else {
+ dstStream->setFormatOverride((dst->format != overrideFormat) ? true : false);
+ dstStream->setOriginalFormat(dst->format);
// Override allowed with IMPLEMENTATION_DEFINED
dst->format = overrideFormat;
}
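The Camera3Device hunk above also records whether the HAL replaced an IMPLEMENTATION_DEFINED format, so that StreamInfo::matchFormat() can later compare a client request against the original value. A condensed sketch of that recording step, with a hypothetical constant standing in for the HAL pixel-format value:

#include <cstdio>

// Hypothetical stand-in for HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED.
constexpr int kImplementationDefined = 0x22;

struct StreamRecord {
    int format = kImplementationDefined;
    bool formatOverridden = false;
    int originalFormat = -1;
};

// Mirrors the control flow added to Camera3Device: only streams that asked for
// IMPLEMENTATION_DEFINED may be overridden, and the original value is kept.
void applyHalOverride(StreamRecord& stream, int halOverrideFormat) {
    stream.formatOverridden = false;
    if (stream.format != kImplementationDefined) {
        if (stream.format != halOverrideFormat) {
            std::printf("warning: HAL tried to override a fixed format\n");
        }
        return;
    }
    stream.formatOverridden = (stream.format != halOverrideFormat);
    stream.originalFormat = stream.format;
    stream.format = halOverrideFormat;
}

int main() {
    StreamRecord stream;
    applyHalOverride(stream, /*halOverrideFormat=*/0x11);
    std::printf("format=%d overridden=%d original=%d\n",
                stream.format, stream.formatOverridden, stream.originalFormat);
    return 0;
}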
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 363bd88..298a3d8 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -128,9 +128,7 @@
uint32_t width, uint32_t height, int format,
int *id) override;
- status_t getStreamInfo(int id,
- uint32_t *width, uint32_t *height,
- uint32_t *format, android_dataspace *dataSpace) override;
+ status_t getStreamInfo(int id, StreamInfo *streamInfo) override;
status_t setStreamTransform(int id, int transform) override;
status_t deleteStream(int id) override;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 25e44a5..c186208 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -62,7 +62,9 @@
mPrepared(false),
mPreparedBufferIdx(0),
mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
- mBufferLimitLatency(kBufferLimitLatencyBinSize) {
+ mBufferLimitLatency(kBufferLimitLatencyBinSize),
+ mFormatOverridden(false),
+ mOriginalFormat(-1) {
camera3_stream::stream_type = type;
camera3_stream::width = width;
@@ -112,6 +114,22 @@
mUsage = usage;
}
+void Camera3Stream::setFormatOverride(bool formatOverridden) {
+ mFormatOverridden = formatOverridden;
+}
+
+bool Camera3Stream::isFormatOverridden() {
+ return mFormatOverridden;
+}
+
+void Camera3Stream::setOriginalFormat(int originalFormat) {
+ mOriginalFormat = originalFormat;
+}
+
+int Camera3Stream::getOriginalFormat() {
+ return mOriginalFormat;
+}
+
camera3_stream* Camera3Stream::startConfiguration() {
ATRACE_CALL();
Mutex::Autolock l(mLock);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 9090f83..1843ae8 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -146,6 +146,10 @@
android_dataspace getDataSpace() const;
uint64_t getUsage() const;
void setUsage(uint64_t usage);
+ void setFormatOverride(bool formatOverriden);
+ bool isFormatOverridden();
+ void setOriginalFormat(int originalFormat);
+ int getOriginalFormat();
camera3_stream* asHalStream() override {
return this;
@@ -514,6 +518,10 @@
// max_buffers.
static const int32_t kBufferLimitLatencyBinSize = 33; //in ms
CameraLatencyHistogram mBufferLimitLatency;
+
+ //Keep track of original format in case it gets overridden
+ bool mFormatOverridden;
+ int mOriginalFormat;
}; // class Camera3Stream
}; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 0544a1b..63456c4 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -71,6 +71,10 @@
virtual uint32_t getHeight() const = 0;
virtual int getFormat() const = 0;
virtual android_dataspace getDataSpace() const = 0;
+ virtual void setFormatOverride(bool formatOverriden) = 0;
+ virtual bool isFormatOverridden() = 0;
+ virtual void setOriginalFormat(int originalFormat) = 0;
+ virtual int getOriginalFormat() = 0;
/**
* Get a HAL3 handle for the stream, without starting stream configuration.
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 2836525..2443301 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -257,9 +257,21 @@
break;
}
- item->setPkgName(getPkgName(item->getUid(), true));
- item->setPkgVersionCode(0);
- ALOGV("info is from uid %d pkg '%s', version %d", item->getUid(), item->getPkgName().c_str(), item->getPkgVersionCode());
+ // Overwrite package name and version if the caller was untrusted.
+ if (!isTrusted) {
+ item->setPkgName(getPkgName(item->getUid(), true));
+ item->setPkgVersionCode(0);
+ } else if (item->getPkgName().empty()) {
+ // Only overwrite the package name if it was empty. Trust whatever
+ // version code was provided by the trusted caller.
+ item->setPkgName(getPkgName(uid, true));
+ }
+
+ ALOGV("given uid %d; sanitized uid: %d sanitized pkg: %s "
+ "sanitized pkg version: %d",
+ uid_given, item->getUid(),
+ item->getPkgName().c_str(),
+ item->getPkgVersionCode());
mItemsSubmitted++;
@@ -638,11 +650,6 @@
// are they alike enough that nitem can be folded into oitem?
static bool compatibleItems(MediaAnalyticsItem * oitem, MediaAnalyticsItem * nitem) {
- if (0) {
- ALOGD("Compare: o %s n %s",
- oitem->toString().c_str(), nitem->toString().c_str());
- }
-
// general safety
if (nitem->getUid() != oitem->getUid()) {
return false;
diff --git a/services/mediaanalytics/mediametrics.rc b/services/mediaanalytics/mediametrics.rc
index 3829f8c..1efde5e 100644
--- a/services/mediaanalytics/mediametrics.rc
+++ b/services/mediaanalytics/mediametrics.rc
@@ -1,5 +1,6 @@
service mediametrics /system/bin/mediametrics
class main
user media
+ group media
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 855ae69..5b34895 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -215,8 +215,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- aaudio_result_t result = serviceStream->start();
- return result;
+ return serviceStream->start();
}
aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 58213f8..4be25c8 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -250,6 +250,8 @@
aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
audio_port_handle_t *clientHandle) {
+ // Start the client on behalf of the AAudio service.
+ // Use the port handle that was provided by openMmapStream().
return startClient(mMmapClient, &mPortHandle);
}
@@ -262,11 +264,12 @@
aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
audio_port_handle_t *clientHandle) {
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+ ALOGD("AAudioServiceEndpointMMAP::startClient(%p(uid=%d, pid=%d))",
+ &client, client.clientUid, client.clientPid);
audio_port_handle_t originalHandle = *clientHandle;
- aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->start(client,
- clientHandle));
- ALOGD("AAudioServiceEndpointMMAP::startClient(%p(uid=%d, pid=%d), %d => %d) returns %d",
- &client, client.clientUid, client.clientPid,
+ status_t status = mMmapStream->start(client, clientHandle);
+ aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
+ ALOGD("AAudioServiceEndpointMMAP::startClient() , %d => %d returns %d",
originalHandle, *clientHandle, result);
return result;
}
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 43d73b7..18dc12f 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -122,7 +122,6 @@
startSharingThread_l();
}
if (result == AAUDIO_OK) {
- ALOGD("AAudioServiceEndpointShared::startStream() use shared stream client.");
result = getStreamInternal()->startClient(sharedStream->getAudioClient(), clientHandle);
}
return result;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 2dc62a0..ca7b528 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -42,7 +42,7 @@
AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
: mUpMessageQueue(nullptr)
- , mAAudioThread()
+ , mTimestampThread()
, mAtomicTimestamp()
, mAudioService(audioService) {
mMmapClient.clientUid = -1;
@@ -54,10 +54,10 @@
ALOGD("AAudioServiceStreamBase::~AAudioServiceStreamBase() destroying %p", this);
// If the stream is deleted when OPEN or in use then audio resources will leak.
// This would indicate an internal error. So we want to find this ASAP.
- LOG_ALWAYS_FATAL_IF(!(mState == AAUDIO_STREAM_STATE_CLOSED
- || mState == AAUDIO_STREAM_STATE_UNINITIALIZED
- || mState == AAUDIO_STREAM_STATE_DISCONNECTED),
- "service stream still open, state = %d", mState);
+ LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
+ || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED
+ || getState() == AAUDIO_STREAM_STATE_DISCONNECTED),
+ "service stream still open, state = %d", getState());
}
std::string AAudioServiceStreamBase::dumpHeader() {
@@ -71,7 +71,7 @@
<< std::dec << std::setfill(' ') ;
result << std::setw(6) << mMmapClient.clientUid;
result << std::setw(4) << (isRunning() ? "yes" : " no");
- result << std::setw(6) << mState;
+ result << std::setw(6) << getState();
result << std::setw(7) << getFormat();
result << std::setw(6) << mFramesPerBurst;
result << std::setw(5) << getSamplesPerFrame();
@@ -124,7 +124,7 @@
aaudio_result_t AAudioServiceStreamBase::close() {
aaudio_result_t result = AAUDIO_OK;
- if (mState == AAUDIO_STREAM_STATE_CLOSED) {
+ if (getState() == AAUDIO_STREAM_STATE_CLOSED) {
return AAUDIO_OK;
}
@@ -146,37 +146,50 @@
mUpMessageQueue = nullptr;
}
- mState = AAUDIO_STREAM_STATE_CLOSED;
+ setState(AAUDIO_STREAM_STATE_CLOSED);
return result;
}
+aaudio_result_t AAudioServiceStreamBase::startDevice() {
+ mClientHandle = AUDIO_PORT_HANDLE_NONE;
+ return mServiceEndpoint->startStream(this, &mClientHandle);
+}
+
/**
* Start the flow of audio data.
*
* An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
*/
aaudio_result_t AAudioServiceStreamBase::start() {
+ aaudio_result_t result = AAUDIO_OK;
if (isRunning()) {
return AAUDIO_OK;
}
if (mServiceEndpoint == nullptr) {
ALOGE("AAudioServiceStreamBase::start() missing endpoint");
- return AAUDIO_ERROR_INVALID_STATE;
+ result = AAUDIO_ERROR_INVALID_STATE;
+ goto error;
}
+
+ // Start with fresh presentation timestamps.
+ mAtomicTimestamp.clear();
+
mClientHandle = AUDIO_PORT_HANDLE_NONE;
- aaudio_result_t result = mServiceEndpoint->startStream(this, &mClientHandle);
- if (result != AAUDIO_OK) {
- ALOGE("AAudioServiceStreamBase::start() mServiceEndpoint returned %d", result);
- disconnect();
- } else {
- if (result == AAUDIO_OK) {
- sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
- mState = AAUDIO_STREAM_STATE_STARTED;
- mThreadEnabled.store(true);
- result = mAAudioThread.start(this);
- }
- }
+ result = startDevice();
+ if (result != AAUDIO_OK) goto error;
+
+ // This should happen at the end of the start.
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ mThreadEnabled.store(true);
+ result = mTimestampThread.start(this);
+ if (result != AAUDIO_OK) goto error;
+
+ return result;
+
+error:
+ disconnect();
return result;
}
@@ -197,13 +210,13 @@
sendCurrentTimestamp();
mThreadEnabled.store(false);
- result = mAAudioThread.stop();
+ result = mTimestampThread.stop();
if (result != AAUDIO_OK) {
disconnect();
return result;
}
sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
- mState = AAUDIO_STREAM_STATE_PAUSED;
+ setState(AAUDIO_STREAM_STATE_PAUSED);
return result;
}
@@ -234,7 +247,7 @@
}
sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
- mState = AAUDIO_STREAM_STATE_STOPPED;
+ setState(AAUDIO_STREAM_STATE_STOPPED);
return result;
}
@@ -242,20 +255,20 @@
aaudio_result_t result = AAUDIO_OK;
// clear flag that tells thread to loop
if (mThreadEnabled.exchange(false)) {
- result = mAAudioThread.stop();
+ result = mTimestampThread.stop();
}
return result;
}
aaudio_result_t AAudioServiceStreamBase::flush() {
- if (mState != AAUDIO_STREAM_STATE_PAUSED) {
+ if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
ALOGE("AAudioServiceStreamBase::flush() stream not paused, state = %s",
AAudio_convertStreamStateToText(mState));
return AAUDIO_ERROR_INVALID_STATE;
}
// Data will get flushed when the client receives the FLUSHED event.
sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
- mState = AAUDIO_STREAM_STATE_FLUSHED;
+ setState(AAUDIO_STREAM_STATE_FLUSHED);
return AAUDIO_OK;
}
@@ -283,9 +296,9 @@
}
void AAudioServiceStreamBase::disconnect() {
- if (mState != AAUDIO_STREAM_STATE_DISCONNECTED) {
+ if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
- mState = AAUDIO_STREAM_STATE_DISCONNECTED;
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
}
}
@@ -321,6 +334,9 @@
aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
+ ALOGV("sendCurrentTimestamp() SERVICE %8lld at %lld",
+ (long long) command.timestamp.position,
+ (long long) command.timestamp.timestamp);
command.what = AAudioServiceMessage::code::TIMESTAMP_SERVICE;
result = writeUpMessageQueue(&command);
@@ -329,13 +345,16 @@
result = getHardwareTimestamp(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
+ ALOGV("sendCurrentTimestamp() HARDWARE %8lld at %lld",
+ (long long) command.timestamp.position,
+ (long long) command.timestamp.timestamp);
command.what = AAudioServiceMessage::code::TIMESTAMP_HARDWARE;
result = writeUpMessageQueue(&command);
}
}
}
- if (result == AAUDIO_ERROR_UNAVAILABLE) {
+ if (result == AAUDIO_ERROR_UNAVAILABLE) { // TODO review best error code
result = AAUDIO_OK; // just not available yet, try again later
}
return result;
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 301795d..6f61401 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -191,6 +191,12 @@
mState = state;
}
+ /**
+ * Device specific startup.
+ * @return AAUDIO_OK or negative error.
+ */
+ virtual aaudio_result_t startDevice();
+
aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
aaudio_result_t sendCurrentTimestamp();
@@ -213,7 +219,7 @@
SharedRingBuffer* mUpMessageQueue;
std::mutex mUpMessageQueueLock;
- AAudioThread mAAudioThread;
+ AAudioThread mTimestampThread;
// This is used by one thread to tell another thread to exit. So it must be atomic.
std::atomic<bool> mThreadEnabled{false};
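Together, the AAudioServiceStreamBase changes above turn start() into a template method: the base class funnels every failure through one disconnect() path, and the device-specific step moves into the virtual startDevice() hook that AAudioServiceStreamMMAP overrides in the next file. A condensed sketch of that shape, with plain ints standing in for aaudio_result_t:

#include <cstdio>

// Hypothetical stand-ins for aaudio_result_t values.
constexpr int kOk = 0;

class StreamBase {
public:
    virtual ~StreamBase() = default;

    // Common startup path shared by all stream types; failures disconnect once.
    int start() {
        if (mRunning) return kOk;
        int result = startDevice();          // subclass hook
        if (result != kOk) { disconnect(); return result; }
        mRunning = true;
        std::printf("started\n");
        return kOk;
    }

protected:
    // Device-specific startup, overridden by MMAP/shared streams.
    virtual int startDevice() { return kOk; }
    void disconnect() { std::printf("disconnected after error\n"); }
    bool mRunning = false;
};

class StreamMmap : public StreamBase {
protected:
    int startDevice() override {
        int result = StreamBase::startDevice();
        if (result != kOk) return result;
        // In the real code this is where the client is started on behalf of
        // the application, which can take a few hundred ms on a cold start.
        return kOk;
    }
};

int main() {
    StreamMmap stream;
    return stream.start() == kOk ? 0 : 1;
}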
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 47041c5..a629ed6 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -50,7 +50,7 @@
}
aaudio_result_t AAudioServiceStreamMMAP::close() {
- if (mState == AAUDIO_STREAM_STATE_CLOSED) {
+ if (getState() == AAUDIO_STREAM_STATE_CLOSED) {
return AAUDIO_OK;
}
@@ -67,7 +67,6 @@
aaudio_result_t result = AAudioServiceStreamBase::open(request,
AAUDIO_SHARING_MODE_EXCLUSIVE);
if (result != AAUDIO_OK) {
- ALOGE("AAudioServiceStreamBase open returned %d", result);
return result;
}
@@ -85,13 +84,10 @@
/**
* Start the flow of data.
*/
-aaudio_result_t AAudioServiceStreamMMAP::start() {
- if (isRunning()) {
- return AAUDIO_OK;
- }
-
- aaudio_result_t result = AAudioServiceStreamBase::start();
+aaudio_result_t AAudioServiceStreamMMAP::startDevice() {
+ aaudio_result_t result = AAudioServiceStreamBase::startDevice();
if (!mInService && result == AAUDIO_OK) {
+ // Note that this can sometimes take 200 to 300 msec for a cold start!
result = startClient(mMmapClient, &mClientHandle);
}
return result;
@@ -126,6 +122,7 @@
aaudio_result_t AAudioServiceStreamMMAP::startClient(const android::AudioClient& client,
audio_port_handle_t *clientHandle) {
+ // Start the client on behalf of the application. Generate a new port handle.
aaudio_result_t result = mServiceEndpoint->startClient(client, clientHandle);
return result;
}
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index bf0aab3..83cd2ef 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -53,14 +53,6 @@
aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
/**
- * Start the flow of audio data.
- *
- * This is not guaranteed to be synchronous but it currently is.
- * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
- */
- aaudio_result_t start() override;
-
- /**
* Stop the flow of data so that start() can resume without loss of data.
*
* This is not guaranteed to be synchronous but it currently is.
@@ -89,6 +81,12 @@
aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+ /**
+ * Device specific startup.
+ * @return AAUDIO_OK or negative error.
+ */
+ aaudio_result_t startDevice() override;
+
private:
bool mInService = false;
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 834f39f..348d407 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -237,9 +237,15 @@
aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
int64_t *timeNanos) {
- aaudio_result_t result = mServiceEndpoint->getTimestamp(positionFrames, timeNanos);
+ int64_t position = 0;
+ aaudio_result_t result = mServiceEndpoint->getTimestamp(&position, timeNanos);
if (result == AAUDIO_OK) {
- *positionFrames -= mTimestampPositionOffset.load(); // Offset from shared MMAP stream
+ int64_t offset = mTimestampPositionOffset.load();
+ // TODO, do not go below starting value
+ position -= offset; // Offset from shared MMAP stream
+ ALOGV("getHardwareTimestamp() %8lld = %8lld - %8lld",
+ (long long) position, (long long) (position + offset), (long long) offset);
}
+ *positionFrames = position;
return result;
}
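Finally, the getHardwareTimestamp() change above computes on a local position so the caller's out-parameter is written exactly once, after the shared-stream offset has been subtracted. A minimal illustration with hypothetical stand-ins for the endpoint and the atomic offset:

#include <atomic>
#include <cstdint>
#include <cstdio>

// Hypothetical endpoint returning a fixed timestamp.
static int getEndpointTimestamp(int64_t* positionFrames, int64_t* timeNanos) {
    *positionFrames = 96000;
    *timeNanos = 123456789;
    return 0;  // AAUDIO_OK
}

static std::atomic<int64_t> gTimestampPositionOffset{48000};

// Mirrors the fixed flow: compute on a local, then publish to the caller.
static int getHardwareTimestamp(int64_t* positionFrames, int64_t* timeNanos) {
    int64_t position = 0;
    int result = getEndpointTimestamp(&position, timeNanos);
    if (result == 0) {
        position -= gTimestampPositionOffset.load();  // offset from the shared MMAP stream
    }
    *positionFrames = position;
    return result;
}

int main() {
    int64_t frames = 0, nanos = 0;
    getHardwareTimestamp(&frames, &nanos);
    std::printf("position=%lld time=%lld\n", (long long) frames, (long long) nanos);
    return 0;
}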