Merge changes from topic 'api1_zsl_rework'
* changes:
Camera2: Add native test for framework-backed ZSL
Camera: Rework ZSL path when using API1 with HAL3
diff --git a/CleanSpec.mk b/CleanSpec.mk
index d0890fe..34d040e 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -59,6 +59,12 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudiopolicymanager.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudiopolicyservice_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudiopolicymanager_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/liboboe.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib64/liboboe.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/liboboe*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/liboboe*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj_arm/STATIC_LIBRARIES/liboboe*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj_arm/SHARED_LIBRARIES/liboboe*)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index ece64fd..60effe2 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -103,7 +103,7 @@
// establish binder interface to camera service
template <typename TCam, typename TCamTraits>
-const sp<::android::hardware::ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService()
+const sp<::android::hardware::ICameraService> CameraBase<TCam, TCamTraits>::getCameraService()
{
Mutex::Autolock _l(gLock);
if (gCameraService.get() == 0) {
@@ -141,7 +141,7 @@
ALOGV("%s: connect", __FUNCTION__);
sp<TCam> c = new TCam(cameraId);
sp<TCamCallbacks> cl = c;
- const sp<::android::hardware::ICameraService>& cs = getCameraService();
+ const sp<::android::hardware::ICameraService> cs = getCameraService();
binder::Status ret;
if (cs != nullptr) {
@@ -249,7 +249,7 @@
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::getCameraInfo(int cameraId,
struct hardware::CameraInfo* cameraInfo) {
- const sp<::android::hardware::ICameraService>& cs = getCameraService();
+ const sp<::android::hardware::ICameraService> cs = getCameraService();
if (cs == 0) return UNKNOWN_ERROR;
binder::Status res = cs->getCameraInfo(cameraId, cameraInfo);
return res.isOk() ? OK : res.serviceSpecificErrorCode();
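
The return-type change above is easiest to see with a small sketch: gCameraService is a process-global strong pointer that gets cleared when the camera service dies, so handing out a const reference into it can leave callers with a dangling alias, whereas returning by value gives each caller its own strong reference. A rough standalone illustration, using std::shared_ptr as a stand-in for android::sp<> and made-up names:

    #include <memory>
    #include <mutex>

    struct FakeService {};                         // stand-in for ICameraService

    static std::mutex gLock;
    static std::shared_ptr<FakeService> gService;  // stand-in for gCameraService

    // Returning by value: the caller gets its own strong reference, so the
    // object stays alive even if another thread resets gService (for example
    // after a binder death notification clears the cached handle).
    std::shared_ptr<FakeService> getServiceByValue() {
        std::lock_guard<std::mutex> lock(gLock);
        if (!gService) gService = std::make_shared<FakeService>();
        return gService;
    }

    // Returning a const reference to the global hands out an alias that can
    // dangle as soon as gService is reset concurrently -- the hazard the
    // CameraBase change avoids.
    const std::shared_ptr<FakeService>& getServiceByReference() {
        std::lock_guard<std::mutex> lock(gLock);
        if (!gService) gService = std::make_shared<FakeService>();
        return gService;
    }
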
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 4daf35b..0597950 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -23,11 +23,19 @@
#include <binder/Parcel.h>
#include <gui/Surface.h>
+#include <gui/view/Surface.h>
namespace android {
namespace hardware {
namespace camera2 {
+// These must be in the .cpp (to avoid inlining)
+CaptureRequest::CaptureRequest() = default;
+CaptureRequest::~CaptureRequest() = default;
+CaptureRequest::CaptureRequest(const CaptureRequest& rhs) = default;
+CaptureRequest::CaptureRequest(CaptureRequest&& rhs) noexcept = default;
+
+
status_t CaptureRequest::readFromParcel(const android::Parcel* parcel) {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index f570b7f..468a1eb 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -22,7 +22,7 @@
#include <camera/camera2/OutputConfiguration.h>
#include <binder/Parcel.h>
-#include <gui/Surface.h>
+#include <gui/view/Surface.h>
#include <utils/String8.h>
namespace android {
diff --git a/drm/libmediadrm/Android.mk b/drm/libmediadrm/Android.mk
index 7e77aac..8e3cc40 100644
--- a/drm/libmediadrm/Android.mk
+++ b/drm/libmediadrm/Android.mk
@@ -13,7 +13,7 @@
IDrmClient.cpp \
IMediaDrmService.cpp \
SharedLibrary.cpp
-ifeq ($(ENABLE_TREBLE_DRM), true)
+ifneq ($(DISABLE_TREBLE_DRM), true)
LOCAL_SRC_FILES += \
DrmHal.cpp \
CryptoHal.cpp
@@ -31,7 +31,7 @@
libmediautils \
libstagefright_foundation \
libutils
-ifeq ($(ENABLE_TREBLE_DRM), true)
+ifneq ($(DISABLE_TREBLE_DRM), true)
LOCAL_SHARED_LIBRARIES += \
android.hidl.base@1.0 \
android.hardware.drm@1.0 \
diff --git a/drm/libmediadrm/Drm.cpp b/drm/libmediadrm/Drm.cpp
index 07e9414..e3176e3 100644
--- a/drm/libmediadrm/Drm.cpp
+++ b/drm/libmediadrm/Drm.cpp
@@ -303,7 +303,8 @@
return true;
}
-status_t Drm::createPlugin(const uint8_t uuid[16]) {
+status_t Drm::createPlugin(const uint8_t uuid[16],
+ const String8& /* appPackageName */) {
Mutex::Autolock autoLock(mLock);
if (mPlugin != NULL) {
@@ -319,7 +320,12 @@
}
status_t result = mFactory->createDrmPlugin(uuid, &mPlugin);
- mPlugin->setListener(this);
+ if (mPlugin) {
+ mPlugin->setListener(this);
+ } else {
+ ALOGE("Failed to create plugin");
+ return UNEXPECTED_NULL;
+ }
return result;
}
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 304cdaf..4ef1f47 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -202,24 +202,27 @@
if (factory == NULL) {
ALOGE("Failed to make drm factory");
}
+ ALOGD("makeDrmFactory: service is %s",
+ factory->isRemote() ? "Remote" : "Not Remote");
return factory;
}
-sp<IDrmPlugin> DrmHal::makeDrmPlugin(const uint8_t uuid[16]) {
+sp<IDrmPlugin> DrmHal::makeDrmPlugin(const uint8_t uuid[16],
+ const String8& appPackageName) {
if (mFactory == NULL){
return NULL;
}
sp<IDrmPlugin> plugin;
- Return<void> hResult = mFactory->createPlugin(uuid,
+ Return<void> hResult = mFactory->createPlugin(uuid, appPackageName.string(),
[&](Status status, const sp<IDrmPlugin>& hPlugin) {
- if (status != Status::OK) {
- ALOGD("Failed to make drm plugin");
- return;
- }
- plugin = hPlugin;
- }
- );
+ if (status != Status::OK) {
+ ALOGD("Failed to make drm plugin");
+ return;
+ }
+ plugin = hPlugin;
+ }
+ );
return plugin;
}
@@ -350,10 +353,11 @@
return result;
}
-status_t DrmHal::createPlugin(const uint8_t uuid[16]) {
+status_t DrmHal::createPlugin(const uint8_t uuid[16],
+ const String8& appPackageName) {
Mutex::Autolock autoLock(mLock);
- mPlugin = makeDrmPlugin(uuid);
+ mPlugin = makeDrmPlugin(uuid, appPackageName);
if (mPlugin == NULL) {
mInitCheck = ERROR_UNSUPPORTED;
@@ -597,8 +601,7 @@
}
status_t DrmHal::provideProvisionResponse(Vector<uint8_t> const &response,
- Vector<uint8_t> &certificate,
- Vector<uint8_t> &wrappedKey) {
+ Vector<uint8_t> &certificate, Vector<uint8_t> &wrappedKey) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -783,10 +786,8 @@
}
status_t DrmHal::encrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) {
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -811,10 +812,8 @@
}
status_t DrmHal::decrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) {
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -839,9 +838,8 @@
}
status_t DrmHal::sign(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> &signature) {
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -866,10 +864,8 @@
}
status_t DrmHal::verify(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &signature,
- bool &match) {
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
+ Vector<uint8_t> const &signature, bool &match) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -896,10 +892,8 @@
}
status_t DrmHal::signRSA(Vector<uint8_t> const &sessionId,
- String8 const &algorithm,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &wrappedKey,
- Vector<uint8_t> &signature) {
+ String8 const &algorithm, Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey, Vector<uint8_t> &signature) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
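
The makeDrmPlugin() change above follows the usual HIDL calling convention for methods that return values through a generated callback: the lambda runs synchronously, before the call returns, so capturing locals by reference and copying the result out is safe. A stripped-down sketch of that convention with hypothetical names (IFooFactory, IFooPlugin), not the real HIDL types:

    #include <functional>
    #include <memory>
    #include <string>

    enum class Status { OK, ERROR_BAD_VALUE };
    struct IFooPlugin {};

    struct IFooFactory {
        // Mirrors the shape of IDrmFactory::createPlugin(uuid, name, cb): the
        // result is delivered through the callback, which runs before this
        // function returns (the HIDL synchronous-callback convention).
        void createPlugin(const std::string& /*name*/,
                          const std::function<void(Status, std::shared_ptr<IFooPlugin>)>& cb) {
            cb(Status::OK, std::make_shared<IFooPlugin>());
        }
    };

    std::shared_ptr<IFooPlugin> makePlugin(IFooFactory& factory, const std::string& name) {
        std::shared_ptr<IFooPlugin> plugin;          // captured by reference below
        factory.createPlugin(name,
                [&](Status status, std::shared_ptr<IFooPlugin> p) {
                    if (status != Status::OK) {
                        return;                      // leave 'plugin' empty on failure
                    }
                    plugin = p;
                });
        return plugin;                               // already populated at this point
    }
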
diff --git a/drm/libmediadrm/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
index c4558c6..4e47112 100644
--- a/drm/libmediadrm/IDrm.cpp
+++ b/drm/libmediadrm/IDrm.cpp
@@ -88,13 +88,15 @@
return reply.readInt32() != 0;
}
- virtual status_t createPlugin(const uint8_t uuid[16]) {
+ virtual status_t createPlugin(const uint8_t uuid[16],
+ const String8& appPackageName) {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
data.write(uuid, 16);
-
+ data.writeString8(appPackageName);
status_t status = remote()->transact(CREATE_PLUGIN, data, &reply);
if (status != OK) {
+ ALOGE("createPlugin: binder call failed: %d", status);
return status;
}
@@ -585,7 +587,6 @@
data.read(uuid, sizeof(uuid));
String8 mimeType = data.readString8();
reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType));
-
return OK;
}
@@ -594,7 +595,8 @@
CHECK_INTERFACE(IDrm, data, reply);
uint8_t uuid[16];
data.read(uuid, sizeof(uuid));
- reply->writeInt32(createPlugin(uuid));
+ String8 appPackageName = data.readString8();
+ reply->writeInt32(createPlugin(uuid, appPackageName));
return OK;
}
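
One thing the paired IDrm.cpp changes above rely on is that the proxy's write order and the stub's read order stay in lockstep: the proxy writes the 16-byte uuid and then the String8 package name, and the stub reads them back in exactly that order. A toy, self-contained illustration of the same ordering rule (ToyParcel is made up; it is not the real android::Parcel API, which additionally writes the interface token first):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    // Toy stand-in for Parcel: fields come back out in the order they were written.
    struct ToyParcel {
        std::vector<uint8_t> buf;
        size_t pos = 0;
        void writeBytes(const void* d, size_t n) {
            auto p = static_cast<const uint8_t*>(d);
            buf.insert(buf.end(), p, p + n);
        }
        void writeString(const std::string& s) {
            uint32_t n = static_cast<uint32_t>(s.size());
            writeBytes(&n, sizeof(n));
            writeBytes(s.data(), n);
        }
        void readBytes(void* d, size_t n) { std::memcpy(d, &buf[pos], n); pos += n; }
        std::string readString() {
            uint32_t n = 0;
            readBytes(&n, sizeof(n));
            std::string s(reinterpret_cast<const char*>(&buf[pos]), n);
            pos += n;
            return s;
        }
    };

    int main() {
        uint8_t uuid[16] = {};
        ToyParcel p;
        p.writeBytes(uuid, sizeof(uuid));       // proxy: uuid first,
        p.writeString("com.example.app");       // then the package name

        uint8_t outUuid[16];
        p.readBytes(outUuid, sizeof(outUuid));  // stub: same order on the way out
        assert(p.readString() == "com.example.app");
        return 0;
    }
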
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
index d27956c..c83321b 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
@@ -44,7 +44,8 @@
}
android::status_t DrmFactory::createDrmPlugin(
- const uint8_t uuid[16], android::DrmPlugin** plugin) {
+ const uint8_t uuid[16],
+ android::DrmPlugin** plugin) {
if (!isCryptoSchemeSupported(uuid)) {
*plugin = NULL;
return android::BAD_VALUE;
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.h b/drm/mediadrm/plugins/clearkey/DrmFactory.h
index 87db982..0bc0843 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.h
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.h
@@ -35,7 +35,8 @@
virtual bool isContentTypeSupported(const android::String8 &mimeType);
virtual android::status_t createDrmPlugin(
- const uint8_t uuid[16], android::DrmPlugin** plugin);
+ const uint8_t uuid[16],
+ android::DrmPlugin** plugin);
private:
DISALLOW_EVIL_CONSTRUCTORS(DrmFactory);
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index a38cca9..c82b9d9 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -56,7 +56,8 @@
return true;
}
- status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16], DrmPlugin **plugin)
+ status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16],
+ DrmPlugin **plugin)
{
*plugin = new MockDrmPlugin();
return OK;
@@ -729,7 +730,7 @@
ssize_t MockDrmPlugin::findSession(Vector<uint8_t> const &sessionId) const
{
- ALOGD("findSession: nsessions=%d, size=%d", mSessions.size(), sessionId.size());
+ ALOGD("findSession: nsessions=%u, size=%u", mSessions.size(), sessionId.size());
for (size_t i = 0; i < mSessions.size(); ++i) {
if (memcmp(mSessions[i].array(), sessionId.array(), sessionId.size()) == 0) {
return i;
@@ -740,7 +741,7 @@
ssize_t MockDrmPlugin::findKeySet(Vector<uint8_t> const &keySetId) const
{
- ALOGD("findKeySet: nkeySets=%d, size=%d", mKeySets.size(), keySetId.size());
+ ALOGD("findKeySet: nkeySets=%u, size=%u", mKeySets.size(), keySetId.size());
for (size_t i = 0; i < mKeySets.size(); ++i) {
if (memcmp(mKeySets[i].array(), keySetId.array(), keySetId.size()) == 0) {
return i;
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index 98bdd69..9f8db17 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -33,7 +33,8 @@
bool isCryptoSchemeSupported(const uint8_t uuid[16]);
bool isContentTypeSupported(const String8 &mimeType);
- status_t createDrmPlugin(const uint8_t uuid[16], DrmPlugin **plugin);
+ status_t createDrmPlugin(const uint8_t uuid[16],
+ DrmPlugin **plugin);
};
class MockCryptoFactory : public CryptoFactory {
diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h
index 03fbdfd..74a2dce 100644
--- a/include/camera/CameraBase.h
+++ b/include/camera/CameraBase.h
@@ -141,7 +141,7 @@
virtual void binderDied(const wp<IBinder>& who);
// helper function to obtain camera service handle
- static const sp<::android::hardware::ICameraService>& getCameraService();
+ static const sp<::android::hardware::ICameraService> getCameraService();
sp<TCamUser> mCamera;
status_t mStatus;
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
index 978f48d..0180183 100644
--- a/include/camera/camera2/CaptureRequest.h
+++ b/include/camera/camera2/CaptureRequest.h
@@ -30,6 +30,16 @@
namespace camera2 {
struct CaptureRequest : public Parcelable {
+
+    // These are needed so we can use a forward declaration of Surface; otherwise
+    // the type is incomplete when the ctors/dtors are generated. This has the added
+    // benefit that the ctors/dtors are not inlined, which is good because they are
+    // not trivial (because of the vtable and Vector<>).
+ CaptureRequest();
+ CaptureRequest(const CaptureRequest& rhs);
+ CaptureRequest(CaptureRequest&& rhs) noexcept;
+ virtual ~CaptureRequest();
+
CameraMetadata mMetadata;
Vector<sp<Surface> > mSurfaceList;
bool mIsReprocess;
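
The comment above describes a common C++ idiom: when a member container holds pointers to a forward-declared type, the special members must be declared in the header and defined out of line, where the type is complete. A minimal standard-C++ sketch of the same idiom, with hypothetical names (Widget, Thing) and std::unique_ptr standing in for sp<>:

    // widget.h -- Thing is only forward-declared here.
    #include <memory>
    #include <vector>

    class Thing;

    struct Widget {
        Widget();                    // declared here, defined in widget.cpp,
        Widget(Widget&&) noexcept;   // where Thing is a complete type; an implicit
        ~Widget();                   // inline ~Widget() would instead require Thing's
                                     // definition wherever a Widget is destroyed
        std::vector<std::unique_ptr<Thing>> things;
    };

    // widget.cpp -- Thing's full definition is visible here.
    //   #include "widget.h"
    //   #include "thing.h"
    //   Widget::Widget() = default;
    //   Widget::Widget(Widget&&) noexcept = default;
    //   Widget::~Widget() = default;
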
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 4c64242..853d318 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIOSYSTEM_H_
#define ANDROID_AUDIOSYSTEM_H_
+#include <sys/types.h>
+
#include <media/AudioPolicy.h>
#include <media/AudioIoDescriptor.h>
#include <media/IAudioFlingerClient.h>
diff --git a/include/media/Drm.h b/include/media/Drm.h
index d40019b..fc869cc 100644
--- a/include/media/Drm.h
+++ b/include/media/Drm.h
@@ -40,7 +40,7 @@
virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
- virtual status_t createPlugin(const uint8_t uuid[16]);
+ virtual status_t createPlugin(const uint8_t uuid[16], const String8 &appPackageName);
virtual status_t destroyPlugin();
diff --git a/include/media/DrmHal.h b/include/media/DrmHal.h
index aaea2c9..82d2555 100644
--- a/include/media/DrmHal.h
+++ b/include/media/DrmHal.h
@@ -49,7 +49,8 @@
virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
- virtual status_t createPlugin(const uint8_t uuid[16]);
+ virtual status_t createPlugin(const uint8_t uuid[16],
+ const String8 &appPackageName);
virtual status_t destroyPlugin();
@@ -169,7 +170,8 @@
status_t mInitCheck;
sp<IDrmFactory> makeDrmFactory();
- sp<IDrmPlugin> makeDrmPlugin(const uint8_t uuid[16]);
+ sp<IDrmPlugin> makeDrmPlugin(const uint8_t uuid[16],
+ const String8 &appPackageName);
void writeByteArray(Parcel &obj, const hidl_vec<uint8_t>& array);
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index fd51fd0..a57e372 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -34,7 +34,8 @@
virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
- virtual status_t createPlugin(const uint8_t uuid[16]) = 0;
+ virtual status_t createPlugin(const uint8_t uuid[16],
+ const String8 &appPackageName) = 0;
virtual status_t destroyPlugin() = 0;
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
index e0a81f1..06db359 100644
--- a/include/media/IMediaExtractor.h
+++ b/include/media/IMediaExtractor.h
@@ -42,6 +42,8 @@
// returns an empty metadata object.
virtual sp<MetaData> getMetaData() = 0;
+ virtual status_t getMetrics(Parcel *reply) = 0;
+
enum Flags {
CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
diff --git a/include/media/MmapStreamCallback.h b/include/media/MmapStreamCallback.h
new file mode 100644
index 0000000..8098e79
--- /dev/null
+++ b/include/media/MmapStreamCallback.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_MMAP_STREAM_CALLBACK_H
+#define ANDROID_AUDIO_MMAP_STREAM_CALLBACK_H
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+
+class MmapStreamCallback : public virtual RefBase {
+ public:
+
+ /**
+ * The mmap stream should be torn down because conditions that permitted its creation with
+ * the requested parameters have changed and do not allow it to operate with the requested
+ * constraints any more.
+ */
+ virtual void onTearDown() = 0;
+
+ /**
+ * The volume to be applied to the use case specified when opening the stream has changed
+ * \param[in] channels a channel mask containing all channels the volume should be applied to.
+ * \param[in] values the volume values to be applied to each channel. The size of the vector
+ * should correspond to the channel count retrieved with
+ * audio_channel_count_from_in_mask() or audio_channel_count_from_out_mask()
+ */
+ virtual void onVolumeChanged(audio_channel_mask_t channels, Vector<float> values) = 0;
+
+ /**
+ * The device the stream is routed to/from has changed
+     * \param[in] deviceId the unique device ID of the new device.
+ */
+ virtual void onRoutingChanged(audio_port_handle_t deviceId) = 0;
+
+ protected:
+ MmapStreamCallback() {}
+ virtual ~MmapStreamCallback() {}
+};
+
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_MMAP_STREAM_CALLBACK_H
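
As a purely hypothetical sketch of what a client of this new callback interface could look like (the class name and behavior are illustrative, not part of this change):

    #include <media/MmapStreamCallback.h>
    #include <utils/Vector.h>

    namespace android {

    class ExampleMmapClient : public MmapStreamCallback {
      public:
        void onTearDown() override {
            // Conditions that allowed the stream no longer hold: stop using it
            // and release the MmapStreamInterface held by this client.
        }
        void onVolumeChanged(audio_channel_mask_t channels, Vector<float> values) override {
            // values has one entry per channel in 'channels'
            // (see audio_channel_count_from_out_mask()).
            (void)channels; (void)values;
        }
        void onRoutingChanged(audio_port_handle_t deviceId) override {
            (void)deviceId;  // the stream is now routed to/from this device
        }
    };

    }  // namespace android
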
diff --git a/include/media/MmapStreamInterface.h b/include/media/MmapStreamInterface.h
new file mode 100644
index 0000000..9f3731e
--- /dev/null
+++ b/include/media/MmapStreamInterface.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_MMAP_STREAM_INTERFACE_H
+#define ANDROID_AUDIO_MMAP_STREAM_INTERFACE_H
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class MmapStreamCallback;
+
+class MmapStreamInterface : public virtual RefBase
+{
+ public:
+
+ /**
+ * Values for direction argument passed to openMmapStream()
+ */
+ typedef enum {
+ DIRECTION_OUTPUT = 0, /**< open a playback mmap stream */
+ DIRECTION_INPUT, /**< open a capture mmap stream */
+ } stream_direction_t;
+
+ class Client {
+ public:
+ uid_t clientUid;
+ pid_t clientPid;
+ String16 packageName;
+ };
+ /**
+ * Open a playback or capture stream in MMAP mode at the audio HAL.
+ *
+ * \note This method is implemented by AudioFlinger
+ *
+ * \param[in] direction open a playback or capture stream.
+ * \param[in] attr audio attributes defining the main use case for this stream
+ * \param[in,out] config audio parameters (sampling rate, format ...) for the stream.
+ * Requested parameters as input,
+ * Actual parameters as output
+ * \param[in] client a Client struct describing the first client using this stream.
+ * \param[in,out] deviceId audio device the stream should preferably be routed to/from
+ * Requested as input,
+ * Actual as output
+ * \param[in] callback the MmapStreamCallback interface used by AudioFlinger to notify
+ * condition changes affecting the stream operation
+ * \param[out] interface the MmapStreamInterface interface controlling the created stream
+ * \return OK if the stream was successfully created.
+ * NO_INIT if AudioFlinger is not properly initialized
+ * BAD_VALUE if the stream cannot be opened because of invalid arguments
+ * INVALID_OPERATION if the stream cannot be opened because of platform limitations
+ */
+ static status_t openMmapStream(stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface);
+
+ /**
+ * Retrieve information on the mmap buffer used for audio samples transfer.
+ *
+     * \param[in] minSizeFrames minimum buffer size requested. The actual buffer
+ * size returned in struct audio_mmap_buffer_info can be larger.
+ * \param[out] info address at which the mmap buffer information should be returned.
+ *
+ * \return OK if the buffer was allocated.
+ * NO_INIT in case of initialization error
+ * BAD_VALUE if the requested buffer size is too large
+ * INVALID_OPERATION if called out of sequence (e.g. buffer already allocated)
+ */
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) = 0;
+
+ /**
+ * Read current read/write position in the mmap buffer with associated time stamp.
+ *
+ * \param[out] position address at which the mmap read/write position should be returned.
+ *
+ * \return OK if the position is successfully returned.
+ * NOT_ENOUGH_DATA if the position cannot be retrieved
+ * INVALID_OPERATION if called before createMmapBuffer()
+ */
+ virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+ /**
+ * Start a stream operating in mmap mode.
+ * createMmapBuffer() must be called before calling start()
+ *
+ * \param[in] client a Client struct describing the client starting on this stream.
+ * \param[out] handle unique handle for this instance. Used with stop().
+ * \return OK in case of success.
+ * INVALID_OPERATION if called out of sequence
+ */
+ virtual status_t start(const Client& client, audio_port_handle_t *handle) = 0;
+
+ /**
+ * Stop a stream operating in mmap mode.
+ * Must be called after start()
+ *
+ * \param[in] handle unique handle allocated by start().
+ * \return OK in case of success.
+ * INVALID_OPERATION if called out of sequence
+ */
+ virtual status_t stop(audio_port_handle_t handle) = 0;
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ MmapStreamInterface() {}
+
+ // The destructor automatically closes the stream.
+ virtual ~MmapStreamInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_MMAP_STREAM_INTERFACE_H
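
A hypothetical caller-side sketch that puts the pieces of this new interface together; the configuration values, package name, and frame count are made up, and per the note above the real implementation lives in AudioFlinger:

    #include <unistd.h>
    #include <media/MmapStreamCallback.h>
    #include <media/MmapStreamInterface.h>
    #include <utils/String16.h>

    using namespace android;

    status_t openMmapOutput(const sp<MmapStreamCallback>& callback,
                            sp<MmapStreamInterface>& stream) {
        audio_attributes_t attr = {};              // default attributes for brevity
        audio_config_base_t config = {};
        config.sample_rate = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format = AUDIO_FORMAT_PCM_16_BIT;

        MmapStreamInterface::Client client;
        client.clientUid = getuid();
        client.clientPid = getpid();
        client.packageName = String16("com.example.app");   // hypothetical

        audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
        status_t status = MmapStreamInterface::openMmapStream(
                MmapStreamInterface::DIRECTION_OUTPUT, &attr, &config,
                client, &deviceId, callback, stream);
        if (status != OK) return status;

        struct audio_mmap_buffer_info info;
        status = stream->createMmapBuffer(256 /* minSizeFrames */, &info);
        if (status != OK) return status;

        audio_port_handle_t handle;
        return stream->start(client, &handle);               // call stop(handle) later
    }
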
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index acf2d31..ff10b8c 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -40,6 +40,12 @@
EVENT_RESERVED,
EVENT_STRING, // ASCII string, not NUL-terminated
EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC)
+ EVENT_INTEGER,
+ EVENT_FLOAT,
+ EVENT_PID,
+ EVENT_START_FMT, // logFormat start event: entry includes format string, following
+ // entries contain format arguments
+ EVENT_END_FMT, // end of logFormat argument list
};
// ---------------------------------------------------------------------------
@@ -73,6 +79,13 @@
// byte[2+mLength] duplicate copy of mLength to permit reverse scan
// byte[3+mLength] start of next log entry
+ static void appendInt(String8 *body, const void *data);
+ static void appendFloat(String8 *body, const void *data);
+ static void appendPID(String8 *body, const void *data);
+ static int handleFormat(const char *fmt, size_t length, const uint8_t *data,
+ String8 *timestamp, String8 *body);
+ static void appendTimestamp(String8 *body, const void *data);
+
public:
// Located in shared memory, must be POD.
@@ -133,7 +146,15 @@
virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
virtual void logvf(const char *fmt, va_list ap);
virtual void logTimestamp();
- virtual void logTimestamp(const struct timespec& ts);
+ virtual void logTimestamp(const struct timespec &ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logFormat(const char *fmt, ...);
+ virtual void logVFormat(const char *fmt, va_list ap);
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
+
virtual bool isEnabled() const;
@@ -170,7 +191,12 @@
virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
virtual void logvf(const char *fmt, va_list ap);
virtual void logTimestamp();
- virtual void logTimestamp(const struct timespec& ts);
+ virtual void logTimestamp(const struct timespec &ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
virtual bool isEnabled() const;
virtual bool setEnabled(bool enabled);
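
Going by the enum comments added above, a formatted entry is recorded as an EVENT_START_FMT entry carrying the format string, followed by one entry per argument (EVENT_INTEGER, EVENT_FLOAT, ...), and closed by EVENT_END_FMT. A hypothetical writer-side call, assuming an NBLog::Writer has already been set up:

    #include <media/nbaio/NBLog.h>

    // Hypothetical usage; 'writer' is an already-constructed NBLog::Writer.
    void logUnderrun(android::NBLog::Writer* writer, int track, float durationMs) {
        // Per the enum comments, this should be captured as START_FMT with the
        // format string, then one entry per argument, then END_FMT.
        writer->logFormat("track %d underran for %f ms", track, durationMs);
    }
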
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 3420617..814a643 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -41,6 +41,14 @@
struct DescribeColorFormat2Params;
struct DataConverter;
+// Treble shared memory
+namespace hidl { namespace memory { namespace V1_0 {
+struct IAllocator;
+struct IMemory;
+}}};
+typedef hidl::memory::V1_0::IAllocator TAllocator;
+typedef hidl::memory::V1_0::IMemory TMemory;
+
struct ACodec : public AHierarchicalStateMachine, public CodecBase {
ACodec();
@@ -86,6 +94,12 @@
static status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]);
+ // Read the flag from "media.use_treble_omx", save it locally, and return
+ // it.
+ bool updateTrebleFlag();
+ // Return the saved flag.
+ bool getTrebleFlag() const;
+
protected:
virtual ~ACodec();
@@ -218,6 +232,8 @@
sp<IOMX> mOMX;
sp<IOMXNode> mOMXNode;
int32_t mNodeGeneration;
+ bool mTrebleFlag;
+ sp<TAllocator> mAllocator[2];
sp<MemoryDealer> mDealer[2];
bool mUsingNativeWindow;
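
Read together with the OMXClient::connectTreble() addition further down, the new flag looks like a runtime switch between the legacy and Treble OMX paths. A hypothetical sketch of how a caller might branch on it (connectOmx and its wiring are illustrative, not part of this change):

    #include <media/stagefright/ACodec.h>
    #include <media/stagefright/OMXClient.h>

    using namespace android;

    status_t connectOmx(const sp<ACodec>& codec, OMXClient* client) {
        // updateTrebleFlag() reads "media.use_treble_omx" and caches the result.
        if (codec->updateTrebleFlag()) {
            return client->connectTreble();   // Treble (HIDL) OMX path
        }
        return client->connect();             // legacy IOMX path
    }
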
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 211f794..9ce6cc5 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -48,6 +48,8 @@
// returns an empty metadata object.
virtual sp<MetaData> getMetaData();
+ status_t getMetrics(Parcel *reply);
+
enum Flags {
CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
@@ -74,6 +76,8 @@
MediaAnalyticsItem *mAnalyticsItem;
+ virtual void populateMetrics();
+
private:
typedef bool (*SnifferFunc)(
diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h
index fa855a8..63c3ca5 100644
--- a/include/media/stagefright/MediaMuxer.h
+++ b/include/media/stagefright/MediaMuxer.h
@@ -45,8 +45,9 @@
// Please update media/java/android/media/MediaMuxer.java if the
// OutputFormat is updated.
enum OutputFormat {
- OUTPUT_FORMAT_MPEG_4 = 0,
- OUTPUT_FORMAT_WEBM = 1,
+ OUTPUT_FORMAT_MPEG_4 = 0,
+ OUTPUT_FORMAT_WEBM = 1,
+ OUTPUT_FORMAT_THREE_GPP = 2,
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index e414757..ad0d37b 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -78,6 +78,7 @@
status_t getSampleTrackIndex(size_t *trackIndex);
status_t getSampleTime(int64_t *sampleTimeUs);
status_t getSampleMeta(sp<MetaData> *sampleMeta);
+ status_t getMetrics(Parcel *reply);
bool getCachedDuration(int64_t *durationUs, bool *eos) const;
diff --git a/include/media/stagefright/OMXClient.h b/include/media/stagefright/OMXClient.h
index 6973405..6b86cbf 100644
--- a/include/media/stagefright/OMXClient.h
+++ b/include/media/stagefright/OMXClient.h
@@ -27,6 +27,7 @@
OMXClient();
status_t connect();
+ status_t connectTreble();
void disconnect();
sp<IOMX> interface() {
diff --git a/media/liboboe/Android.bp b/media/libaaudio/Android.bp
similarity index 81%
rename from media/liboboe/Android.bp
rename to media/libaaudio/Android.bp
index bfcc049..e41d62b 100644
--- a/media/liboboe/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -13,16 +13,16 @@
// limitations under the License.
ndk_headers {
- name: "libOboe_headers",
+ name: "libAAudio_headers",
from: "include",
to: "",
- srcs: ["include/oboe/*.h"],
- license: "include/oboe/NOTICE",
+ srcs: ["include/aaudio/*.h"],
+ license: "include/aaudio/NOTICE",
}
ndk_library {
- name: "liboboe.ndk",
- symbol_file: "liboboe.map.txt",
+ name: "libaaudio.ndk",
+ symbol_file: "libaaudio.map.txt",
first_version: "26",
unversioned_until: "current",
}
diff --git a/media/liboboe/Android.mk b/media/libaaudio/Android.mk
similarity index 100%
rename from media/liboboe/Android.mk
rename to media/libaaudio/Android.mk
diff --git a/media/libaaudio/Doxyfile b/media/libaaudio/Doxyfile
new file mode 100644
index 0000000..5cce2ca
--- /dev/null
+++ b/media/libaaudio/Doxyfile
@@ -0,0 +1,2313 @@
+# Doxyfile 1.8.6
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "AAudio"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command "<command> <input-file>", where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
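+
+# Illustrative sketch only (not part of this build): a hypothetical git-based
+# filter could supply per-file versions. Doxygen would run it as
+# "<command> <input-file>", so a commented example is:
+#   FILE_VERSION_FILTER = "git log -n 1 --pretty=format:%h --"
+# which would print the abbreviated hash of the last commit touching each file.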
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = include/aaudio/AAudioDefinitions.h \
+ include/aaudio/AAudio.h \
+ src/legacy/AudioStreamTrack.h \
+ src/legacy/AudioStreamRecord.h \
+ src/legacy/AAudioLegacy.h \
+ src/core/AudioStreamBuilder.h \
+ src/core/AudioStream.h \
+ src/utility/HandleTracker.h \
+ src/utility/MonotonicCounter.h \
+ src/utility/AudioClock.h \
+ src/utility/AAudioUtilities.h
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
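+
+# Illustrative sketch only (left disabled here): a hypothetical filter could
+# rewrite each source file before doxygen parses it, e.g.
+#   INPUT_FILTER = "sed -e 's/EXPORT_ATTR//'"
+# where doxygen runs the command as "<filter> <input-file>" and reads the
+# filtered source from its standard output. EXPORT_ATTR is a placeholder macro
+# name used purely to show the syntax; the filter must not add or remove lines.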
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
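+
+# Illustrative sketch only: if a markdown introduction were added to INPUT, it
+# could also serve as the generated main page, e.g.
+#   INPUT                  += README.md
+#   USE_MDFILE_AS_MAINPAGE  = README.md
+# README.md is assumed here purely for the example; no such file is added by
+# this change.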
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is generated
+# (YES) or whether it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files need
+# to be copied into the plugins directory of Eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; in that
+# case enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of a
+# project to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements;
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED =
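+# As an illustration only (these macro names are hypothetical and not part of
+# this configuration), one could predefine a documentation-only guard and a
+# constant so that conditionally compiled code is still documented:
+# PREDEFINED = DOXYGEN_ONLY \
+#              MAX_CHANNELS=8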
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have an unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
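+# For illustration only (libfoo.tag and the URL are hypothetical), an external
+# tag file and the location of its documentation could be linked like this:
+# TAGFILES = libfoo.tag=http://example.com/libfoo/docs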
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lie
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/media/libaaudio/Doxyfile.orig b/media/libaaudio/Doxyfile.orig
new file mode 100644
index 0000000..137facb
--- /dev/null
+++ b/media/libaaudio/Doxyfile.orig
@@ -0,0 +1,2303 @@
+# Doxyfile 1.8.6
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "My Project"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity): The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before file names in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used.
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that Rational Rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES =
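+# For illustration only, the alias from the example above would be written as:
+# ALIASES = "sideeffect=@par Side Effects:\n"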
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
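+# For illustration only, the mapping described above (treat .inc files as
+# Fortran and .f files as C) would be written as:
+# EXTENSION_MAPPING = inc=Fortran f=C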
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface, are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespaces
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
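+# For illustration only, a format that also includes the file version (assuming
+# FILE_VERSION_FILTER is configured) could look like:
+# WARN_FORMAT = "$file:$line ($version): $text"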
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT =
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS =
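+# For illustration only, restricting the input to C/C++ sources and headers
+# could be written as:
+# FILE_PATTERNS = *.h *.cpp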
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
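+# For illustration only, skipping all test directories as described above would
+# be written as:
+# EXCLUDE_PATTERNS = */test/*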
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
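+# For illustration only (my_cpp_filter is the hypothetical script name used in
+# the comment above):
+# FILTER_PATTERNS = *.cpp=my_cpp_filter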
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on, for instance, GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
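+# For illustration only (README.md is a hypothetical file that would also have
+# to be listed in INPUT):
+# USE_MDFILE_AS_MAINPAGE = README.md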
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen's built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
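+# A purely illustrative value (this project does not necessarily use such a
+# common prefix):
+#   IGNORE_PREFIX = My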
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
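+# A purely illustrative value (the file name is a placeholder, not a style
+# sheet shipped with this project):
+#   HTML_EXTRA_STYLESHEET = custom_doxygen.css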
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
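+# A purely illustrative value for a typical Windows install (the path is a
+# placeholder and will differ per machine):
+#   HHC_LOCATION = "C:/Program Files (x86)/HTML Help Workshop/hhc.exe"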
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is generated
+# (YES) or whether it is included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files need
+# to be copied into the plugins directory of Eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying, Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes take effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
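+# A purely illustrative value that restricts preprocessing to header files:
+#   INCLUDE_FILE_PATTERNS = *.h *.hpp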
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED =
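+# A purely illustrative value (these macro names are placeholders, not macros
+# the build necessarily defines):
+#   PREDEFINED = DOXYGEN_ONLY HAVE_FEATURE_X=1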
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/media/libaaudio/README.md b/media/libaaudio/README.md
new file mode 100644
index 0000000..0c9050e
--- /dev/null
+++ b/media/libaaudio/README.md
@@ -0,0 +1,3 @@
+AAudio input/output API
+
+To generate the Doxygen output, run the command "doxygen" in this directory.
diff --git a/media/liboboe/examples/Android.mk b/media/libaaudio/examples/Android.mk
similarity index 100%
rename from media/liboboe/examples/Android.mk
rename to media/libaaudio/examples/Android.mk
diff --git a/media/liboboe/examples/write_sine/Android.mk b/media/libaaudio/examples/write_sine/Android.mk
similarity index 100%
rename from media/liboboe/examples/write_sine/Android.mk
rename to media/libaaudio/examples/write_sine/Android.mk
diff --git a/media/liboboe/examples/write_sine/README.md b/media/libaaudio/examples/write_sine/README.md
similarity index 70%
rename from media/liboboe/examples/write_sine/README.md
rename to media/libaaudio/examples/write_sine/README.md
index 9f7ee87..b150471 100644
--- a/media/liboboe/examples/write_sine/README.md
+++ b/media/libaaudio/examples/write_sine/README.md
@@ -1,6 +1,6 @@
# cd to this directory
-mkdir -p jni/include/oboe
-ln -s $PLATFORM/frameworks/av/media/liboboe/include/oboe/*.h jni/include/oboe
+mkdir -p jni/include/aaudio
+ln -s $PLATFORM/frameworks/av/media/liboboe/include/aaudio/*.h jni/include/aaudio
ln -s $PLATFORM/out/target/product/$TARGET_PRODUCT/symbols/out/soong/ndk/platforms/android-current/arch-arm64/usr/lib/liboboe.so jni
$NDK/ndk-build
adb push libs/arm64-v8a/write_sine_threaded /data
diff --git a/media/liboboe/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
similarity index 100%
rename from media/liboboe/examples/write_sine/jni/Android.mk
rename to media/libaaudio/examples/write_sine/jni/Android.mk
diff --git a/media/liboboe/examples/write_sine/jni/Application.mk b/media/libaaudio/examples/write_sine/jni/Application.mk
similarity index 100%
rename from media/liboboe/examples/write_sine/jni/Application.mk
rename to media/libaaudio/examples/write_sine/jni/Application.mk
diff --git a/media/liboboe/examples/write_sine/src/SineGenerator.h b/media/libaaudio/examples/write_sine/src/SineGenerator.h
similarity index 100%
rename from media/liboboe/examples/write_sine/src/SineGenerator.h
rename to media/libaaudio/examples/write_sine/src/SineGenerator.h
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
new file mode 100644
index 0000000..090f371
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using AAudio.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+#define SAMPLE_RATE 48000
+#define NUM_SECONDS 10
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+ const char *modeText = "unknown";
+ switch (mode) {
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ modeText = "EXCLUSIVE";
+ break;
+ case AAUDIO_SHARING_MODE_LEGACY:
+ modeText = "LEGACY";
+ break;
+ case AAUDIO_SHARING_MODE_SHARED:
+ modeText = "SHARED";
+ break;
+ case AAUDIO_SHARING_MODE_PUBLIC_MIX:
+ modeText = "PUBLIC_MIX";
+ break;
+ default:
+ break;
+ }
+ return modeText;
+}
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+
+ aaudio_result_t result = AAUDIO_OK;
+
+ const int requestedSamplesPerFrame = 2;
+ int actualSamplesPerFrame = 0;
+ const int requestedSampleRate = SAMPLE_RATE;
+ int actualSampleRate = 0;
+ const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM16;
+ aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM16;
+
+ const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+ aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+
+ AAudioStreamBuilder aaudioBuilder = AAUDIO_STREAM_BUILDER_NONE;
+ AAudioStream aaudioStream = AAUDIO_STREAM_NONE;
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ aaudio_size_frames_t framesPerBurst = 0;
+ aaudio_size_frames_t framesToPlay = 0;
+ aaudio_size_frames_t framesLeft = 0;
+ int32_t xRunCount = 0;
+ int16_t *data = nullptr;
+
+ SineGenerator sineOsc1;
+ SineGenerator sineOsc2;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Play a sine wave using AAudio\n", argv[0]);
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&aaudioBuilder);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+ // Request stream properties.
+ result = AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+ result = AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+ result = AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+ result = AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
+ printf("aaudioStream 0x%08x\n", aaudioStream);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+ result = AAudioStream_getState(aaudioStream, &state);
+ printf("after open, state = %s\n", AAudio_convertStreamStateToText(state));
+
+ // Check to see what kind of stream we actually got.
+ result = AAudioStream_getSampleRate(aaudioStream, &actualSampleRate);
+ printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
+
+ sineOsc1.setup(440.0, actualSampleRate);
+ sineOsc2.setup(660.0, actualSampleRate);
+
+ result = AAudioStream_getSamplesPerFrame(aaudioStream, &actualSamplesPerFrame);
+ printf("SamplesPerFrame: requested = %d, actual = %d\n",
+ requestedSamplesPerFrame, actualSamplesPerFrame);
+
+ result = AAudioStream_getSharingMode(aaudioStream, &actualSharingMode);
+ printf("SharingMode: requested = %s, actual = %s\n",
+ getSharingModeText(requestedSharingMode),
+ getSharingModeText(actualSharingMode));
+
+ // This is the number of frames that are read in one chunk by a DMA controller
+ // or a DSP or a mixer.
+ result = AAudioStream_getFramesPerBurst(aaudioStream, &framesPerBurst);
+ printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_getFramesPerBurst() returned %d\n", result);
+ goto finish;
+ }
+ // Some DMA might use very short bursts of 16 frames. We don't need to write such small
+ // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+ while (framesPerBurst < 48) {
+ framesPerBurst *= 2;
+ }
+ printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
+
+ AAudioStream_getFormat(aaudioStream, &actualDataFormat);
+ printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+ // TODO handle other data formats
+
+ // Allocate a buffer for the audio data.
+ data = new int16_t[framesPerBurst * actualSamplesPerFrame];
+ if (data == nullptr) {
+ fprintf(stderr, "ERROR - could not allocate data buffer\n");
+ result = AAUDIO_ERROR_NO_MEMORY;
+ goto finish;
+ }
+
+ // Start the stream.
+ printf("call AAudioStream_requestStart()\n");
+ result = AAudioStream_requestStart(aaudioStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+ goto finish;
+ }
+
+ result = AAudioStream_getState(aaudioStream, &state);
+ printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
+
+ // Play for a while.
+ framesToPlay = actualSampleRate * NUM_SECONDS;
+ framesLeft = framesToPlay;
+ while (framesLeft > 0) {
+ // Render sine waves to left and right channels.
+ sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerBurst);
+ if (actualSamplesPerFrame > 1) {
+ sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerBurst);
+ }
+
+ // Write audio data to the stream.
+ aaudio_nanoseconds_t timeoutNanos = 100 * AAUDIO_NANOS_PER_MILLISECOND;
+        int minFrames = (framesLeft < framesPerBurst) ? framesLeft : framesPerBurst;
+ int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
+ if (actual < 0) {
+            fprintf(stderr, "ERROR - AAudioStream_write() returned %d\n", actual);
+ goto finish;
+ } else if (actual == 0) {
+            fprintf(stderr, "WARNING - AAudioStream_write() returned %d\n", actual);
+ goto finish;
+ }
+ framesLeft -= actual;
+ }
+
+ result = AAudioStream_getXRunCount(aaudioStream, &xRunCount);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+finish:
+ delete[] data;
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
new file mode 100644
index 0000000..4ea2807
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio background thread.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+#define NUM_SECONDS 10
+
+#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+//#define SHARING_MODE AAUDIO_SHARING_MODE_LEGACY
+
+// Prototype for a callback.
+typedef int audio_callback_proc_t(float *outputBuffer,
+ aaudio_size_frames_t numFrames,
+ void *userContext);
+
+static void *SimpleAAudioPlayerThreadProc(void *arg);
+
+/**
+ * Simple wrapper for AAudio that opens a default stream and then calls
+ * a callback function to fill the output buffers.
+ */
+class SimpleAAudioPlayer {
+public:
+ SimpleAAudioPlayer() {}
+ virtual ~SimpleAAudioPlayer() {
+ close();
+ };
+
+ void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+ mRequestedSharingMode = requestedSharingMode;
+ }
+
+ /** Also known as "sample rate"
+ */
+ int32_t getFramesPerSecond() {
+ return mFramesPerSecond;
+ }
+
+ int32_t getSamplesPerFrame() {
+ return mSamplesPerFrame;
+ }
+
+ /**
+ * Open a stream
+ */
+ aaudio_result_t open(audio_callback_proc_t *proc, void *userContext) {
+ mCallbackProc = proc;
+ mUserContext = userContext;
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&mBuilder);
+ if (result != AAUDIO_OK) return result;
+
+ result = AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+ if (result != AAUDIO_OK) goto finish1;
+
+ // Open an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+ if (result != AAUDIO_OK) goto finish1;
+
+ // Check to see what kind of stream we actually got.
+ result = AAudioStream_getSampleRate(mStream, &mFramesPerSecond);
+ printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
+ if (result != AAUDIO_OK) goto finish2;
+
+ result = AAudioStream_getSamplesPerFrame(mStream, &mSamplesPerFrame);
+ printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
+ if (result != AAUDIO_OK) goto finish2;
+
+ {
+ aaudio_size_frames_t bufferCapacity;
+ result = AAudioStream_getBufferCapacity(mStream, &bufferCapacity);
+ if (result != AAUDIO_OK) goto finish2;
+ printf("open() got bufferCapacity = %d\n", bufferCapacity);
+ }
+
+ // This is the number of frames that are read in one chunk by a DMA controller
+ // or a DSP or a mixer.
+ result = AAudioStream_getFramesPerBurst(mStream, &mFramesPerBurst);
+ if (result != AAUDIO_OK) goto finish2;
+ // Some DMA might use very short bursts. We don't need to write such small
+ // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+ while (mFramesPerBurst < 48) {
+ mFramesPerBurst *= 2;
+ }
+ printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
+
+ result = AAudioStream_getFormat(mStream, &mDataFormat);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_getFormat() returned %d\n", result);
+ goto finish2;
+ }
+
+ // Allocate a buffer for the audio data.
+ mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
+ if (mOutputBuffer == nullptr) {
+ fprintf(stderr, "ERROR - could not allocate data buffer\n");
+ result = AAUDIO_ERROR_NO_MEMORY;
+ }
+
+ // If needed allocate a buffer for converting float to int16_t.
+ if (mDataFormat == AAUDIO_FORMAT_PCM16) {
+ mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
+ if (mConversionBuffer == nullptr) {
+ fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
+ result = AAUDIO_ERROR_NO_MEMORY;
+ }
+ }
+ return result;
+
+ finish2:
+ AAudioStream_close(mStream);
+ mStream = AAUDIO_HANDLE_INVALID;
+ finish1:
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = AAUDIO_HANDLE_INVALID;
+ return result;
+ }
+
+ aaudio_result_t close() {
+ if (mStream != AAUDIO_HANDLE_INVALID) {
+ stop();
+ printf("call AAudioStream_close(0x%08x)\n", mStream); fflush(stdout);
+ AAudioStream_close(mStream);
+ mStream = AAUDIO_HANDLE_INVALID;
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = AAUDIO_HANDLE_INVALID;
+            delete[] mOutputBuffer;
+            mOutputBuffer = nullptr;
+            delete[] mConversionBuffer;
+            mConversionBuffer = nullptr;
+ }
+ return AAUDIO_OK;
+ }
+
+ // Start a thread that will call the callback proc.
+ aaudio_result_t start() {
+ mEnabled = true;
+ aaudio_nanoseconds_t nanosPerBurst = mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+ / mFramesPerSecond;
+ return AAudioStream_createThread(mStream, nanosPerBurst,
+ SimpleAAudioPlayerThreadProc,
+ this);
+ }
+
+ // Tell the thread to stop.
+ aaudio_result_t stop() {
+ mEnabled = false;
+ return AAudioStream_joinThread(mStream, nullptr, 2 * AAUDIO_NANOS_PER_SECOND);
+ }
+
+ aaudio_result_t callbackLoop() {
+ int32_t framesWritten = 0;
+ int32_t xRunCount = 0;
+ aaudio_result_t result = AAUDIO_OK;
+
+ result = AAudioStream_requestStart(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+ return result;
+ }
+
+ // Give up after several burst periods have passed.
+ const int burstsPerTimeout = 8;
+ aaudio_nanoseconds_t nanosPerTimeout =
+ burstsPerTimeout * mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+ / mFramesPerSecond;
+
+ while (mEnabled && result >= 0) {
+ // Call application's callback function to fill the buffer.
+ if (mCallbackProc(mOutputBuffer, mFramesPerBurst, mUserContext)) {
+ mEnabled = false;
+ }
+ // if needed, convert from float to int16_t PCM
+ if (mConversionBuffer != nullptr) {
+ int32_t numSamples = mFramesPerBurst * mSamplesPerFrame;
+ for (int i = 0; i < numSamples; i++) {
+ mConversionBuffer[i] = (int16_t)(32767.0 * mOutputBuffer[i]);
+ }
+ // Write the application data to stream.
+ result = AAudioStream_write(mStream, mConversionBuffer, mFramesPerBurst, nanosPerTimeout);
+ } else {
+ // Write the application data to stream.
+ result = AAudioStream_write(mStream, mOutputBuffer, mFramesPerBurst, nanosPerTimeout);
+ }
+ framesWritten += result;
+ if (result < 0) {
+                fprintf(stderr, "ERROR - AAudioStream_write() returned %d\n", result);
+ }
+ }
+
+ result = AAudioStream_getXRunCount(mStream, &xRunCount);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+ result = AAudioStream_requestStop(mStream);
+ if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d\n", result);
+ return result;
+ }
+
+ return result;
+ }
+
+private:
+ AAudioStreamBuilder mBuilder = AAUDIO_HANDLE_INVALID;
+ AAudioStream mStream = AAUDIO_HANDLE_INVALID;
+ float * mOutputBuffer = nullptr;
+ int16_t * mConversionBuffer = nullptr;
+
+ audio_callback_proc_t * mCallbackProc = nullptr;
+ void * mUserContext = nullptr;
+ aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+ int32_t mSamplesPerFrame = 0;
+ int32_t mFramesPerSecond = 0;
+ aaudio_size_frames_t mFramesPerBurst = 0;
+ aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM16;
+
+ volatile bool mEnabled = false; // used to request that callback exit its loop
+};
+
+static void *SimpleAAudioPlayerThreadProc(void *arg) {
+ SimpleAAudioPlayer *player = (SimpleAAudioPlayer *) arg;
+ player->callbackLoop();
+ return nullptr;
+}
+
+// Application data that gets passed to the callback.
+typedef struct SineThreadedData_s {
+ SineGenerator sineOsc1;
+ SineGenerator sineOsc2;
+ int32_t samplesPerFrame = 0;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+int MyCallbackProc(float *outputBuffer, int32_t numFrames, void *userContext) {
+ SineThreadedData_t *data = (SineThreadedData_t *) userContext;
+ // Render sine waves to left and right channels.
+ data->sineOsc1.render(&outputBuffer[0], data->samplesPerFrame, numFrames);
+ if (data->samplesPerFrame > 1) {
+ data->sineOsc2.render(&outputBuffer[1], data->samplesPerFrame, numFrames);
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+ SimpleAAudioPlayer player;
+ SineThreadedData_t myData;
+ aaudio_result_t result;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+ printf("%s - Play a sine wave using an AAudio Thread\n", argv[0]);
+
+ result = player.open(MyCallbackProc, &myData);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.open() returned %d\n", result);
+ goto error;
+ }
+ printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+ printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+ myData.sineOsc1.setup(440.0, 48000);
+ myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
+ myData.sineOsc2.setup(660.0, 48000);
+ myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
+ myData.samplesPerFrame = player.getSamplesPerFrame();
+
+ result = player.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+ goto error;
+ }
+
+ printf("Sleep for %d seconds while audio plays in a background thread.\n", NUM_SECONDS);
+ {
+ // FIXME sleep is not an NDK API
+ // sleep(NUM_SECONDS);
+ const struct timespec request = { .tv_sec = NUM_SECONDS, .tv_nsec = 0 };
+ (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+ }
+ printf("Woke up now.\n");
+
+ result = player.stop();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.stop() returned %d\n", result);
+ goto error;
+ }
+ result = player.close();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.close() returned %d\n", result);
+ goto error;
+ }
+
+ printf("SUCCESS\n");
+ return EXIT_SUCCESS;
+error:
+ player.close();
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return EXIT_FAILURE;
+}
+
diff --git a/media/liboboe/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
similarity index 84%
rename from media/liboboe/examples/write_sine/static/Android.mk
rename to media/libaaudio/examples/write_sine/static/Android.mk
index 7c8d17c..139b70a 100644
--- a/media/liboboe/examples/write_sine/static/Android.mk
+++ b/media/libaaudio/examples/write_sine/static/Android.mk
@@ -4,7 +4,7 @@
LOCAL_MODULE_TAGS := examples
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/liboboe/include
+ frameworks/av/media/libaaudio/include
# TODO reorganize folders to avoid using ../
LOCAL_SRC_FILES:= ../src/write_sine.cpp
@@ -12,7 +12,7 @@
LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
libbinder libcutils libutils \
libaudioclient liblog libtinyalsa
-LOCAL_STATIC_LIBRARIES := liboboe
+LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := write_sine
include $(BUILD_EXECUTABLE)
@@ -21,14 +21,14 @@
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/liboboe/include
+ frameworks/av/media/libaaudio/include
LOCAL_SRC_FILES:= ../src/write_sine_threaded.cpp
LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
libbinder libcutils libutils \
libaudioclient liblog libtinyalsa
-LOCAL_STATIC_LIBRARIES := liboboe
+LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := write_sine_threaded
include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/write_sine/static/README.md b/media/libaaudio/examples/write_sine/static/README.md
new file mode 100644
index 0000000..6e26d7b
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with AAudio as a static library.
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
new file mode 100644
index 0000000..dad5285
--- /dev/null
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -0,0 +1,607 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This is the 'C' ABI for AAudio.
+ */
+#ifndef AAUDIO_AAUDIO_H
+#define AAUDIO_AAUDIO_H
+
+#include "AAudioDefinitions.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef aaudio_handle_t AAudioStream;
+typedef aaudio_handle_t AAudioStreamBuilder;
+
+#define AAUDIO_STREAM_NONE ((AAudioStream)AAUDIO_HANDLE_INVALID)
+#define AAUDIO_STREAM_BUILDER_NONE ((AAudioStreamBuilder)AAUDIO_HANDLE_INVALID)
+
+/* AAUDIO_API will probably get defined in a Makefile for a specific platform. */
+#ifndef AAUDIO_API
+#define AAUDIO_API /* for exporting symbols */
+#endif
+
+// ============================================================
+// Audio System
+// ============================================================
+
+/**
+ * @return time in the same clock domain as the timestamps
+ */
+AAUDIO_API aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid);
+
+/**
+ * The text is the ASCII symbol corresponding to the returnCode,
+ * or an English message saying the returnCode is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an AAudio result code.
+ */
+AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode);
+
+/**
+ * The text is the ASCII symbol corresponding to the stream state,
+ * or an English message saying the state is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an AAudio state.
+ */
+AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state);
+
+// ============================================================
+// StreamBuilder
+// ============================================================
+
+/**
+ * Create a StreamBuilder that can be used to open a Stream.
+ *
+ * The deviceId is initially unspecified, meaning that the current default device will be used.
+ *
+ * The default direction is AAUDIO_DIRECTION_OUTPUT.
+ * The default sharing mode is AAUDIO_SHARING_MODE_LEGACY.
+ * The data format, samplesPerFrames and sampleRate are unspecified and will be
+ * chosen by the device when it is opened.
+ *
+ * AAudioStreamBuilder_delete() must be called when you are done using the builder.
+ */
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder *builder);
+
+/**
+ * Request an audio device identified by an ID.
+ * The ID is platform specific.
+ * On Android, for example, the ID could be obtained from the Java AudioManager.
+ *
+ * By default, the primary device will be used.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param deviceId platform specific identifier or AAUDIO_DEVICE_UNSPECIFIED
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+ aaudio_device_id_t deviceId);
+/**
+ * Passes back requested device ID.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
+ aaudio_device_id_t *deviceId);
+
+/**
+ * Request a sample rate in Hz.
+ * The stream may be opened with a different sample rate.
+ * So the application should query for the actual rate after the stream is opened.
+ *
+ * Technically, this should be called the "frame rate" or "frames per second",
+ * because it refers to the number of complete frames transferred per second.
+ * But it is traditionally called "sample rate". So we use that term.
+ *
+ * Default is AAUDIO_UNSPECIFIED.
+ *
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+ aaudio_sample_rate_t sampleRate);
+
+/**
+ * Passes back the requested sample rate in Hertz (samples per second).
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
+ aaudio_sample_rate_t *sampleRate);
+
+
+/**
+ * Request a number of samples per frame.
+ * The stream may be opened with a different value.
+ * So the application should query for the actual value after the stream is opened.
+ *
+ * Default is AAUDIO_UNSPECIFIED.
+ *
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+ int32_t samplesPerFrame);
+
+/**
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
+ int32_t *samplesPerFrame);
+
+
+/**
+ * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
+ * The application should query for the actual format after the stream is opened.
+ *
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+ aaudio_audio_format_t format);
+
+/**
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
+ aaudio_audio_format_t *format);
+
+/**
+ * Request a mode for sharing the device.
+ * The requested sharing mode may not be available.
+ * So the application should query for the actual mode after the stream is opened.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param sharingMode AAUDIO_SHARING_MODE_LEGACY or AAUDIO_SHARING_MODE_EXCLUSIVE
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+ aaudio_sharing_mode_t sharingMode);
+
+/**
+ * Return requested sharing mode.
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
+ aaudio_sharing_mode_t *sharingMode);
+
+/**
+ * Request the direction for a stream. The default is AAUDIO_DIRECTION_OUTPUT.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+ aaudio_direction_t direction);
+
+/**
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param direction pointer to a variable to be set to the currently requested direction.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
+ aaudio_direction_t *direction);
+
+/**
+ * Set the requested maximum buffer capacity in frames.
+ * The final AAudioStream capacity may differ, but will probably be at least this big.
+ *
+ * Default is AAUDIO_UNSPECIFIED.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param frames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setBufferCapacity(AAudioStreamBuilder builder,
+ aaudio_size_frames_t frames);
+
+/**
+ * Query the requested maximum buffer capacity in frames that was passed to
+ * AAudioStreamBuilder_setBufferCapacity().
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param frames pointer to variable to receive the requested buffer capacity
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getBufferCapacity(AAudioStreamBuilder builder,
+ aaudio_size_frames_t *frames);
+
+/**
+ * Open a stream based on the options in the StreamBuilder.
+ *
+ * AAudioStream_close must be called when finished with the stream to recover
+ * the memory and to free the associated resources.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param stream pointer to a variable to receive the new stream handle
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
+ AAudioStream *stream);
+
+/**
+ * Delete the resources associated with the StreamBuilder.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder builder);
+
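+/*
+ * Illustrative sketch of the builder lifecycle described above. This is not
+ * part of the API; error checking is omitted and the sample rate value is
+ * only an example. It mirrors the sequence used in write_sine.cpp.
+ *
+ *     AAudioStreamBuilder builder = AAUDIO_STREAM_BUILDER_NONE;
+ *     AAudioStream stream = AAUDIO_STREAM_NONE;
+ *     aaudio_sample_rate_t actualRate = 0;
+ *
+ *     AAudio_createStreamBuilder(&builder);
+ *     AAudioStreamBuilder_setSampleRate(builder, 48000);
+ *     AAudioStreamBuilder_openStream(builder, &stream);
+ *
+ *     // The stream may have been opened with a different rate, so query it.
+ *     AAudioStream_getSampleRate(stream, &actualRate);
+ *
+ *     // ... use the stream ...
+ *
+ *     AAudioStream_close(stream);
+ *     AAudioStreamBuilder_delete(builder);
+ */
+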
+// ============================================================
+// Stream Control
+// ============================================================
+
+/**
+ * Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_close(AAudioStream stream);
+
+/**
+ * Asynchronously request to start playing the stream. For output streams, one should
+ * write to the stream to fill the buffer before starting.
+ * Otherwise it will underflow.
+ * After this call the state will be in AAUDIO_STREAM_STATE_STARTING or AAUDIO_STREAM_STATE_STARTED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestStart(AAudioStream stream);
+
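+/*
+ * Illustrative sketch (not part of the API): pre-fill an output stream with
+ * one burst of silence before starting so that it does not underflow
+ * immediately. "silence" is assumed to be a zeroed buffer holding
+ * framesPerBurst frames.
+ *
+ *     AAudioStream_write(stream, silence, framesPerBurst, 0);  // zero timeout: do not wait
+ *     AAudioStream_requestStart(stream);
+ */
+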
+/**
+ * Asynchronous request for the stream to pause.
+ * Pausing a stream will freeze the data flow but not flush any buffers.
+ * Use AAudioStream_requestStart() to resume playback after a pause.
+ * After this call the state will be in AAUDIO_STREAM_STATE_PAUSING or AAUDIO_STREAM_STATE_PAUSED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestPause(AAudioStream stream);
+
+/**
+ * Asynchronous request for the stream to flush.
+ * Flushing will discard any pending data.
+ * This call only works if the stream is pausing or paused. TODO review
+ * Frame counters are not reset by a flush. They may be advanced.
+ * After this call the state will be in AAUDIO_STREAM_STATE_FLUSHING or AAUDIO_STREAM_STATE_FLUSHED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestFlush(AAudioStream stream);
+
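+/*
+ * Illustrative ordering sketch for flushing, based on the description above
+ * (return codes should be checked in real code):
+ *
+ *     AAudioStream_requestPause(stream);
+ *     // Wait until the state is no longer AAUDIO_STREAM_STATE_PAUSING,
+ *     // for example with AAudioStream_waitForStateChange(), then:
+ *     AAudioStream_requestFlush(stream);
+ */
+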
+/**
+ * Asynchronous request for the stream to stop.
+ * The stream will stop after all of the data currently buffered has been played.
+ * After this call the state will be in AAUDIO_STREAM_STATE_STOPPING or AAUDIO_STREAM_STATE_STOPPED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestStop(AAudioStream stream);
+
+/**
+ * Query the current state, e.g. AAUDIO_STREAM_STATE_PAUSING
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param state pointer to a variable that will be set to the current state
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state);
+
+/**
+ * Wait until the current state no longer matches the input state.
+ *
+ * <pre><code>
+ * aaudio_stream_state_t currentState;
+ *     aaudio_result_t result = AAudioStream_getState(stream, &currentState);
+ *     while (result == AAUDIO_OK && currentState != AAUDIO_STREAM_STATE_PAUSING) {
+ *         result = AAudioStream_waitForStateChange(
+ *                     stream, currentState, &currentState, MY_TIMEOUT_NANOS);
+ * }
+ * </code></pre>
+ *
+ * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param inputState The state we want to avoid.
+ * @param nextState Pointer to a variable that will be set to the new state.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
+ aaudio_stream_state_t inputState,
+ aaudio_stream_state_t *nextState,
+ aaudio_nanoseconds_t timeoutNanoseconds);
+
+// ============================================================
+// Stream I/O
+// ============================================================
+
+/**
+ * Read data from the stream.
+ *
+ * The call will wait until the read is complete or until it runs out of time.
+ * If timeoutNanos is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for data.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to read. Only complete frames will be read.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually read or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
+ void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds);
+
+/**
+ * Write data to the stream.
+ *
+ * The call will wait until the write is complete or until it runs out of time.
+ * If timeoutNanos is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for room in the buffer.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to write. Only complete frames will be written.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually written or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
+ const void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds);
+
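+/*
+ * Illustrative sketch of handling the return value. The call may time out and
+ * accept fewer frames than requested; the variable names below are
+ * placeholders, not API symbols.
+ *
+ *     aaudio_result_t framesWritten = AAudioStream_write(stream, buffer,
+ *                                                        numFrames, timeoutNanos);
+ *     if (framesWritten < 0) {
+ *         // a negative value is an AAudio error code
+ *     } else if (framesWritten < numFrames) {
+ *         // timed out before all frames were accepted; retry the remainder
+ *     }
+ */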
+
+// ============================================================
+// High priority audio threads
+// ============================================================
+
+typedef void *(aaudio_audio_thread_proc_t)(void *);
+
+/**
+ * Create a thread associated with a stream. The thread has special properties for
+ * low latency audio performance. This thread can be used to implement a callback API.
+ *
+ * Only one thread may be associated with a stream.
+ *
+ * Note that this API is in flux.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param periodNanoseconds the estimated period at which the audio thread will need to wake up
+ * @param threadProc your thread entry point
+ * @param arg an argument that will be passed to your thread entry point
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
+ aaudio_nanoseconds_t periodNanoseconds,
+ aaudio_audio_thread_proc_t *threadProc,
+ void *arg);
+
+/**
+ * Wait until the thread exits or an error occurs.
+ * The thread handle will be deleted.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param returnArg a pointer to a variable to receive the return value
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
+ void **returnArg,
+ aaudio_nanoseconds_t timeoutNanoseconds);
+
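+/*
+ * Illustrative sketch of the thread API. "myThreadProc" and "myContext" are
+ * placeholders for an application-defined aaudio_audio_thread_proc_t and its
+ * argument, not AAudio symbols. This mirrors the pattern in
+ * write_sine_threaded.cpp.
+ *
+ *     AAudioStream_createThread(stream, burstPeriodNanos, myThreadProc, myContext);
+ *     // ... the thread typically loops, rendering audio and calling
+ *     // AAudioStream_write(), until told to exit ...
+ *     AAudioStream_joinThread(stream, NULL, 2 * AAUDIO_NANOS_PER_SECOND);
+ */
+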
+// ============================================================
+// Stream - queries
+// ============================================================
+
+
+/**
+ * This can be used to adjust the latency of the buffer by changing
+ * the threshold where blocking will occur.
+ * By combining this with AAudioStream_getUnderrunCount(), the latency can be tuned
+ * at run-time for each device.
+ *
+ * This cannot be set higher than AAudioStream_getBufferCapacity().
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param requestedFrames requested number of frames that can be filled without blocking
+ * @param actualFrames receives final number of frames
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
+ aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames);
+
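+/*
+ * Illustrative tuning sketch based on the description above: start small and
+ * grow the buffer by one burst whenever the xrun count reported by
+ * AAudioStream_getXRunCount() increases. This is one plausible strategy, not
+ * a prescribed one.
+ *
+ *     if (currentXRuns > previousXRuns) {
+ *         previousXRuns = currentXRuns;
+ *         requestedSize += framesPerBurst;
+ *         AAudioStream_setBufferSize(stream, requestedSize, &actualSize);
+ *     }
+ */
+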
+/**
+ * Query the maximum number of frames that can be filled without blocking.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer size
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream,
+ aaudio_size_frames_t *frames);
+
+/**
+ * Query the number of frames that are read or written by the endpoint at one time.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the burst size
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream,
+ aaudio_size_frames_t *frames);
+
+/**
+ * Query maximum buffer capacity in frames.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer capacity
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream,
+ aaudio_size_frames_t *frames);
+
+/**
+ * An XRun is an Underrun or an Overrun.
+ * During playing, an underrun will occur if the stream is not written in time
+ * and the system runs out of valid data.
+ * During recording, an overrun will occur if the stream is not read in time
+ * and there is no place to put the incoming data so it is discarded.
+ *
+ * An underrun or overrun can cause an audible "pop" or "glitch".
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param xRunCount pointer to variable to receive the underrun or overrun count
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param sampleRate pointer to variable to receive the actual sample rate
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream,
+ aaudio_sample_rate_t *sampleRate);
+
+/**
+ * The samplesPerFrame is also known as channelCount.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param samplesPerFrame pointer to variable to receive the actual samples per frame
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream,
+ int32_t *samplesPerFrame);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param deviceId pointer to variable to receive the actual device ID
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream,
+ aaudio_device_id_t *deviceId);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param format pointer to variable to receive the actual data format
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream,
+ aaudio_audio_format_t *format);
+
+/**
+ * Provide actual sharing mode.
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param sharingMode pointer to variable to receive the actual sharing mode
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
+ aaudio_sharing_mode_t *sharingMode);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param direction pointer to a variable to be set to the current direction.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream,
+ aaudio_direction_t *direction);
+
+/**
+ * Passes back the number of frames that have been written since the stream was created.
+ * For an output stream, this will be advanced by the application calling write().
+ * For an input stream, this will be advanced by the device or service.
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames written
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
+ aaudio_position_frames_t *frames);
+
+/**
+ * Passes back the number of frames that have been read since the stream was created.
+ * For an output stream, this will be advanced by the device or service.
+ * For an input stream, this will be advanced by the application calling read().
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames read
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream,
+ aaudio_position_frames_t *frames);
+
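+/*
+ * Illustrative sketch: for an output stream, the difference between the two
+ * counters above gives a rough estimate of how much audio is still queued
+ * ahead of the device. This is an approximation, not an API-defined latency
+ * value.
+ *
+ *     aaudio_position_frames_t written = 0;
+ *     aaudio_position_frames_t read = 0;
+ *     AAudioStream_getFramesWritten(stream, &written);
+ *     AAudioStream_getFramesRead(stream, &read);
+ *     aaudio_position_frames_t queuedFrames = written - read;
+ */
+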
+/**
+ * Passes back the time at which a particular frame was presented.
+ * This can be used to synchronize audio with video or MIDI.
+ * It can also be used to align a recorded stream with a playback stream.
+ *
+ * Timestamps are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
+ * AAUDIO_ERROR_INVALID_STATE will be returned if the stream is not started.
+ * Note that because requestStart() is asynchronous, timestamps will not be valid until
+ * a short time after calling requestStart().
+ * So AAUDIO_ERROR_INVALID_STATE should not be considered a fatal error.
+ * Just try calling again later.
+ *
+ * If an error occurs, then the position and time will not be modified.
+ *
+ * The position and time passed back are monotonically increasing.
+ *
+ * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param clockid AAUDIO_CLOCK_MONOTONIC or AAUDIO_CLOCK_BOOTTIME
+ * @param framePosition pointer to a variable to receive the position
+ * @param timeNanoseconds pointer to a variable to receive the time
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
+ aaudio_clockid_t clockid,
+ aaudio_position_frames_t *framePosition,
+ aaudio_nanoseconds_t *timeNanoseconds);
+
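+/*
+ * Illustrative sketch of the retry behaviour described above; the clock choice
+ * is only an example.
+ *
+ *     aaudio_position_frames_t framePosition = 0;
+ *     aaudio_nanoseconds_t timeNanos = 0;
+ *     aaudio_result_t result = AAudioStream_getTimestamp(stream,
+ *             AAUDIO_CLOCK_MONOTONIC, &framePosition, &timeNanos);
+ *     if (result == AAUDIO_ERROR_INVALID_STATE) {
+ *         // the stream has not started yet; try again a little later
+ *     }
+ */
+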
+#ifdef __cplusplus
+}
+#endif
+
+#endif //AAUDIO_AAUDIO_H
diff --git a/media/libaaudio/include/aaudio/AAudioDefinitions.h b/media/libaaudio/include/aaudio/AAudioDefinitions.h
new file mode 100644
index 0000000..979b8c9
--- /dev/null
+++ b/media/libaaudio/include/aaudio/AAudioDefinitions.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIODEFINITIONS_H
+#define AAUDIO_AAUDIODEFINITIONS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int32_t aaudio_handle_t; // negative handles are error codes
+typedef int32_t aaudio_result_t;
+/**
+ * A platform specific identifier for a device.
+ */
+typedef int32_t aaudio_device_id_t;
+typedef int32_t aaudio_sample_rate_t;
+/** This is used for small quantities such as the number of frames in a buffer. */
+typedef int32_t aaudio_size_frames_t;
+/** This is used for small quantities such as the number of bytes in a frame. */
+typedef int32_t aaudio_size_bytes_t;
+/**
+ * This is used for large quantities, such as the number of frames that have
+ * been played since a stream was started.
+ * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
+ */
+typedef int64_t aaudio_position_frames_t;
+
+typedef int64_t aaudio_nanoseconds_t;
+
+/**
+ * This is used to represent a value that has not been specified.
+ * For example, an application could use AAUDIO_UNSPECIFIED to indicate
+ * that it did not care what the specific value of a parameter was
+ * and would accept whatever it was given.
+ */
+#define AAUDIO_UNSPECIFIED 0
+#define AAUDIO_DEVICE_UNSPECIFIED ((aaudio_device_id_t) -1)
+#define AAUDIO_NANOS_PER_MICROSECOND ((int64_t)1000)
+#define AAUDIO_NANOS_PER_MILLISECOND (AAUDIO_NANOS_PER_MICROSECOND * 1000)
+#define AAUDIO_MILLIS_PER_SECOND 1000
+#define AAUDIO_NANOS_PER_SECOND (AAUDIO_NANOS_PER_MILLISECOND * AAUDIO_MILLIS_PER_SECOND)
+
+#define AAUDIO_HANDLE_INVALID ((aaudio_handle_t)-1)
+
+enum aaudio_direction_t {
+ AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_DIRECTION_INPUT,
+ AAUDIO_DIRECTION_COUNT // This should always be last.
+};
+
+enum aaudio_audio_format_t {
+ AAUDIO_FORMAT_INVALID = -1,
+ AAUDIO_FORMAT_UNSPECIFIED = 0,
+ AAUDIO_FORMAT_PCM_I16,
+ AAUDIO_FORMAT_PCM_FLOAT,
+ AAUDIO_FORMAT_PCM_I8_24,
+ AAUDIO_FORMAT_PCM_I32
+};
+
+// TODO These are deprecated. Remove these aliases once all references are replaced.
+#define AAUDIO_FORMAT_PCM16 AAUDIO_FORMAT_PCM_I16
+#define AAUDIO_FORMAT_PCM824 AAUDIO_FORMAT_PCM_I8_24
+#define AAUDIO_FORMAT_PCM32 AAUDIO_FORMAT_PCM_I32
+
+enum {
+ AAUDIO_OK,
+ AAUDIO_ERROR_BASE = -900, // TODO review
+ AAUDIO_ERROR_DISCONNECTED,
+ AAUDIO_ERROR_ILLEGAL_ARGUMENT,
+ AAUDIO_ERROR_INCOMPATIBLE,
+ AAUDIO_ERROR_INTERNAL, // an underlying API returned an error code
+ AAUDIO_ERROR_INVALID_STATE,
+ AAUDIO_ERROR_UNEXPECTED_STATE,
+ AAUDIO_ERROR_UNEXPECTED_VALUE,
+ AAUDIO_ERROR_INVALID_HANDLE,
+ AAUDIO_ERROR_INVALID_QUERY,
+ AAUDIO_ERROR_UNIMPLEMENTED,
+ AAUDIO_ERROR_UNAVAILABLE,
+ AAUDIO_ERROR_NO_FREE_HANDLES,
+ AAUDIO_ERROR_NO_MEMORY,
+ AAUDIO_ERROR_NULL,
+ AAUDIO_ERROR_TIMEOUT,
+ AAUDIO_ERROR_WOULD_BLOCK,
+ AAUDIO_ERROR_INVALID_ORDER,
+ AAUDIO_ERROR_OUT_OF_RANGE,
+ AAUDIO_ERROR_NO_SERVICE
+};
+
+typedef enum {
+ AAUDIO_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
+ AAUDIO_CLOCK_BOOTTIME, // Clock since booted, runs all the time.
+ AAUDIO_CLOCK_COUNT // This should always be last.
+} aaudio_clockid_t;
+
+typedef enum
+{
+ AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+ AAUDIO_STREAM_STATE_OPEN,
+ AAUDIO_STREAM_STATE_STARTING,
+ AAUDIO_STREAM_STATE_STARTED,
+ AAUDIO_STREAM_STATE_PAUSING,
+ AAUDIO_STREAM_STATE_PAUSED,
+ AAUDIO_STREAM_STATE_FLUSHING,
+ AAUDIO_STREAM_STATE_FLUSHED,
+ AAUDIO_STREAM_STATE_STOPPING,
+ AAUDIO_STREAM_STATE_STOPPED,
+ AAUDIO_STREAM_STATE_CLOSING,
+ AAUDIO_STREAM_STATE_CLOSED,
+} aaudio_stream_state_t;
+
+// TODO review API
+typedef enum {
+ /**
+ * This will use an AudioTrack object for playing audio
+ * and an AudioRecord for recording data.
+ */
+ AAUDIO_SHARING_MODE_LEGACY,
+ /**
+ * This will be the only stream using a particular source or sink.
+ * This mode will provide the lowest possible latency.
+ * You should close EXCLUSIVE streams immediately when you are not using them.
+ */
+ AAUDIO_SHARING_MODE_EXCLUSIVE,
+ /**
+ * Multiple applications will be mixed by the AAudio Server.
+ * This will have higher latency than the EXCLUSIVE mode.
+ */
+ AAUDIO_SHARING_MODE_SHARED,
+ /**
+ * Multiple applications will do their own mixing into a memory mapped buffer.
+ * It may be possible for malicious applications to read the data produced by
+ * other apps. So do not use this for private data such as telephony or messaging.
+ */
+ AAUDIO_SHARING_MODE_PUBLIC_MIX,
+ AAUDIO_SHARING_MODE_COUNT // This should always be last.
+} aaudio_sharing_mode_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AAUDIO_AAUDIODEFINITIONS_H
diff --git a/media/liboboe/include/oboe/NOTICE b/media/libaaudio/include/aaudio/NOTICE
similarity index 100%
rename from media/liboboe/include/oboe/NOTICE
rename to media/libaaudio/include/aaudio/NOTICE
diff --git a/media/liboboe/include/oboe/README.md b/media/libaaudio/include/aaudio/README.md
similarity index 69%
rename from media/liboboe/include/oboe/README.md
rename to media/libaaudio/include/aaudio/README.md
index de60d03..8c4ae51 100644
--- a/media/liboboe/include/oboe/README.md
+++ b/media/libaaudio/include/aaudio/README.md
@@ -1,4 +1,4 @@
-Oboe Audio headers
+AAudio Audio headers
This folder contains the public header files.
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
new file mode 100644
index 0000000..ecae991
--- /dev/null
+++ b/media/libaaudio/libaaudio.map.txt
@@ -0,0 +1,46 @@
+LIBAAUDIO {
+ global:
+ AAudio_getNanoseconds;
+ AAudio_convertResultToText;
+ AAudio_convertStreamStateToText;
+ AAudio_createStreamBuilder;
+ AAudioStreamBuilder_setDeviceId;
+ AAudioStreamBuilder_setSampleRate;
+ AAudioStreamBuilder_getSampleRate;
+ AAudioStreamBuilder_setSamplesPerFrame;
+ AAudioStreamBuilder_getSamplesPerFrame;
+ AAudioStreamBuilder_setFormat;
+ AAudioStreamBuilder_getFormat;
+ AAudioStreamBuilder_setSharingMode;
+ AAudioStreamBuilder_getSharingMode;
+ AAudioStreamBuilder_setDirection;
+ AAudioStreamBuilder_getDirection;
+ AAudioStreamBuilder_openStream;
+ AAudioStreamBuilder_delete;
+ AAudioStream_close;
+ AAudioStream_requestStart;
+ AAudioStream_requestPause;
+ AAudioStream_requestFlush;
+ AAudioStream_requestStop;
+ AAudioStream_getState;
+ AAudioStream_waitForStateChange;
+ AAudioStream_read;
+ AAudioStream_write;
+ AAudioStream_createThread;
+ AAudioStream_joinThread;
+ AAudioStream_setBufferSize;
+ AAudioStream_getBufferSize;
+ AAudioStream_getFramesPerBurst;
+ AAudioStream_getBufferCapacity;
+ AAudioStream_getXRunCount;
+ AAudioStream_getSampleRate;
+ AAudioStream_getSamplesPerFrame;
+ AAudioStream_getFormat;
+ AAudioStream_getSharingMode;
+ AAudioStream_getDirection;
+ AAudioStream_getFramesWritten;
+ AAudioStream_getFramesRead;
+ AAudioStream_getTimestamp;
+ local:
+ *;
+};
diff --git a/media/libaaudio/scripts/convert_oboe_aaudio.sh b/media/libaaudio/scripts/convert_oboe_aaudio.sh
new file mode 100755
index 0000000..2bf025a
--- /dev/null
+++ b/media/libaaudio/scripts/convert_oboe_aaudio.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Use SED to convert the Oboe API to the AAudio API
+
+echo "Convert Oboe names to AAudio names"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBOBOE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/liboboe
+echo "LIBOBOE_DIR is ${LIBOBOE_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+function convertPathPattern {
+ path=$1
+ pattern=$2
+ find $path -type f -name $pattern -exec sed -i -f ${LIBOBOE_DIR}/scripts/oboe_to_aaudio.sed {} \;
+}
+
+function convertPath {
+ path=$1
+ convertPathPattern $1 '*.cpp'
+ convertPathPattern $1 '*.h'
+ # the mk match does not work!
+ convertPathPattern $1 '*.mk'
+ convertPathPattern $1 '*.md'
+ convertPathPattern $1 '*.bp'
+}
+
+#convertPath ${LIBOBOE_DIR}/examples
+#convertPath ${LIBOBOE_DIR}/include
+#convertPath ${LIBOBOE_DIR}/src
+#convertPath ${LIBOBOE_DIR}/tests
+convertPath ${LIBOBOE_DIR}
+convertPathPattern ${LIBOBOE_DIR} Android.mk
+convertPathPattern ${LIBOBOE_DIR} liboboe.map.txt
+
+convertPath ${OBOESERVICE_DIR}
+convertPathPattern ${OBOESERVICE_DIR} Android.mk
+
+convertPathPattern ${OBOETEST_DIR} test_aaudio.cpp
+
+mv ${LIBOBOE_DIR}/include/oboe ${LIBOBOE_DIR}/include/aaudio
+mv ${LIBOBOE_DIR}/include/aaudio/OboeAudio.h ${LIBOBOE_DIR}/include/aaudio/AAudio.h
+mv ${OBOESERVICE_DIR}/OboeService.h ${OBOESERVICE_DIR}/AAudioServiceDefinitions.h
+mv ${LIBOBOE_DIR}/tests/test_oboe_api.cpp ${LIBOBOE_DIR}/tests/test_aaudio_api.cpp
+
+# Rename files with Oboe in the name.
+find -name "*OboeAudioService*.cpp" | rename -v "s/OboeAudioService/AAudioService/g"
+find -name "*OboeAudioService*.h" | rename -v "s/OboeAudioService/AAudioService/g"
+find -name "*Oboe*.cpp" | rename -v "s/Oboe/AAudio/g"
+find -name "*Oboe*.h" | rename -v "s/Oboe/AAudio/g"
diff --git a/media/libaaudio/scripts/oboe_to_aaudio.sed b/media/libaaudio/scripts/oboe_to_aaudio.sed
new file mode 100644
index 0000000..7da85a0
--- /dev/null
+++ b/media/libaaudio/scripts/oboe_to_aaudio.sed
@@ -0,0 +1,16 @@
+s/liboboe/libclarinet/g
+s/oboeservice/clarinetservice/g
+
+s/OboeAudio\.h/AAudio\.h/g
+s/OboeService\.h/AAudioServiceDefinitions\.h/g
+s/OboeAudioService/AAudioService/g
+s/LOG_TAG "OboeAudio"/LOG_TAG "AAudio"/g
+s/OBOE_AUDIO_FORMAT/AAUDIO_FORMAT/g
+s/OBOEAUDIO/AAUDIO/g
+
+s/oboe/aaudio/g
+s/Oboe/AAudio/g
+s/OBOE/AAUDIO/g
+
+s/libclarinet/liboboe/g
+s/clarinetservice/oboeservice/g
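Reading the rules in order: the first two substitutions temporarily rewrite the literal strings liboboe and oboeservice to placeholder names (libclarinet, clarinetservice) so that the generic s/oboe/aaudio/ and s/OBOE/AAUDIO/ rules below cannot touch them, and the last two substitutions restore the original spellings. For example, "LOCAL_MODULE := liboboe" passes through unchanged, while "#include <oboe/OboeAudio.h>" becomes "#include <aaudio/AAudio.h>"; presumably this keeps path and module references to the still-existing liboboe and oboeservice directories working until those are renamed separately.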
diff --git a/media/libaaudio/scripts/revert_all_aaudio.sh b/media/libaaudio/scripts/revert_all_aaudio.sh
new file mode 100755
index 0000000..de3fa7a
--- /dev/null
+++ b/media/libaaudio/scripts/revert_all_aaudio.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+echo "Revert Oboe names to AAudio names"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBOBOE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/liboboe
+echo "LIBOBOE_DIR is ${LIBOBOE_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+git checkout -- ${LIBOBOE_DIR}/examples
+git checkout -- ${LIBOBOE_DIR}/include
+git checkout -- ${LIBOBOE_DIR}/src
+git checkout -- ${LIBOBOE_DIR}/tests
+git checkout -- ${LIBOBOE_DIR}/Android.bp
+git checkout -- ${LIBOBOE_DIR}/README.md
+git checkout -- ${LIBOBOE_DIR}/liboboe.map.txt
+git checkout -- ${OBOESERVICE_DIR}
+git checkout -- ${OBOETEST_DIR}
+
+rm -rf ${LIBOBOE_DIR}/include/aaudio
+
+find . -name "*aaudio*.cpp" -print -delete
+find . -name "*AAudio*.cpp" -print -delete
+find . -name "*AAudio*.h" -print -delete
diff --git a/media/liboboe/src/Android.mk b/media/libaaudio/src/Android.mk
similarity index 68%
rename from media/liboboe/src/Android.mk
rename to media/libaaudio/src/Android.mk
index 59edcb2..a016b49 100644
--- a/media/liboboe/src/Android.mk
+++ b/media/libaaudio/src/Android.mk
@@ -1,22 +1,22 @@
LOCAL_PATH:= $(call my-dir)
# ======================= STATIC LIBRARY ==========================
-# This is being built because it make Oboe testing very easy with a complete executable.
+# This is being built because it makes AAudio testing very easy with a complete executable.
# TODO Remove this target later, when not needed.
include $(CLEAR_VARS)
-LOCAL_MODULE := liboboe
+LOCAL_MODULE := libaaudio
LOCAL_MODULE_TAGS := optional
-LIBOBOE_DIR := $(TOP)/frameworks/av/media/liboboe
-LIBOBOE_SRC_DIR := $(LIBOBOE_DIR)/src
+LIBAAUDIO_DIR := $(TOP)/frameworks/av/media/libaaudio
+LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/native/include \
system/core/base/include \
- frameworks/native/media/liboboe/include/include \
- frameworks/av/media/liboboe/include \
+ frameworks/native/media/libaaudio/include/include \
+ frameworks/av/media/libaaudio/include \
frameworks/native/include \
$(LOCAL_PATH) \
$(LOCAL_PATH)/binding \
@@ -29,11 +29,11 @@
LOCAL_SRC_FILES = \
core/AudioStream.cpp \
core/AudioStreamBuilder.cpp \
- core/OboeAudio.cpp \
+ core/AAudioAudio.cpp \
legacy/AudioStreamRecord.cpp \
legacy/AudioStreamTrack.cpp \
utility/HandleTracker.cpp \
- utility/OboeUtilities.cpp \
+ utility/AAudioUtilities.cpp \
fifo/FifoBuffer.cpp \
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
@@ -43,31 +43,31 @@
binding/SharedRegionParcelable.cpp \
binding/RingBufferParcelable.cpp \
binding/AudioEndpointParcelable.cpp \
- binding/OboeStreamRequest.cpp \
- binding/OboeStreamConfiguration.cpp \
- binding/IOboeAudioService.cpp
+ binding/AAudioStreamRequest.cpp \
+ binding/AAudioStreamConfiguration.cpp \
+ binding/IAAudioService.cpp
LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
# By default, all symbols are hidden.
# LOCAL_CFLAGS += -fvisibility=hidden
-# OBOE_API is used to explicitly export a function or a variable as a visible symbol.
-LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
+# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
include $(BUILD_STATIC_LIBRARY)
# ======================= SHARED LIBRARY ==========================
include $(CLEAR_VARS)
-LOCAL_MODULE := liboboe
+LOCAL_MODULE := libaaudio
LOCAL_MODULE_TAGS := optional
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/native/include \
system/core/base/include \
- frameworks/native/media/liboboe/include/include \
- frameworks/av/media/liboboe/include \
+ frameworks/native/media/libaaudio/include/include \
+ frameworks/av/media/libaaudio/include \
$(LOCAL_PATH) \
$(LOCAL_PATH)/binding \
$(LOCAL_PATH)/client \
@@ -78,11 +78,11 @@
LOCAL_SRC_FILES = core/AudioStream.cpp \
core/AudioStreamBuilder.cpp \
- core/OboeAudio.cpp \
+ core/AAudioAudio.cpp \
legacy/AudioStreamRecord.cpp \
legacy/AudioStreamTrack.cpp \
utility/HandleTracker.cpp \
- utility/OboeUtilities.cpp \
+ utility/AAudioUtilities.cpp \
fifo/FifoBuffer.cpp \
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
@@ -92,16 +92,16 @@
binding/SharedRegionParcelable.cpp \
binding/RingBufferParcelable.cpp \
binding/AudioEndpointParcelable.cpp \
- binding/OboeStreamRequest.cpp \
- binding/OboeStreamConfiguration.cpp \
- binding/IOboeAudioService.cpp
+ binding/AAudioStreamRequest.cpp \
+ binding/AAudioStreamConfiguration.cpp \
+ binding/IAAudioService.cpp
LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
# By default, all symbols are hidden.
# LOCAL_CFLAGS += -fvisibility=hidden
-# OBOE_API is used to explicitly export a function or a variable as a visible symbol.
-LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
+# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder
diff --git a/media/liboboe/src/binding/OboeServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
similarity index 84%
rename from media/liboboe/src/binding/OboeServiceDefinitions.h
rename to media/libaaudio/src/binding/AAudioServiceDefinitions.h
index ad00fe2..ca637ef 100644
--- a/media/liboboe/src/binding/OboeServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -14,22 +14,22 @@
* limitations under the License.
*/
-#ifndef BINDING_OBOESERVICEDEFINITIONS_H
-#define BINDING_OBOESERVICEDEFINITIONS_H
+#ifndef BINDING_AAUDIOSERVICEDEFINITIONS_H
+#define BINDING_AAUDIOSERVICEDEFINITIONS_H
#include <stdint.h>
#include <utils/RefBase.h>
#include <binder/TextOutput.h>
#include <binder/IInterface.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
using android::NO_ERROR;
using android::IBinder;
namespace android {
-enum oboe_commands_t {
+enum aaudio_commands_t {
OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
CLOSE_STREAM,
GET_STREAM_DESCRIPTION,
@@ -37,15 +37,14 @@
PAUSE_STREAM,
FLUSH_STREAM,
REGISTER_AUDIO_THREAD,
- UNREGISTER_AUDIO_THREAD,
- TICKLE
+ UNREGISTER_AUDIO_THREAD
};
} // namespace android
-namespace oboe {
+namespace aaudio {
-enum oboe_commands_t {
+enum aaudio_commands_t {
OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
CLOSE_STREAM,
GET_STREAM_DESCRIPTION,
@@ -53,17 +52,16 @@
PAUSE_STREAM,
FLUSH_STREAM,
REGISTER_AUDIO_THREAD,
- UNREGISTER_AUDIO_THREAD,
- TICKLE
+ UNREGISTER_AUDIO_THREAD
};
// TODO Expand this to include all the open parameters.
-typedef struct OboeServiceStreamInfo_s {
+typedef struct AAudioServiceStreamInfo_s {
int32_t deviceId;
int32_t samplesPerFrame; // number of channels
- oboe_sample_rate_t sampleRate;
- oboe_audio_format_t audioFormat;
-} OboeServiceStreamInfo;
+ aaudio_sample_rate_t sampleRate;
+ aaudio_audio_format_t audioFormat;
+} AAudioServiceStreamInfo;
// This must be a fixed width so it can be in shared memory.
enum RingbufferFlags : uint32_t {
@@ -97,6 +95,6 @@
RingBufferDescriptor downDataQueueDescriptor; // eg. playback
} EndpointDescriptor;
-} // namespace oboe
+} // namespace aaudio
-#endif //BINDING_OBOESERVICEDEFINITIONS_H
+#endif //BINDING_AAUDIOSERVICEDEFINITIONS_H
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
new file mode 100644
index 0000000..16cb5eb
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_MESSAGE_H
+#define AAUDIO_AAUDIO_SERVICE_MESSAGE_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+namespace aaudio {
+
+// TODO move this to an "include" folder for the service.
+
+struct AAudioMessageTimestamp {
+ aaudio_position_frames_t position;
+ int64_t deviceOffset; // add to client position to get device position
+ aaudio_nanoseconds_t timestamp;
+};
+
+typedef enum aaudio_service_event_e : uint32_t {
+ AAUDIO_SERVICE_EVENT_STARTED,
+ AAUDIO_SERVICE_EVENT_PAUSED,
+ AAUDIO_SERVICE_EVENT_FLUSHED,
+ AAUDIO_SERVICE_EVENT_CLOSED,
+ AAUDIO_SERVICE_EVENT_DISCONNECTED
+} aaudio_service_event_t;
+
+struct AAudioMessageEvent {
+ aaudio_service_event_t event;
+ int32_t data1;
+ int64_t data2;
+};
+
+typedef struct AAudioServiceMessage_s {
+ enum class code : uint32_t {
+ NOTHING,
+ TIMESTAMP,
+ EVENT,
+ };
+
+ code what;
+ union {
+ AAudioMessageTimestamp timestamp;
+ AAudioMessageEvent event;
+ };
+} AAudioServiceMessage;
+
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_MESSAGE_H
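A minimal sketch (not part of this patch) of dispatching one of these messages on the client side; only names declared in this header are used:

    #include "binding/AAudioServiceMessage.h"

    static void sketchHandleMessage(const aaudio::AAudioServiceMessage &message) {
        using Msg = aaudio::AAudioServiceMessage;
        switch (message.what) {
            case Msg::code::TIMESTAMP:
                // message.timestamp.position / .deviceOffset / .timestamp
                break;
            case Msg::code::EVENT:
                if (message.event.event == aaudio::AAUDIO_SERVICE_EVENT_DISCONNECTED) {
                    // the service side of the stream has gone away
                }
                break;
            case Msg::code::NOTHING:
            default:
                break;
        }
    }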
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
new file mode 100644
index 0000000..fe3a59f
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/AAudioStreamConfiguration.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+AAudioStreamConfiguration::AAudioStreamConfiguration() {}
+AAudioStreamConfiguration::~AAudioStreamConfiguration() {}
+
+status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
+ parcel->writeInt32(mDeviceId);
+ parcel->writeInt32(mSampleRate);
+ parcel->writeInt32(mSamplesPerFrame);
+ parcel->writeInt32((int32_t) mAudioFormat);
+ parcel->writeInt32(mBufferCapacity);
+ return NO_ERROR; // TODO check for errors above
+}
+
+status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
+ int32_t temp;
+ parcel->readInt32(&mDeviceId);
+ parcel->readInt32(&mSampleRate);
+ parcel->readInt32(&mSamplesPerFrame);
+ parcel->readInt32(&temp);
+ mAudioFormat = (aaudio_audio_format_t) temp;
+ parcel->readInt32(&mBufferCapacity);
+ return NO_ERROR; // TODO check for errors above
+}
+
+aaudio_result_t AAudioStreamConfiguration::validate() {
+ // Validate results of the open.
+ if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
+ ALOGE("AAudioStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+
+ if (mSamplesPerFrame < 1 || mSamplesPerFrame >= 32) { // TODO review limits
+ ALOGE("AAudioStreamConfiguration.validate() invalid samplesPerFrame = %d", mSamplesPerFrame);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+
+ switch (mAudioFormat) {
+ case AAUDIO_FORMAT_PCM_I16:
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ case AAUDIO_FORMAT_PCM_I8_24:
+ case AAUDIO_FORMAT_PCM_I32:
+ break;
+ default:
+ ALOGE("AAudioStreamConfiguration.validate() invalid audioFormat = %d", mAudioFormat);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+
+ if (mBufferCapacity < 0) {
+ ALOGE("AAudioStreamConfiguration.validate() invalid mBufferCapacity = %d", mBufferCapacity);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ return AAUDIO_OK;
+}
+
+void AAudioStreamConfiguration::dump() {
+ ALOGD("AAudioStreamConfiguration mSampleRate = %d -----", mSampleRate);
+ ALOGD("AAudioStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
+ ALOGD("AAudioStreamConfiguration mAudioFormat = %d", (int)mAudioFormat);
+ ALOGD("AAudioStreamConfiguration mBufferCapacity = %d", mBufferCapacity);
+}
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
new file mode 100644
index 0000000..efcdae8
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AAUDIO_STREAM_CONFIGURATION_H
+#define BINDING_AAUDIO_STREAM_CONFIGURATION_H
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <aaudio/AAudioDefinitions.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+class AAudioStreamConfiguration : public Parcelable {
+public:
+ AAudioStreamConfiguration();
+ virtual ~AAudioStreamConfiguration();
+
+ aaudio_device_id_t getDeviceId() const {
+ return mDeviceId;
+ }
+
+ void setDeviceId(aaudio_device_id_t deviceId) {
+ mDeviceId = deviceId;
+ }
+
+ aaudio_sample_rate_t getSampleRate() const {
+ return mSampleRate;
+ }
+
+ void setSampleRate(aaudio_sample_rate_t sampleRate) {
+ mSampleRate = sampleRate;
+ }
+
+ int32_t getSamplesPerFrame() const {
+ return mSamplesPerFrame;
+ }
+
+ void setSamplesPerFrame(int32_t samplesPerFrame) {
+ mSamplesPerFrame = samplesPerFrame;
+ }
+
+ aaudio_audio_format_t getAudioFormat() const {
+ return mAudioFormat;
+ }
+
+ void setAudioFormat(aaudio_audio_format_t audioFormat) {
+ mAudioFormat = audioFormat;
+ }
+
+ aaudio_size_frames_t getBufferCapacity() const {
+ return mBufferCapacity;
+ }
+
+ void setBufferCapacity(aaudio_size_frames_t frames) {
+ mBufferCapacity = frames;
+ }
+
+ virtual status_t writeToParcel(Parcel* parcel) const override;
+
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+ aaudio_result_t validate();
+
+ void dump();
+
+protected:
+ aaudio_device_id_t mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
+ aaudio_sample_rate_t mSampleRate = AAUDIO_UNSPECIFIED;
+ int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ aaudio_audio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_size_frames_t mBufferCapacity = AAUDIO_UNSPECIFIED;
+};
+
+} /* namespace aaudio */
+
+#endif //BINDING_AAUDIO_STREAM_CONFIGURATION_H
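A minimal sketch (not part of this patch) of filling in a configuration and writing it to a Parcel, the same pattern the Bp/Bn code later in this patch uses; the chosen values are arbitrary:

    #include <binder/Parcel.h>
    #include "binding/AAudioStreamConfiguration.h"

    static aaudio_result_t sketchWriteConfiguration(android::Parcel *parcel) {
        aaudio::AAudioStreamConfiguration configuration;
        configuration.setSampleRate(48000);
        configuration.setSamplesPerFrame(2);                  // stereo
        configuration.setAudioFormat(AAUDIO_FORMAT_PCM_I16);
        aaudio_result_t result = configuration.validate();    // range-checks rate, channels, format
        if (result != AAUDIO_OK) {
            return result;
        }
        configuration.writeToParcel(parcel);                  // status_t return ignored here for brevity
        return AAUDIO_OK;
    }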
diff --git a/media/liboboe/src/binding/OboeStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
similarity index 69%
rename from media/liboboe/src/binding/OboeStreamRequest.cpp
rename to media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 5d521d0..5202b73 100644
--- a/media/liboboe/src/binding/OboeStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -20,32 +20,32 @@
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
-#include "binding/OboeStreamConfiguration.h"
-#include "binding/OboeStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AAudioStreamRequest.h"
using android::NO_ERROR;
using android::status_t;
using android::Parcel;
using android::Parcelable;
-using namespace oboe;
+using namespace aaudio;
-OboeStreamRequest::OboeStreamRequest()
+AAudioStreamRequest::AAudioStreamRequest()
: mConfiguration()
{}
-OboeStreamRequest::~OboeStreamRequest() {}
+AAudioStreamRequest::~AAudioStreamRequest() {}
-status_t OboeStreamRequest::writeToParcel(Parcel* parcel) const {
+status_t AAudioStreamRequest::writeToParcel(Parcel* parcel) const {
parcel->writeInt32((int32_t) mUserId);
parcel->writeInt32((int32_t) mProcessId);
mConfiguration.writeToParcel(parcel);
return NO_ERROR; // TODO check for errors above
}
-status_t OboeStreamRequest::readFromParcel(const Parcel* parcel) {
+status_t AAudioStreamRequest::readFromParcel(const Parcel* parcel) {
int32_t temp;
parcel->readInt32(&temp);
mUserId = (uid_t) temp;
@@ -55,12 +55,12 @@
return NO_ERROR; // TODO check for errors above
}
-oboe_result_t OboeStreamRequest::validate() {
+aaudio_result_t AAudioStreamRequest::validate() {
return mConfiguration.validate();
}
-void OboeStreamRequest::dump() {
- ALOGD("OboeStreamRequest mUserId = %d -----", mUserId);
- ALOGD("OboeStreamRequest mProcessId = %d", mProcessId);
+void AAudioStreamRequest::dump() {
+ ALOGD("AAudioStreamRequest mUserId = %d -----", mUserId);
+ ALOGD("AAudioStreamRequest mProcessId = %d", mProcessId);
mConfiguration.dump();
}
diff --git a/media/liboboe/src/binding/OboeStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
similarity index 72%
rename from media/liboboe/src/binding/OboeStreamRequest.h
rename to media/libaaudio/src/binding/AAudioStreamRequest.h
index aab3c97..0fd28ba 100644
--- a/media/liboboe/src/binding/OboeStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -14,27 +14,27 @@
* limitations under the License.
*/
-#ifndef BINDING_OBOE_STREAM_REQUEST_H
-#define BINDING_OBOE_STREAM_REQUEST_H
+#ifndef BINDING_AAUDIO_STREAM_REQUEST_H
+#define BINDING_AAUDIO_STREAM_REQUEST_H
#include <stdint.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
-#include "binding/OboeStreamConfiguration.h"
+#include "binding/AAudioStreamConfiguration.h"
using android::status_t;
using android::Parcel;
using android::Parcelable;
-namespace oboe {
+namespace aaudio {
-class OboeStreamRequest : public Parcelable {
+class AAudioStreamRequest : public Parcelable {
public:
- OboeStreamRequest();
- virtual ~OboeStreamRequest();
+ AAudioStreamRequest();
+ virtual ~AAudioStreamRequest();
uid_t getUserId() const {
return mUserId;
@@ -52,7 +52,7 @@
mProcessId = processId;
}
- OboeStreamConfiguration &getConfiguration() {
+ AAudioStreamConfiguration &getConfiguration() {
return mConfiguration;
}
@@ -60,16 +60,16 @@
virtual status_t readFromParcel(const Parcel* parcel) override;
- oboe_result_t validate();
+ aaudio_result_t validate();
void dump();
protected:
- OboeStreamConfiguration mConfiguration;
+ AAudioStreamConfiguration mConfiguration;
uid_t mUserId;
pid_t mProcessId;
};
-} /* namespace oboe */
+} /* namespace aaudio */
-#endif //BINDING_OBOE_STREAM_REQUEST_H
+#endif //BINDING_AAUDIO_STREAM_REQUEST_H
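A minimal sketch (not part of this patch) showing that a request is just the caller's ids plus a nested configuration; only members visible in this header are used:

    #include "binding/AAudioStreamRequest.h"

    static aaudio_result_t sketchPrepareRequest(aaudio::AAudioStreamRequest &request) {
        aaudio::AAudioStreamConfiguration &config = request.getConfiguration();
        config.setSampleRate(44100);
        config.setSamplesPerFrame(2);
        config.setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);
        aaudio_result_t result = request.validate();   // delegates to the configuration
        if (result == AAUDIO_OK) {
            request.dump();                            // logs user id, process id and configuration
        }
        return result;
    }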
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
similarity index 86%
rename from media/liboboe/src/binding/AudioEndpointParcelable.cpp
rename to media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index 096a819..f40ee02 100644
--- a/media/liboboe/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -20,7 +20,7 @@
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
-#include "binding/OboeServiceDefinitions.h"
+#include "binding/AAudioServiceDefinitions.h"
#include "binding/RingBufferParcelable.h"
#include "binding/AudioEndpointParcelable.h"
@@ -29,11 +29,11 @@
using android::Parcel;
using android::Parcelable;
-using namespace oboe;
+using namespace aaudio;
/**
* Container for information about the message queues plus
- * general stream information needed by Oboe clients.
+ * general stream information needed by AAudio clients.
* It contains no addresses, just sizes, offsets and file descriptors for
* shared memory that can be passed through Binder.
*/
@@ -47,7 +47,7 @@
*/
int32_t AudioEndpointParcelable::addFileDescriptor(int fd, int32_t sizeInBytes) {
if (mNumSharedMemories >= MAX_SHARED_MEMORIES) {
- return OBOE_ERROR_OUT_OF_RANGE;
+ return AAUDIO_ERROR_OUT_OF_RANGE;
}
int32_t index = mNumSharedMemories++;
mSharedMemories[index].setup(fd, sizeInBytes);
@@ -81,45 +81,45 @@
return NO_ERROR; // TODO check for errors above
}
-oboe_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
+aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
// TODO error check
mUpMessageQueueParcelable.resolve(mSharedMemories, &descriptor->upMessageQueueDescriptor);
mDownMessageQueueParcelable.resolve(mSharedMemories,
&descriptor->downMessageQueueDescriptor);
mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
mDownDataQueueParcelable.resolve(mSharedMemories, &descriptor->downDataQueueDescriptor);
- return OBOE_OK;
+ return AAUDIO_OK;
}
-oboe_result_t AudioEndpointParcelable::validate() {
- oboe_result_t result;
+aaudio_result_t AudioEndpointParcelable::validate() {
+ aaudio_result_t result;
if (mNumSharedMemories < 0 || mNumSharedMemories >= MAX_SHARED_MEMORIES) {
ALOGE("AudioEndpointParcelable invalid mNumSharedMemories = %d", mNumSharedMemories);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
for (int i = 0; i < mNumSharedMemories; i++) {
result = mSharedMemories[i].validate();
- if (result != OBOE_OK) {
+ if (result != AAUDIO_OK) {
return result;
}
}
- if ((result = mUpMessageQueueParcelable.validate()) != OBOE_OK) {
+ if ((result = mUpMessageQueueParcelable.validate()) != AAUDIO_OK) {
ALOGE("AudioEndpointParcelable invalid mUpMessageQueueParcelable = %d", result);
return result;
}
- if ((result = mDownMessageQueueParcelable.validate()) != OBOE_OK) {
+ if ((result = mDownMessageQueueParcelable.validate()) != AAUDIO_OK) {
ALOGE("AudioEndpointParcelable invalid mDownMessageQueueParcelable = %d", result);
return result;
}
- if ((result = mUpDataQueueParcelable.validate()) != OBOE_OK) {
+ if ((result = mUpDataQueueParcelable.validate()) != AAUDIO_OK) {
ALOGE("AudioEndpointParcelable invalid mUpDataQueueParcelable = %d", result);
return result;
}
- if ((result = mDownDataQueueParcelable.validate()) != OBOE_OK) {
+ if ((result = mDownDataQueueParcelable.validate()) != AAUDIO_OK) {
ALOGE("AudioEndpointParcelable invalid mDownDataQueueParcelable = %d", result);
return result;
}
- return OBOE_OK;
+ return AAUDIO_OK;
}
void AudioEndpointParcelable::dump() {
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
similarity index 89%
rename from media/liboboe/src/binding/AudioEndpointParcelable.h
rename to media/libaaudio/src/binding/AudioEndpointParcelable.h
index 6bdd8a4..d4646d0 100644
--- a/media/liboboe/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -23,18 +23,18 @@
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
-#include "binding/OboeServiceDefinitions.h"
+#include "binding/AAudioServiceDefinitions.h"
#include "binding/RingBufferParcelable.h"
using android::status_t;
using android::Parcel;
using android::Parcelable;
-namespace oboe {
+namespace aaudio {
/**
* Container for information about the message queues plus
- * general stream information needed by Oboe clients.
+ * general stream information needed by AAudio clients.
* It contains no addresses, just sizes, offsets and file descriptors for
* shared memory that can be passed through Binder.
*/
@@ -53,9 +53,9 @@
virtual status_t readFromParcel(const Parcel* parcel) override;
- oboe_result_t resolve(EndpointDescriptor *descriptor);
+ aaudio_result_t resolve(EndpointDescriptor *descriptor);
- oboe_result_t validate();
+ aaudio_result_t validate();
void dump();
@@ -71,6 +71,6 @@
SharedMemoryParcelable mSharedMemories[MAX_SHARED_MEMORIES];
};
-} /* namespace oboe */
+} /* namespace aaudio */
#endif //BINDING_AUDIOENDPOINTPARCELABLE_H
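A minimal sketch (not part of this patch) of how the client side turns this parcelable into usable addresses; the EndpointDescriptor type comes from AAudioServiceDefinitions.h above:

    #include <binder/Parcel.h>
    #include "binding/AudioEndpointParcelable.h"

    static aaudio_result_t sketchResolveEndpoint(const android::Parcel *reply,
                                                 aaudio::EndpointDescriptor *descriptor) {
        aaudio::AudioEndpointParcelable parcelable;
        parcelable.readFromParcel(reply);               // only sizes, offsets and fds cross the Parcel
        aaudio_result_t result = parcelable.validate(); // sanity-check the queue descriptions
        if (result != AAUDIO_OK) {
            return result;
        }
        return parcelable.resolve(descriptor);          // mmap the shared memory, fill in real addresses
    }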
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
new file mode 100644
index 0000000..899ebc0
--- /dev/null
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "utility/AAudioUtilities.h"
+
+namespace android {
+
+/**
+ * This is used by the AAudio Client to talk to the AAudio Service.
+ *
+ * The order of parameters in the Parcels must match with code in AAudioService.cpp.
+ */
+class BpAAudioService : public BpInterface<IAAudioService>
+{
+public:
+ explicit BpAAudioService(const sp<IBinder>& impl)
+ : BpInterface<IAAudioService>(impl)
+ {
+ }
+
+ virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configuration) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ request.writeToParcel(&data);
+ status_t err = remote()->transact(OPEN_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_handle_t stream;
+ reply.readInt32(&stream);
+ configuration.readFromParcel(&reply);
+ return stream;
+ }
+
+ virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable) {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ parcelable.readFromParcel(&reply);
+ parcelable.dump();
+ aaudio_result_t result = parcelable.validate();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+ reply.readInt32(&result);
+ return result;
+ }
+
+ // TODO should we wait for a reply?
+ virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(START_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
+ aaudio_nanoseconds_t periodNanoseconds)
+ override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ data.writeInt32((int32_t) clientThreadId);
+ data.writeInt64(periodNanoseconds);
+ status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId)
+ override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ data.writeInt32((int32_t) clientThreadId);
+ status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+};
+
+// Implement an interface to the service.
+// This is here so that you don't have to link with the libaaudio static library.
+IMPLEMENT_META_INTERFACE(AAudioService, "IAAudioService");
+
+// The order of parameters in the Parcels must match with code in BpAAudioService
+
+status_t BnAAudioService::onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags) {
+ AAudioStream stream;
+ aaudio::AAudioStreamRequest request;
+ aaudio::AAudioStreamConfiguration configuration;
+ pid_t pid;
+ aaudio_nanoseconds_t nanoseconds;
+ aaudio_result_t result;
+ ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
+ data.checkInterface(this);
+
+ switch(code) {
+ case OPEN_STREAM: {
+ request.readFromParcel(&data);
+ stream = openStream(request, configuration);
+ ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
+ reply->writeInt32(stream);
+ configuration.writeToParcel(reply);
+ return NO_ERROR;
+ } break;
+
+ case CLOSE_STREAM: {
+ data.readInt32(&stream);
+ ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
+ result = closeStream(stream);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case GET_STREAM_DESCRIPTION: {
+ data.readInt32(&stream);
+ ALOGD("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
+ aaudio::AudioEndpointParcelable parcelable;
+ result = getStreamDescription(stream, parcelable);
+ if (result != AAUDIO_OK) {
+ return AAudioConvert_aaudioToAndroidStatus(result);
+ }
+ parcelable.dump();
+ result = parcelable.validate();
+ if (result != AAUDIO_OK) {
+ return AAudioConvert_aaudioToAndroidStatus(result);
+ }
+ parcelable.writeToParcel(reply);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case START_STREAM: {
+ data.readInt32(&stream);
+ result = startStream(stream);
+ ALOGD("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case PAUSE_STREAM: {
+ data.readInt32(&stream);
+ result = pauseStream(stream);
+ ALOGD("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case FLUSH_STREAM: {
+ data.readInt32(&stream);
+ result = flushStream(stream);
+ ALOGD("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case REGISTER_AUDIO_THREAD: {
+ data.readInt32(&stream);
+ data.readInt32(&pid);
+ data.readInt64(&nanoseconds);
+ result = registerAudioThread(stream, pid, nanoseconds);
+ ALOGD("BnAAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case UNREGISTER_AUDIO_THREAD: {
+ data.readInt32(&stream);
+ data.readInt32(&pid);
+ result = unregisterAudioThread(stream, pid);
+ ALOGD("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ default:
+ // ALOGW("BnAAudioService::onTransact not handled %u", code);
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+} /* namespace android */
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
new file mode 100644
index 0000000..f3b297e
--- /dev/null
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_IAAUDIOSERVICE_H
+#define BINDING_IAAUDIOSERVICE_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+
+
+namespace android {
+
+// Interface (our AIDL) - Shared by server and client
+class IAAudioService : public IInterface {
+public:
+
+ DECLARE_META_INTERFACE(AAudioService);
+
+ /**
+ * @param request info needed to create the stream
+ * @param configuration contains information about the created stream
+ * @return handle to the stream or a negative error
+ */
+ virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configuration) = 0;
+
+ virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) = 0;
+
+ /* Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+ virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable) = 0;
+
+ /**
+ * Start the flow of data.
+ */
+ virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Stop the flow of data such that start() can resume without loss of data.
+ */
+ virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ */
+ virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Manage the specified thread as a low latency audio thread.
+ */
+ virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
+ aaudio_nanoseconds_t periodNanoseconds) = 0;
+
+ virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId) = 0;
+};
+
+class BnAAudioService : public BnInterface<IAAudioService> {
+public:
+ virtual status_t onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags = 0);
+
+};
+
+} /* namespace android */
+
+#endif //BINDING_IAAUDIOSERVICE_H
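A minimal sketch (not part of this patch) of driving a stream through this interface once an android::sp<IAAudioService> has been obtained (AudioStreamInternal.cpp below shows the actual service lookup); per the openStream() contract above, a negative handle doubles as an error code:

    #include "binding/IAAudioService.h"

    static aaudio_result_t sketchRunStream(const android::sp<android::IAAudioService> &service,
                                           aaudio::AAudioStreamRequest &request) {
        aaudio::AAudioStreamConfiguration configuration;
        aaudio_handle_t handle = service->openStream(request, configuration);
        if (handle < 0) {
            return handle;                          // negative error, see openStream() doc above
        }
        aaudio_result_t result = service->startStream(handle);
        if (result == AAUDIO_OK) {
            service->pauseStream(handle);           // stop so that a later start() can resume cleanly
        }
        service->closeStream(handle);
        return result;
    }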
diff --git a/media/liboboe/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
similarity index 87%
rename from media/liboboe/src/binding/RingBufferParcelable.cpp
rename to media/libaaudio/src/binding/RingBufferParcelable.cpp
index f097655..3a92929 100644
--- a/media/liboboe/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -18,11 +18,11 @@
#include <binder/Parcelable.h>
-#include "binding/OboeServiceDefinitions.h"
+#include "binding/AAudioServiceDefinitions.h"
#include "binding/SharedRegionParcelable.h"
#include "binding/RingBufferParcelable.h"
-using namespace oboe;
+using namespace aaudio;
RingBufferParcelable::RingBufferParcelable() {}
RingBufferParcelable::~RingBufferParcelable() {}
@@ -100,23 +100,23 @@
return NO_ERROR; // TODO check for errors above
}
-oboe_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
- oboe_result_t result;
+aaudio_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
+ aaudio_result_t result;
result = mReadCounterParcelable.resolve(memoryParcels,
(void **) &descriptor->readCounterAddress);
- if (result != OBOE_OK) {
+ if (result != AAUDIO_OK) {
return result;
}
result = mWriteCounterParcelable.resolve(memoryParcels,
(void **) &descriptor->writeCounterAddress);
- if (result != OBOE_OK) {
+ if (result != AAUDIO_OK) {
return result;
}
result = mDataParcelable.resolve(memoryParcels, (void **) &descriptor->dataAddress);
- if (result != OBOE_OK) {
+ if (result != AAUDIO_OK) {
return result;
}
@@ -124,36 +124,36 @@
descriptor->framesPerBurst = mFramesPerBurst;
descriptor->capacityInFrames = mCapacityInFrames;
descriptor->flags = mFlags;
- return OBOE_OK;
+ return AAUDIO_OK;
}
-oboe_result_t RingBufferParcelable::validate() {
- oboe_result_t result;
+aaudio_result_t RingBufferParcelable::validate() {
+ aaudio_result_t result;
if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
ALOGE("RingBufferParcelable invalid mCapacityInFrames = %d", mCapacityInFrames);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
if (mBytesPerFrame < 0 || mBytesPerFrame >= 256) {
ALOGE("RingBufferParcelable invalid mBytesPerFrame = %d", mBytesPerFrame);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
if (mFramesPerBurst < 0 || mFramesPerBurst >= 1024) {
ALOGE("RingBufferParcelable invalid mFramesPerBurst = %d", mFramesPerBurst);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
- if ((result = mReadCounterParcelable.validate()) != OBOE_OK) {
+ if ((result = mReadCounterParcelable.validate()) != AAUDIO_OK) {
ALOGE("RingBufferParcelable invalid mReadCounterParcelable = %d", result);
return result;
}
- if ((result = mWriteCounterParcelable.validate()) != OBOE_OK) {
+ if ((result = mWriteCounterParcelable.validate()) != AAUDIO_OK) {
ALOGE("RingBufferParcelable invalid mWriteCounterParcelable = %d", result);
return result;
}
- if ((result = mDataParcelable.validate()) != OBOE_OK) {
+ if ((result = mDataParcelable.validate()) != AAUDIO_OK) {
ALOGE("RingBufferParcelable invalid mDataParcelable = %d", result);
return result;
}
- return OBOE_OK;
+ return AAUDIO_OK;
}
diff --git a/media/liboboe/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
similarity index 91%
rename from media/liboboe/src/binding/RingBufferParcelable.h
rename to media/libaaudio/src/binding/RingBufferParcelable.h
index 9bb695a..3f82c79 100644
--- a/media/liboboe/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -21,10 +21,10 @@
#include <binder/Parcelable.h>
-#include "binding/OboeServiceDefinitions.h"
+#include "binding/AAudioServiceDefinitions.h"
#include "binding/SharedRegionParcelable.h"
-namespace oboe {
+namespace aaudio {
class RingBufferParcelable : public Parcelable {
public:
@@ -62,9 +62,9 @@
virtual status_t readFromParcel(const Parcel* parcel) override;
- oboe_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
+ aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
- oboe_result_t validate();
+ aaudio_result_t validate();
void dump();
@@ -78,6 +78,6 @@
RingbufferFlags mFlags = RingbufferFlags::NONE;
};
-} /* namespace oboe */
+} /* namespace aaudio */
#endif //BINDING_RINGBUFFER_PARCELABLE_H
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
similarity index 85%
rename from media/liboboe/src/binding/SharedMemoryParcelable.cpp
rename to media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 5b739c0..1102dec 100644
--- a/media/liboboe/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -17,7 +17,7 @@
#include <stdint.h>
#include <sys/mman.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include <binder/Parcelable.h>
@@ -28,7 +28,7 @@
using android::Parcel;
using android::Parcelable;
-using namespace oboe;
+using namespace aaudio;
SharedMemoryParcelable::SharedMemoryParcelable() {}
SharedMemoryParcelable::~SharedMemoryParcelable() {};
@@ -56,48 +56,48 @@
// TODO Add code to unmmap()
-oboe_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
+aaudio_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
void **regionAddressPtr) {
if (offsetInBytes < 0) {
ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
- return OBOE_ERROR_OUT_OF_RANGE;
+ return AAUDIO_ERROR_OUT_OF_RANGE;
} else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
ALOGE("SharedMemoryParcelable out of range, offsetInBytes = %d, "
"sizeInBytes = %d, mSizeInBytes = %d",
offsetInBytes, sizeInBytes, mSizeInBytes);
- return OBOE_ERROR_OUT_OF_RANGE;
+ return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mResolvedAddress == nullptr) {
mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
MAP_SHARED, mFd, 0);
if (mResolvedAddress == nullptr) {
ALOGE("SharedMemoryParcelable mmap failed for fd = %d", mFd);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
}
*regionAddressPtr = mResolvedAddress + offsetInBytes;
ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
ALOGD("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
offsetInBytes, *regionAddressPtr);
- return OBOE_OK;
+ return AAUDIO_OK;
}
int32_t SharedMemoryParcelable::getSizeInBytes() {
return mSizeInBytes;
}
-oboe_result_t SharedMemoryParcelable::validate() {
- if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+aaudio_result_t SharedMemoryParcelable::validate() {
+ if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
ALOGE("SharedMemoryParcelable invalid mSizeInBytes = %d", mSizeInBytes);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mSizeInBytes > 0) {
if (mFd == -1) {
ALOGE("SharedMemoryParcelable uninitialized mFd = %d", mFd);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
}
- return OBOE_OK;
+ return AAUDIO_OK;
}
void SharedMemoryParcelable::dump() {
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
similarity index 85%
rename from media/liboboe/src/binding/SharedMemoryParcelable.h
rename to media/libaaudio/src/binding/SharedMemoryParcelable.h
index 9585779..7e0bf1a 100644
--- a/media/liboboe/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -27,12 +27,12 @@
using android::Parcel;
using android::Parcelable;
-namespace oboe {
+namespace aaudio {
// Arbitrary limits for sanity checks. TODO remove after debugging.
#define MAX_SHARED_MEMORIES (32)
-#define MAX_MMAP_OFFSET (32 * 1024)
-#define MAX_MMAP_SIZE (32 * 1024)
+#define MAX_MMAP_OFFSET_BYTES (32 * 1024 * 8)
+#define MAX_MMAP_SIZE_BYTES (32 * 1024 * 8)
/**
* This is a parcelable description of a shared memory referenced by a file descriptor.
@@ -49,11 +49,11 @@
virtual status_t readFromParcel(const Parcel* parcel) override;
- oboe_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
+ aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
int32_t getSizeInBytes();
- oboe_result_t validate();
+ aaudio_result_t validate();
void dump();
@@ -63,6 +63,6 @@
uint8_t *mResolvedAddress = nullptr;
};
-} /* namespace oboe */
+} /* namespace aaudio */
#endif //BINDING_SHAREDMEMORYPARCELABLE_H
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
similarity index 85%
rename from media/liboboe/src/binding/SharedRegionParcelable.cpp
rename to media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 86ce8f3..8ca0023 100644
--- a/media/liboboe/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -19,7 +19,7 @@
#include <sys/mman.h>
#include <binder/Parcelable.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include "binding/SharedMemoryParcelable.h"
#include "binding/SharedRegionParcelable.h"
@@ -29,7 +29,7 @@
using android::Parcel;
using android::Parcelable;
-using namespace oboe;
+using namespace aaudio;
SharedRegionParcelable::SharedRegionParcelable() {}
SharedRegionParcelable::~SharedRegionParcelable() {}
@@ -60,36 +60,36 @@
return NO_ERROR; // TODO check for errors above
}
-oboe_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
+aaudio_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
void **regionAddressPtr) {
if (mSizeInBytes == 0) {
*regionAddressPtr = nullptr;
- return OBOE_OK;
+ return AAUDIO_OK;
}
if (mSharedMemoryIndex < 0) {
ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
SharedMemoryParcelable *memoryParcel = &memoryParcels[mSharedMemoryIndex];
return memoryParcel->resolve(mOffsetInBytes, mSizeInBytes, regionAddressPtr);
}
-oboe_result_t SharedRegionParcelable::validate() {
- if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+aaudio_result_t SharedRegionParcelable::validate() {
+ if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
ALOGE("SharedRegionParcelable invalid mSizeInBytes = %d", mSizeInBytes);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mSizeInBytes > 0) {
- if (mOffsetInBytes < 0 || mOffsetInBytes >= MAX_MMAP_OFFSET) {
+ if (mOffsetInBytes < 0 || mOffsetInBytes >= MAX_MMAP_OFFSET_BYTES) {
ALOGE("SharedRegionParcelable invalid mOffsetInBytes = %d", mOffsetInBytes);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mSharedMemoryIndex < 0 || mSharedMemoryIndex >= MAX_SHARED_MEMORIES) {
ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
}
- return OBOE_OK;
+ return AAUDIO_OK;
}
void SharedRegionParcelable::dump() {
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.h b/media/libaaudio/src/binding/SharedRegionParcelable.h
similarity index 87%
rename from media/liboboe/src/binding/SharedRegionParcelable.h
rename to media/libaaudio/src/binding/SharedRegionParcelable.h
index bccdaa8..d6c2281 100644
--- a/media/liboboe/src/binding/SharedRegionParcelable.h
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.h
@@ -22,7 +22,7 @@
#include <sys/mman.h>
#include <binder/Parcelable.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include "binding/SharedMemoryParcelable.h"
@@ -30,7 +30,7 @@
using android::Parcel;
using android::Parcelable;
-namespace oboe {
+namespace aaudio {
class SharedRegionParcelable : public Parcelable {
public:
@@ -43,9 +43,9 @@
virtual status_t readFromParcel(const Parcel* parcel) override;
- oboe_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
+ aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
- oboe_result_t validate();
+ aaudio_result_t validate();
void dump();
@@ -55,6 +55,6 @@
int32_t mSizeInBytes = 0;
};
-} /* namespace oboe */
+} /* namespace aaudio */
#endif //BINDING_SHAREDREGIONPARCELABLE_H
diff --git a/media/liboboe/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
similarity index 90%
rename from media/liboboe/src/client/AudioEndpoint.cpp
rename to media/libaaudio/src/client/AudioEndpoint.cpp
index 160c37e..5cd9782 100644
--- a/media/liboboe/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -14,19 +14,19 @@
* limitations under the License.
*/
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <cassert>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include "AudioEndpointParcelable.h"
#include "AudioEndpoint.h"
-#include "OboeServiceMessage.h"
+#include "AAudioServiceMessage.h"
using namespace android;
-using namespace oboe;
+using namespace aaudio;
AudioEndpoint::AudioEndpoint()
: mOutputFreeRunning(false)
@@ -80,13 +80,13 @@
AudioEndpoint_validateQueueDescriptor("data", &pEndpointDescriptor->downDataQueueDescriptor);
}
-oboe_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
{
- oboe_result_t result = OBOE_OK;
+ aaudio_result_t result = AAUDIO_OK;
AudioEndpoint_validateDescriptor(pEndpointDescriptor); // FIXME remove after debugging
const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
- assert(descriptor->bytesPerFrame == sizeof(OboeServiceMessage));
+ assert(descriptor->bytesPerFrame == sizeof(AAudioServiceMessage));
assert(descriptor->readCounterAddress != nullptr);
assert(descriptor->writeCounterAddress != nullptr);
mUpCommandQueue = new FifoBuffer(
@@ -137,12 +137,12 @@
return result;
}
-oboe_result_t AudioEndpoint::readUpCommand(OboeServiceMessage *commandPtr)
+aaudio_result_t AudioEndpoint::readUpCommand(AAudioServiceMessage *commandPtr)
{
return mUpCommandQueue->read(commandPtr, 1);
}
-oboe_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
+aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
{
return mDownDataQueue->write(buffer, numFrames);
}
@@ -167,15 +167,15 @@
return mDownDataQueue->getWriteCounter();
}
-oboe_size_frames_t AudioEndpoint::setBufferSizeInFrames(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames)
+aaudio_size_frames_t AudioEndpoint::setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames)
{
if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
}
mDownDataQueue->setThreshold(requestedFrames);
*actualFrames = mDownDataQueue->getThreshold();
- return OBOE_OK;
+ return AAUDIO_OK;
}
int32_t AudioEndpoint::getBufferSizeInFrames() const
diff --git a/media/liboboe/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
similarity index 73%
rename from media/liboboe/src/client/AudioEndpoint.h
rename to media/libaaudio/src/client/AudioEndpoint.h
index 6ae8b72..e786513 100644
--- a/media/liboboe/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#ifndef OBOE_AUDIO_ENDPOINT_H
-#define OBOE_AUDIO_ENDPOINT_H
+#ifndef AAUDIO_AUDIO_ENDPOINT_H
+#define AAUDIO_AUDIO_ENDPOINT_H
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
-#include "OboeServiceMessage.h"
+#include "AAudioServiceMessage.h"
#include "AudioEndpointParcelable.h"
#include "fifo/FifoBuffer.h"
-namespace oboe {
+namespace aaudio {
#define ENDPOINT_DATA_QUEUE_SIZE_MIN 64
@@ -40,19 +40,19 @@
/**
* Configure based on the EndpointDescriptor.
*/
- oboe_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+ aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
/**
* Read from a command passed up from the Server.
* @return 1 if command received, 0 for no command, or negative error.
*/
- oboe_result_t readUpCommand(OboeServiceMessage *commandPtr);
+ aaudio_result_t readUpCommand(AAudioServiceMessage *commandPtr);
/**
* Non-blocking write.
* @return framesWritten or a negative error code.
*/
- oboe_result_t writeDataNow(const void *buffer, int32_t numFrames);
+ aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
/**
* Set the read index in the downData queue.
@@ -71,13 +71,13 @@
*/
bool isOutputFreeRunning() const { return mOutputFreeRunning; }
- int32_t setBufferSizeInFrames(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames);
- oboe_size_frames_t getBufferSizeInFrames() const;
+ int32_t setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames);
+ aaudio_size_frames_t getBufferSizeInFrames() const;
- oboe_size_frames_t getBufferCapacityInFrames() const;
+ aaudio_size_frames_t getBufferCapacityInFrames() const;
- oboe_size_frames_t getFullFramesAvailable();
+ aaudio_size_frames_t getFullFramesAvailable();
private:
FifoBuffer * mUpCommandQueue;
@@ -87,6 +87,6 @@
fifo_counter_t mDataWriteCounter; // only used if free-running
};
-} // namespace oboe
+} // namespace aaudio
-#endif //OBOE_AUDIO_ENDPOINT_H
+#endif //AAUDIO_AUDIO_ENDPOINT_H
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
new file mode 100644
index 0000000..19f2300
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -0,0 +1,564 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <assert.h>
+
+#include <binder/IServiceManager.h>
+#include <utils/Mutex.h>
+
+#include <aaudio/AAudio.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+
+#include "core/AudioStreamBuilder.h"
+#include "AudioStreamInternal.h"
+
+#define LOG_TIMESTAMPS 0
+
+using android::String16;
+using android::IServiceManager;
+using android::defaultServiceManager;
+using android::interface_cast;
+using android::Mutex;
+
+using namespace aaudio;
+
+static android::Mutex gServiceLock;
+static sp<IAAudioService> gAAudioService;
+
+#define AAUDIO_SERVICE_NAME "AAudioService"
+
+// Helper function to get access to the "AAudioService" service.
+// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
+static const sp<IAAudioService> getAAudioService() {
+ sp<IBinder> binder;
+ Mutex::Autolock _l(gServiceLock);
+ if (gAAudioService == 0) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ // Try several times to get the service.
+ int retries = 4;
+ do {
+ binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
+ if (binder != 0) {
+ break;
+ }
+ } while (retries-- > 0);
+
+ if (binder != 0) {
+ // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
+ // TODO Create a DeathRecipient that disconnects all active streams.
+ gAAudioService = interface_cast<IAAudioService>(binder);
+ } else {
+ ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
+ }
+ }
+ return gAAudioService;
+}
+
+AudioStreamInternal::AudioStreamInternal()
+ : AudioStream()
+ , mClockModel()
+ , mAudioEndpoint()
+ , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
+ , mFramesPerBurst(16)
+{
+}
+
+AudioStreamInternal::~AudioStreamInternal() {
+}
+
+aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
+
+ const sp<IAAudioService>& service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+
+ aaudio_result_t result = AAUDIO_OK;
+ AAudioStreamRequest request;
+ AAudioStreamConfiguration configuration;
+
+ result = AudioStream::open(builder);
+ if (result < 0) {
+ return result;
+ }
+
+ // Build the request to send to the server.
+ request.setUserId(getuid());
+ request.setProcessId(getpid());
+ request.getConfiguration().setDeviceId(getDeviceId());
+ request.getConfiguration().setSampleRate(getSampleRate());
+ request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
+ request.getConfiguration().setAudioFormat(getFormat());
+ request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
+ request.dump();
+
+ mServiceStreamHandle = service->openStream(request, configuration);
+ ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
+ (unsigned int)mServiceStreamHandle);
+ if (mServiceStreamHandle < 0) {
+ result = mServiceStreamHandle;
+        ALOGE("AudioStreamInternal.open(): openStream aaudio_result_t = 0x%08X", result);
+ } else {
+ result = configuration.validate();
+ if (result != AAUDIO_OK) {
+ close();
+ return result;
+ }
+ // Save results of the open.
+ setSampleRate(configuration.getSampleRate());
+ setSamplesPerFrame(configuration.getSamplesPerFrame());
+ setFormat(configuration.getAudioFormat());
+
+ aaudio::AudioEndpointParcelable parcelable;
+ result = service->getStreamDescription(mServiceStreamHandle, parcelable);
+ if (result != AAUDIO_OK) {
+            ALOGE("AudioStreamInternal.open(): getStreamDescription returns %d", result);
+ service->closeStream(mServiceStreamHandle);
+ return result;
+ }
+ // resolve parcelable into a descriptor
+ parcelable.resolve(&mEndpointDescriptor);
+
+ // Configure endpoint based on descriptor.
+ mAudioEndpoint.configure(&mEndpointDescriptor);
+
+ mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+ assert(mFramesPerBurst >= 16);
+ assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);
+
+ mClockModel.setSampleRate(getSampleRate());
+ mClockModel.setFramesPerBurst(mFramesPerBurst);
+
+ setState(AAUDIO_STREAM_STATE_OPEN);
+ }
+ return result;
+}
+
+aaudio_result_t AudioStreamInternal::close() {
+ ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+ if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
+ aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
+ mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
+ const sp<IAAudioService>& aaudioService = getAAudioService();
+ if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+ aaudioService->closeStream(serviceStreamHandle);
+ return AAUDIO_OK;
+ } else {
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+}
+
+aaudio_result_t AudioStreamInternal::requestStart()
+{
+ aaudio_nanoseconds_t startTime;
+ ALOGD("AudioStreamInternal(): start()");
+ if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ const sp<IAAudioService>& aaudioService = getAAudioService();
+ if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+ startTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+ mClockModel.start(startTime);
+ processTimestamp(0, startTime);
+ setState(AAUDIO_STREAM_STATE_STARTING);
+ return aaudioService->startStream(mServiceStreamHandle);
+}
+
+aaudio_result_t AudioStreamInternal::requestPause()
+{
+ ALOGD("AudioStreamInternal(): pause()");
+ if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ const sp<IAAudioService>& aaudioService = getAAudioService();
+ if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+ mClockModel.stop(AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC));
+ setState(AAUDIO_STREAM_STATE_PAUSING);
+ return aaudioService->pauseStream(mServiceStreamHandle);
+}
+
+aaudio_result_t AudioStreamInternal::requestFlush() {
+ ALOGD("AudioStreamInternal(): flush()");
+ if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ const sp<IAAudioService>& aaudioService = getAAudioService();
+ if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+    setState(AAUDIO_STREAM_STATE_FLUSHING);
+ return aaudioService->flushStream(mServiceStreamHandle);
+}
+
+void AudioStreamInternal::onFlushFromServer() {
+ ALOGD("AudioStreamInternal(): onFlushFromServer()");
+ aaudio_position_frames_t readCounter = mAudioEndpoint.getDownDataReadCounter();
+ aaudio_position_frames_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+ // Bump offset so caller does not see the retrograde motion in getFramesRead().
+ aaudio_position_frames_t framesFlushed = writeCounter - readCounter;
+ mFramesOffsetFromService += framesFlushed;
+ // Flush written frames by forcing writeCounter to readCounter.
+ // This is because we cannot move the read counter in the hardware.
+ mAudioEndpoint.setDownDataWriteCounter(readCounter);
+}
+
+aaudio_result_t AudioStreamInternal::requestStop()
+{
+ // TODO better implementation of requestStop()
+ aaudio_result_t result = requestPause();
+ if (result == AAUDIO_OK) {
+ aaudio_stream_state_t state;
+ result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
+ &state,
+ 500 * AAUDIO_NANOS_PER_MILLISECOND);// TODO temporary code
+ if (result == AAUDIO_OK) {
+ result = requestFlush();
+ }
+ }
+ return result;
+}
+
+aaudio_result_t AudioStreamInternal::registerThread() {
+ ALOGD("AudioStreamInternal(): registerThread()");
+ if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ const sp<IAAudioService>& aaudioService = getAAudioService();
+ if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return aaudioService->registerAudioThread(mServiceStreamHandle,
+ gettid(),
+ getPeriodNanoseconds());
+}
+
+aaudio_result_t AudioStreamInternal::unregisterThread() {
+ ALOGD("AudioStreamInternal(): unregisterThread()");
+ if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ const sp<IAAudioService>& aaudioService = getAAudioService();
+ if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return aaudioService->unregisterAudioThread(mServiceStreamHandle, gettid());
+}
+
+// TODO use aaudio_clockid_t all the way down to AudioClock
+aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+ aaudio_position_frames_t *framePosition,
+ aaudio_nanoseconds_t *timeNanoseconds) {
+// TODO implement using real HAL
+ aaudio_nanoseconds_t time = AudioClock::getNanoseconds();
+ *framePosition = mClockModel.convertTimeToPosition(time);
+ *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamInternal::updateState() {
+ return processCommands();
+}
+
+#if LOG_TIMESTAMPS
+static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
+ static int64_t oldPosition = 0;
+ static aaudio_nanoseconds_t oldTime = 0;
+ int64_t framePosition = command.timestamp.position;
+ aaudio_nanoseconds_t nanoTime = command.timestamp.timestamp;
+ ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+ (long long) framePosition,
+ (long long) nanoTime);
+ int64_t nanosDelta = nanoTime - oldTime;
+ if (nanosDelta > 0 && oldTime > 0) {
+ int64_t framesDelta = framePosition - oldPosition;
+ int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
+ ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+ ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+ ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+ }
+ oldPosition = framePosition;
+ oldTime = nanoTime;
+}
+#endif
+
+aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
+ aaudio_position_frames_t framePosition = 0;
+#if LOG_TIMESTAMPS
+    AudioStreamInternal_LogTimestamp(*message);
+#endif
+ framePosition = message->timestamp.position;
+ processTimestamp(framePosition, message->timestamp.timestamp);
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
+ aaudio_result_t result = AAUDIO_OK;
+ ALOGD("processCommands() got event %d", message->event.event);
+ switch (message->event.event) {
+ case AAUDIO_SERVICE_EVENT_STARTED:
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ break;
+ case AAUDIO_SERVICE_EVENT_PAUSED:
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+ setState(AAUDIO_STREAM_STATE_PAUSED);
+ break;
+ case AAUDIO_SERVICE_EVENT_FLUSHED:
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+ setState(AAUDIO_STREAM_STATE_FLUSHED);
+ onFlushFromServer();
+ break;
+ case AAUDIO_SERVICE_EVENT_CLOSED:
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+ setState(AAUDIO_STREAM_STATE_CLOSED);
+ break;
+ case AAUDIO_SERVICE_EVENT_DISCONNECTED:
+ result = AAUDIO_ERROR_DISCONNECTED;
+ ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
+ break;
+ default:
+ ALOGW("WARNING - processCommands() Unrecognized event = %d",
+ (int) message->event.event);
+ break;
+ }
+ return result;
+}
+
+// Process all the commands coming from the server.
+aaudio_result_t AudioStreamInternal::processCommands() {
+ aaudio_result_t result = AAUDIO_OK;
+
+ while (result == AAUDIO_OK) {
+ AAudioServiceMessage message;
+ if (mAudioEndpoint.readUpCommand(&message) != 1) {
+ break; // no command this time, no problem
+ }
+ switch (message.what) {
+ case AAudioServiceMessage::code::TIMESTAMP:
+ result = onTimestampFromServer(&message);
+ break;
+
+ case AAudioServiceMessage::code::EVENT:
+ result = onEventFromServer(&message);
+ break;
+
+ default:
+ ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+ (int) message.what);
+ result = AAUDIO_ERROR_UNEXPECTED_VALUE;
+ break;
+ }
+ }
+ return result;
+}
+
+// Write the data, block if needed and timeoutMillis > 0
+aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+{
+ aaudio_result_t result = AAUDIO_OK;
+ uint8_t* source = (uint8_t*)buffer;
+ aaudio_nanoseconds_t currentTimeNanos = AudioClock::getNanoseconds();
+ aaudio_nanoseconds_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+ int32_t framesLeft = numFrames;
+// ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
+// buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
+
+ // Write until all the data has been written or until a timeout occurs.
+ while (framesLeft > 0) {
+ // The call to writeNow() will not block. It will just write as much as it can.
+ aaudio_nanoseconds_t wakeTimeNanos = 0;
+ aaudio_result_t framesWritten = writeNow(source, framesLeft,
+ currentTimeNanos, &wakeTimeNanos);
+// ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
+ if (framesWritten < 0) {
+ result = framesWritten;
+ break;
+ }
+ framesLeft -= (int32_t) framesWritten;
+ source += framesWritten * getBytesPerFrame();
+
+ // Should we block?
+ if (timeoutNanoseconds == 0) {
+ break; // don't block
+ } else if (framesLeft > 0) {
+ //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
+ // clip the wake time to something reasonable
+ if (wakeTimeNanos < currentTimeNanos) {
+ wakeTimeNanos = currentTimeNanos;
+ }
+ if (wakeTimeNanos > deadlineNanos) {
+ // If we time out, just return the framesWritten so far.
+ ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
+ break;
+ }
+
+ //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
+ // (long long) (wakeTimeNanos - currentTimeNanos));
+ AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
+ currentTimeNanos = AudioClock::getNanoseconds();
+ }
+ }
+
+ // return error or framesWritten
+ return (result < 0) ? result : numFrames - framesLeft;
+}
+
+// Write as much data as we can without blocking.
+aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
+ aaudio_nanoseconds_t currentNanoTime, aaudio_nanoseconds_t *wakeTimePtr) {
+ {
+ aaudio_result_t result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+ }
+
+ if (mAudioEndpoint.isOutputFreeRunning()) {
+ // Update data queue based on the timing model.
+ int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
+ // If the read index passed the write index then consider it an underrun.
+ if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+ mXRunCount++;
+ }
+ }
+    // TODO else maybe query the endpoint, since the counter is set by the actual reader
+
+ // Write some data to the buffer.
+ int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
+ if (framesWritten > 0) {
+ incrementFramesWritten(framesWritten);
+ }
+ //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+ // numFrames, framesWritten);
+
+ // Calculate an ideal time to wake up.
+ if (wakeTimePtr != nullptr && framesWritten >= 0) {
+ // By default wake up a few milliseconds from now. // TODO review
+ aaudio_nanoseconds_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
+ switch (getState()) {
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STARTING:
+ if (framesWritten != 0) {
+ // Don't wait to write more data. Just prime the buffer.
+ wakeTime = currentNanoTime;
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ {
+ uint32_t burstSize = mFramesPerBurst;
+ if (burstSize < 32) {
+ burstSize = 32; // TODO review
+ }
+
+ uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ }
+ break;
+ default:
+ break;
+ }
+ *wakeTimePtr = wakeTime;
+
+ }
+// ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// (unsigned long long)currentNanoTime,
+// (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
+// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+ return framesWritten;
+}
+
+aaudio_result_t AudioStreamInternal::waitForStateChange(aaudio_stream_state_t currentState,
+ aaudio_stream_state_t *nextState,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+
+{
+ aaudio_result_t result = processCommands();
+// ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+ // TODO replace this polling with a timed sleep on a futex on the message queue
+ int32_t durationNanos = 5 * AAUDIO_NANOS_PER_MILLISECOND;
+ aaudio_stream_state_t state = getState();
+// ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+ while (state == currentState && timeoutNanoseconds > 0) {
+ // TODO use futex from service message queue
+ if (durationNanos > timeoutNanoseconds) {
+ durationNanos = timeoutNanoseconds;
+ }
+ AudioClock::sleepForNanos(durationNanos);
+ timeoutNanoseconds -= durationNanos;
+
+ result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ state = getState();
+// ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+ }
+ if (nextState != nullptr) {
+ *nextState = state;
+ }
+ return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
+}
+
+
+void AudioStreamInternal::processTimestamp(uint64_t position, aaudio_nanoseconds_t time) {
+ mClockModel.processTimestamp( position, time);
+}
+
+aaudio_result_t AudioStreamInternal::setBufferSize(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames) {
+ return mAudioEndpoint.setBufferSizeInFrames(requestedFrames, actualFrames);
+}
+
+aaudio_size_frames_t AudioStreamInternal::getBufferSize() const
+{
+ return mAudioEndpoint.getBufferSizeInFrames();
+}
+
+aaudio_size_frames_t AudioStreamInternal::getBufferCapacity() const
+{
+ return mAudioEndpoint.getBufferCapacityInFrames();
+}
+
+aaudio_size_frames_t AudioStreamInternal::getFramesPerBurst() const
+{
+ return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+}
+
+aaudio_position_frames_t AudioStreamInternal::getFramesRead()
+{
+ aaudio_position_frames_t framesRead =
+ mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ + mFramesOffsetFromService;
+ // Prevent retrograde motion.
+ if (framesRead < mLastFramesRead) {
+ framesRead = mLastFramesRead;
+ } else {
+ mLastFramesRead = framesRead;
+ }
+ ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+ return framesRead;
+}
+
+// TODO implement getTimestamp
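
The write() loop added above is the heart of the client side: it alternates non-blocking writes into the shared FIFO with sleeps predicted by the clock model, until either all frames are written or the caller's timeout expires. The following is a condensed, self-contained sketch of that pattern only; writeNonBlocking() and the fixed 2 ms wake estimate are illustrative stand-ins, not the class's real AudioEndpoint / IsochronousClockModel calls.

    #include <chrono>
    #include <cstdint>
    #include <thread>

    // Stand-in for AudioEndpoint::writeDataNow(); pretends at most one 96-frame burst fits per call.
    static int32_t writeNonBlocking(const uint8_t* /*src*/, int32_t frames) {
        return frames > 96 ? 96 : frames;
    }

    static int64_t nowNanos() {
        return std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now().time_since_epoch()).count();
    }

    // Write numFrames, blocking for at most timeoutNanos; returns frames written or a negative error.
    int32_t timedWrite(const uint8_t* buffer, int32_t numFrames, int32_t bytesPerFrame,
                       int64_t timeoutNanos) {
        const int64_t deadline = nowNanos() + timeoutNanos;
        int32_t framesLeft = numFrames;
        const uint8_t* src = buffer;
        while (framesLeft > 0) {
            const int32_t written = writeNonBlocking(src, framesLeft);  // never blocks
            if (written < 0) return written;                            // propagate the error code
            framesLeft -= written;
            src += written * bytesPerFrame;
            if (timeoutNanos == 0) break;                               // caller asked for non-blocking
            const int64_t wake = nowNanos() + 2000000;                  // stand-in for the model's estimate
            if (wake > deadline) break;                                 // timed out: return the partial count
            std::this_thread::sleep_for(std::chrono::nanoseconds(wake - nowNanos()));
        }
        return numFrames - framesLeft;
    }
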
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
new file mode 100644
index 0000000..666df3a
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AUDIOSTREAMINTERNAL_H
+#define AAUDIO_AUDIOSTREAMINTERNAL_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/IAAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "client/IsochronousClockModel.h"
+#include "client/AudioEndpoint.h"
+#include "core/AudioStream.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+// A stream that talks to the AAudioService or directly to a HAL.
+class AudioStreamInternal : public AudioStream {
+
+public:
+ AudioStreamInternal();
+ virtual ~AudioStreamInternal();
+
+ // =========== Begin ABSTRACT methods ===========================
+ virtual aaudio_result_t requestStart() override;
+
+ virtual aaudio_result_t requestPause() override;
+
+ virtual aaudio_result_t requestFlush() override;
+
+ virtual aaudio_result_t requestStop() override;
+
+ // TODO use aaudio_clockid_t all the way down to AudioClock
+ virtual aaudio_result_t getTimestamp(clockid_t clockId,
+ aaudio_position_frames_t *framePosition,
+ aaudio_nanoseconds_t *timeNanoseconds) override;
+
+
+ virtual aaudio_result_t updateState() override;
+ // =========== End ABSTRACT methods ===========================
+
+ virtual aaudio_result_t open(const AudioStreamBuilder &builder) override;
+
+ virtual aaudio_result_t close() override;
+
+ virtual aaudio_result_t write(const void *buffer,
+ int32_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds) override;
+
+ virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
+ aaudio_stream_state_t *nextState,
+ aaudio_nanoseconds_t timeoutNanoseconds) override;
+
+ virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames) override;
+
+ virtual aaudio_size_frames_t getBufferSize() const override;
+
+ virtual aaudio_size_frames_t getBufferCapacity() const override;
+
+ virtual aaudio_size_frames_t getFramesPerBurst() const override;
+
+ virtual aaudio_position_frames_t getFramesRead() override;
+
+ virtual int32_t getXRunCount() const override {
+ return mXRunCount;
+ }
+
+ virtual aaudio_result_t registerThread() override;
+
+ virtual aaudio_result_t unregisterThread() override;
+
+protected:
+
+ aaudio_result_t processCommands();
+
+/**
+ * Low level write that will not block. It will just write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames written or a negative error code.
+ */
+ virtual aaudio_result_t writeNow(const void *buffer,
+ int32_t numFrames,
+ aaudio_nanoseconds_t currentTimeNanos,
+ aaudio_nanoseconds_t *wakeTimePtr);
+
+ void onFlushFromServer();
+
+ aaudio_result_t onEventFromServer(AAudioServiceMessage *message);
+
+ aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
+
+private:
+ IsochronousClockModel mClockModel;
+ AudioEndpoint mAudioEndpoint;
+ aaudio_handle_t mServiceStreamHandle;
+ EndpointDescriptor mEndpointDescriptor;
+ // Offset from underlying frame position.
+ aaudio_position_frames_t mFramesOffsetFromService = 0;
+ aaudio_position_frames_t mLastFramesRead = 0;
+ aaudio_size_frames_t mFramesPerBurst;
+ int32_t mXRunCount = 0;
+
+ void processTimestamp(uint64_t position, aaudio_nanoseconds_t time);
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AUDIOSTREAMINTERNAL_H
diff --git a/media/liboboe/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
similarity index 72%
rename from media/liboboe/src/client/IsochronousClockModel.cpp
rename to media/libaaudio/src/client/IsochronousClockModel.cpp
index b8e5538..bdb491d 100644
--- a/media/liboboe/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -14,19 +14,19 @@
* limitations under the License.
*/
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <stdint.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include "IsochronousClockModel.h"
-#define MIN_LATENESS_NANOS (10 * OBOE_NANOS_PER_MICROSECOND)
+#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
using namespace android;
-using namespace oboe;
+using namespace aaudio;
IsochronousClockModel::IsochronousClockModel()
: mSampleRate(48000)
@@ -41,21 +41,21 @@
IsochronousClockModel::~IsochronousClockModel() {
}
-void IsochronousClockModel::start(oboe_nanoseconds_t nanoTime)
+void IsochronousClockModel::start(aaudio_nanoseconds_t nanoTime)
{
mMarkerNanoTime = nanoTime;
mState = STATE_STARTING;
}
-void IsochronousClockModel::stop(oboe_nanoseconds_t nanoTime)
+void IsochronousClockModel::stop(aaudio_nanoseconds_t nanoTime)
{
mMarkerNanoTime = nanoTime;
mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
mState = STATE_STOPPED;
}
-void IsochronousClockModel::processTimestamp(oboe_position_frames_t framePosition,
- oboe_nanoseconds_t nanoTime) {
+void IsochronousClockModel::processTimestamp(aaudio_position_frames_t framePosition,
+ aaudio_nanoseconds_t nanoTime) {
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
@@ -133,41 +133,41 @@
mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
}
-oboe_nanoseconds_t IsochronousClockModel::convertDeltaPositionToTime(
- oboe_position_frames_t framesDelta) const {
- return (OBOE_NANOS_PER_SECOND * framesDelta) / mSampleRate;
+aaudio_nanoseconds_t IsochronousClockModel::convertDeltaPositionToTime(
+ aaudio_position_frames_t framesDelta) const {
+ return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
}
-int64_t IsochronousClockModel::convertDeltaTimeToPosition(oboe_nanoseconds_t nanosDelta) const {
- return (mSampleRate * nanosDelta) / OBOE_NANOS_PER_SECOND;
+int64_t IsochronousClockModel::convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const {
+ return (mSampleRate * nanosDelta) / AAUDIO_NANOS_PER_SECOND;
}
-oboe_nanoseconds_t IsochronousClockModel::convertPositionToTime(
- oboe_position_frames_t framePosition) const {
+aaudio_nanoseconds_t IsochronousClockModel::convertPositionToTime(
+ aaudio_position_frames_t framePosition) const {
if (mState == STATE_STOPPED) {
return mMarkerNanoTime;
}
- oboe_position_frames_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
- oboe_position_frames_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
- oboe_position_frames_t framesDelta = nextBurstPosition - mMarkerFramePosition;
- oboe_nanoseconds_t nanosDelta = convertDeltaPositionToTime(framesDelta);
- oboe_nanoseconds_t time = (oboe_nanoseconds_t) (mMarkerNanoTime + nanosDelta);
+ aaudio_position_frames_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
+ aaudio_position_frames_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
+ aaudio_position_frames_t framesDelta = nextBurstPosition - mMarkerFramePosition;
+ aaudio_nanoseconds_t nanosDelta = convertDeltaPositionToTime(framesDelta);
+ aaudio_nanoseconds_t time = (aaudio_nanoseconds_t) (mMarkerNanoTime + nanosDelta);
// ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
// (unsigned long long)framePosition,
// (unsigned long long)time);
return time;
}
-oboe_position_frames_t IsochronousClockModel::convertTimeToPosition(
- oboe_nanoseconds_t nanoTime) const {
+aaudio_position_frames_t IsochronousClockModel::convertTimeToPosition(
+ aaudio_nanoseconds_t nanoTime) const {
if (mState == STATE_STOPPED) {
return mMarkerFramePosition;
}
- oboe_nanoseconds_t nanosDelta = nanoTime - mMarkerNanoTime;
- oboe_position_frames_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
- oboe_position_frames_t nextBurstPosition = mMarkerFramePosition + framesDelta;
- oboe_position_frames_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
- oboe_position_frames_t position = nextBurstIndex * mFramesPerBurst;
+ aaudio_nanoseconds_t nanosDelta = nanoTime - mMarkerNanoTime;
+ aaudio_position_frames_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
+ aaudio_position_frames_t nextBurstPosition = mMarkerFramePosition + framesDelta;
+ aaudio_position_frames_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
+ aaudio_position_frames_t position = nextBurstIndex * mFramesPerBurst;
// ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
// (unsigned long long)nanoTime,
// (unsigned long long)position);
diff --git a/media/liboboe/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
similarity index 65%
rename from media/liboboe/src/client/IsochronousClockModel.h
rename to media/libaaudio/src/client/IsochronousClockModel.h
index 97be325..b188a3d 100644
--- a/media/liboboe/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -14,13 +14,13 @@
* limitations under the License.
*/
-#ifndef OBOE_ISOCHRONOUSCLOCKMODEL_H
-#define OBOE_ISOCHRONOUSCLOCKMODEL_H
+#ifndef AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#define AAUDIO_ISOCHRONOUSCLOCKMODEL_H
#include <stdint.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
-namespace oboe {
+namespace aaudio {
/**
* Model an isochronous data stream using occasional timestamps as input.
@@ -34,17 +34,17 @@
IsochronousClockModel();
virtual ~IsochronousClockModel();
- void start(oboe_nanoseconds_t nanoTime);
- void stop(oboe_nanoseconds_t nanoTime);
+ void start(aaudio_nanoseconds_t nanoTime);
+ void stop(aaudio_nanoseconds_t nanoTime);
- void processTimestamp(oboe_position_frames_t framePosition, oboe_nanoseconds_t nanoTime);
+ void processTimestamp(aaudio_position_frames_t framePosition, aaudio_nanoseconds_t nanoTime);
/**
* @param sampleRate rate of the stream in frames per second
*/
- void setSampleRate(oboe_sample_rate_t sampleRate);
+ void setSampleRate(aaudio_sample_rate_t sampleRate);
- oboe_sample_rate_t getSampleRate() const {
+ aaudio_sample_rate_t getSampleRate() const {
return mSampleRate;
}
@@ -53,9 +53,9 @@
*
* @param framesPerBurst number of frames that the stream advances at one time.
*/
- void setFramesPerBurst(oboe_size_frames_t framesPerBurst);
+ void setFramesPerBurst(aaudio_size_frames_t framesPerBurst);
- oboe_size_frames_t getFramesPerBurst() const {
+ aaudio_size_frames_t getFramesPerBurst() const {
return mFramesPerBurst;
}
@@ -65,7 +65,7 @@
* @param framePosition position of the stream in frames
* @return time in nanoseconds
*/
- oboe_nanoseconds_t convertPositionToTime(oboe_position_frames_t framePosition) const;
+ aaudio_nanoseconds_t convertPositionToTime(aaudio_position_frames_t framePosition) const;
/**
* Calculate an estimated position where the stream will be at the specified time.
@@ -73,19 +73,19 @@
* @param nanoTime time of interest
* @return position in frames
*/
- oboe_position_frames_t convertTimeToPosition(oboe_nanoseconds_t nanoTime) const;
+ aaudio_position_frames_t convertTimeToPosition(aaudio_nanoseconds_t nanoTime) const;
/**
* @param framesDelta difference in frames
* @return duration in nanoseconds
*/
- oboe_nanoseconds_t convertDeltaPositionToTime(oboe_position_frames_t framesDelta) const;
+ aaudio_nanoseconds_t convertDeltaPositionToTime(aaudio_position_frames_t framesDelta) const;
/**
* @param nanosDelta duration in nanoseconds
* @return frames that stream will advance in that time
*/
- oboe_position_frames_t convertDeltaTimeToPosition(oboe_nanoseconds_t nanosDelta) const;
+ aaudio_position_frames_t convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const;
private:
enum clock_model_state_t {
@@ -95,17 +95,17 @@
STATE_RUNNING
};
- oboe_sample_rate_t mSampleRate;
- oboe_size_frames_t mFramesPerBurst;
+ aaudio_sample_rate_t mSampleRate;
+ aaudio_size_frames_t mFramesPerBurst;
int32_t mMaxLatenessInNanos;
- oboe_position_frames_t mMarkerFramePosition;
- oboe_nanoseconds_t mMarkerNanoTime;
+ aaudio_position_frames_t mMarkerFramePosition;
+ aaudio_nanoseconds_t mMarkerNanoTime;
int32_t mTimestampCount;
clock_model_state_t mState;
void update();
};
-} /* namespace oboe */
+} /* namespace aaudio */
-#endif //OBOE_ISOCHRONOUSCLOCKMODEL_H
+#endif //AAUDIO_ISOCHRONOUSCLOCKMODEL_H
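
The conversion helpers above are plain rate arithmetic between a frame position and a nanosecond clock, with the callers rounding to burst boundaries. As a quick check with assumed numbers (48 kHz, 192-frame burst), 192 frames correspond to 192 * 1e9 / 48000 = 4,000,000 ns, i.e. 4 ms per burst. A self-contained sketch of the two delta conversions, with illustrative constants:

    #include <cstdint>
    #include <cstdio>

    // Assumed values for illustration only; the real ones come from the stream configuration.
    constexpr int64_t kNanosPerSecond = 1000000000LL;
    constexpr int32_t kSampleRate     = 48000;

    static int64_t framesToNanos(int64_t framesDelta) {
        return (kNanosPerSecond * framesDelta) / kSampleRate;    // same form as convertDeltaPositionToTime()
    }
    static int64_t nanosToFrames(int64_t nanosDelta) {
        return (static_cast<int64_t>(kSampleRate) * nanosDelta) / kNanosPerSecond;  // convertDeltaTimeToPosition()
    }

    int main() {
        std::printf("192 frames -> %lld ns\n", (long long) framesToNanos(192));         // 4000000
        std::printf("4 ms       -> %lld frames\n", (long long) nanosToFrames(4000000)); // 192
        return 0;
    }
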
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
new file mode 100644
index 0000000..04dbda1
--- /dev/null
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <time.h>
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AudioClock.h"
+#include "client/AudioStreamInternal.h"
+#include "HandleTracker.h"
+
+using namespace aaudio;
+
+// This is not the theoretical maximum number of handles that the HandleTracker
+// class could support; it is just the maximum number of handles that we configure
+// for our HandleTracker instance (sHandleTracker).
+#define AAUDIO_MAX_HANDLES 64
+
+// Macros for common code that includes a return.
+// TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
+#define CONVERT_BUILDER_HANDLE_OR_RETURN() \
+ convertAAudioBuilderToStreamBuilder(builder); \
+ if (streamBuilder == nullptr) { \
+ return AAUDIO_ERROR_INVALID_HANDLE; \
+ }
+
+#define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
+ CONVERT_BUILDER_HANDLE_OR_RETURN() \
+ if ((resultPtr) == nullptr) { \
+ return AAUDIO_ERROR_NULL; \
+ }
+
+#define CONVERT_STREAM_HANDLE_OR_RETURN() \
+ convertAAudioStreamToAudioStream(stream); \
+ if (audioStream == nullptr) { \
+ return AAUDIO_ERROR_INVALID_HANDLE; \
+ }
+
+#define COMMON_GET_FROM_STREAM_OR_RETURN(resultPtr) \
+ CONVERT_STREAM_HANDLE_OR_RETURN(); \
+ if ((resultPtr) == nullptr) { \
+ return AAUDIO_ERROR_NULL; \
+ }
+
+// Static data.
+// TODO static constructors are discouraged, alternatives?
+static HandleTracker sHandleTracker(AAUDIO_MAX_HANDLES);
+
+typedef enum
+{
+ AAUDIO_HANDLE_TYPE_STREAM,
+ AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
+ AAUDIO_HANDLE_TYPE_COUNT
+} aaudio_handle_type_t;
+static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+
+
+#define AAUDIO_CASE_ENUM(name) case name: return #name
+
+AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
+ switch (returnCode) {
+ AAUDIO_CASE_ENUM(AAUDIO_OK);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INCOMPATIBLE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_QUERY);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNAVAILABLE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_FREE_HANDLES);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_MEMORY);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_ORDER);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
+ }
+ return "Unrecognized AAudio error.";
+}
+
+AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state) {
+ switch (state) {
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
+ }
+ return "Unrecognized AAudio state.";
+}
+
+#undef AAUDIO_CASE_ENUM
+
+static AudioStream *convertAAudioStreamToAudioStream(AAudioStream stream)
+{
+ return (AudioStream *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM,
+ (aaudio_handle_t) stream);
+}
+
+static AudioStreamBuilder *convertAAudioBuilderToStreamBuilder(AAudioStreamBuilder builder)
+{
+ return (AudioStreamBuilder *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
+ (aaudio_handle_t) builder);
+}
+
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder *builder)
+{
+    ALOGD("AAudio_createStreamBuilder(): check sHandleTracker.isInitialized()");
+ if (!sHandleTracker.isInitialized()) {
+ return AAUDIO_ERROR_NO_MEMORY;
+ }
+ AudioStreamBuilder *audioStreamBuilder = new AudioStreamBuilder();
+ if (audioStreamBuilder == nullptr) {
+ return AAUDIO_ERROR_NO_MEMORY;
+ }
+ ALOGD("AAudio_createStreamBuilder(): created AudioStreamBuilder = %p", audioStreamBuilder);
+ // TODO protect the put() with a Mutex
+ AAudioStreamBuilder handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
+ audioStreamBuilder);
+ if (handle < 0) {
+ delete audioStreamBuilder;
+ return static_cast<aaudio_result_t>(handle);
+ } else {
+ *builder = handle;
+ }
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+ aaudio_device_id_t deviceId)
+{
+ AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+ streamBuilder->setDeviceId(deviceId);
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
+ aaudio_device_id_t *deviceId)
+{
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(deviceId);
+ *deviceId = streamBuilder->getDeviceId();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+ aaudio_sample_rate_t sampleRate)
+{
+ AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+ streamBuilder->setSampleRate(sampleRate);
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
+ aaudio_sample_rate_t *sampleRate)
+{
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sampleRate);
+ *sampleRate = streamBuilder->getSampleRate();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+ int32_t samplesPerFrame)
+{
+ AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+ streamBuilder->setSamplesPerFrame(samplesPerFrame);
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
+ int32_t *samplesPerFrame)
+{
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(samplesPerFrame);
+ *samplesPerFrame = streamBuilder->getSamplesPerFrame();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+ aaudio_direction_t direction)
+{
+ AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+ streamBuilder->setDirection(direction);
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
+ aaudio_direction_t *direction)
+{
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(direction);
+ *direction = streamBuilder->getDirection();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+ aaudio_audio_format_t format)
+{
+ AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+ streamBuilder->setFormat(format);
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
+ aaudio_audio_format_t *format)
+{
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(format);
+ *format = streamBuilder->getFormat();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+ aaudio_sharing_mode_t sharingMode)
+{
+ AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+ if ((sharingMode < 0) || (sharingMode >= AAUDIO_SHARING_MODE_COUNT)) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else {
+ streamBuilder->setSharingMode(sharingMode);
+ return AAUDIO_OK;
+ }
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
+ aaudio_sharing_mode_t *sharingMode)
+{
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sharingMode);
+ *sharingMode = streamBuilder->getSharingMode();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setBufferCapacity(AAudioStreamBuilder builder,
+ aaudio_size_frames_t frames)
+{
+ AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+ if (frames < 0) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else {
+ streamBuilder->setBufferCapacity(frames);
+ return AAUDIO_OK;
+ }
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getBufferCapacity(AAudioStreamBuilder builder,
+ aaudio_size_frames_t *frames)
+{
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(frames);
+ *frames = streamBuilder->getBufferCapacity();
+ return AAUDIO_OK;
+}
+
+static aaudio_result_t AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
+ AAudioStream *streamPtr)
+{
+ AudioStream *audioStream = nullptr;
+ aaudio_result_t result = streamBuilder->build(&audioStream);
+ if (result != AAUDIO_OK) {
+ return result;
+ } else {
+ // Create a handle for referencing the object.
+ // TODO protect the put() with a Mutex
+ AAudioStream handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, audioStream);
+ if (handle < 0) {
+ delete audioStream;
+ return static_cast<aaudio_result_t>(handle);
+ }
+ *streamPtr = handle;
+ return AAUDIO_OK;
+ }
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
+ AAudioStream *streamPtr)
+{
+ ALOGD("AAudioStreamBuilder_openStream(): builder = 0x%08X", builder);
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
+ return AAudioInternal_openStream(streamBuilder, streamPtr);
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder builder)
+{
+ AudioStreamBuilder *streamBuilder = (AudioStreamBuilder *)
+ sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM_BUILDER, builder);
+ if (streamBuilder != nullptr) {
+ delete streamBuilder;
+ return AAUDIO_OK;
+ }
+ return AAUDIO_ERROR_INVALID_HANDLE;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_close(AAudioStream stream)
+{
+ AudioStream *audioStream = (AudioStream *)
+ sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM, (aaudio_handle_t)stream);
+ ALOGD("AAudioStream_close(0x%08X), audioStream = %p", stream, audioStream);
+ if (audioStream != nullptr) {
+ audioStream->close();
+ delete audioStream;
+ return AAUDIO_OK;
+ }
+ return AAUDIO_ERROR_INVALID_HANDLE;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestStart(AAudioStream stream)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ ALOGD("AAudioStream_requestStart(0x%08X), audioStream = %p", stream, audioStream);
+ return audioStream->requestStart();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestPause(AAudioStream stream)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ ALOGD("AAudioStream_requestPause(0x%08X), audioStream = %p", stream, audioStream);
+ return audioStream->requestPause();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestFlush(AAudioStream stream)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ ALOGD("AAudioStream_requestFlush(0x%08X), audioStream = %p", stream, audioStream);
+ return audioStream->requestFlush();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestStop(AAudioStream stream)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ ALOGD("AAudioStream_requestStop(0x%08X), audioStream = %p", stream, audioStream);
+ return audioStream->requestStop();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
+ aaudio_stream_state_t inputState,
+ aaudio_stream_state_t *nextState,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+{
+
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+}
+
+// ============================================================
+// Stream - non-blocking I/O
+// ============================================================
+
+AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
+ void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ if (buffer == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ }
+ if (numFrames < 0) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else if (numFrames == 0) {
+ return 0;
+ }
+
+ aaudio_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);
+ // ALOGD("AAudioStream_read(): read returns %d", result);
+
+ return result;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
+ const void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ if (buffer == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ }
+ if (numFrames < 0) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else if (numFrames == 0) {
+ return 0;
+ }
+
+ aaudio_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);
+ // ALOGD("AAudioStream_write(): write returns %d", result);
+
+ return result;
+}
+
+// ============================================================
+// Miscellaneous
+// ============================================================
+
+AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
+ aaudio_nanoseconds_t periodNanoseconds,
+ aaudio_audio_thread_proc_t *threadProc, void *arg)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ return audioStream->createThread(periodNanoseconds, threadProc, arg);
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
+ void **returnArg,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ return audioStream->joinThread(returnArg, timeoutNanoseconds);
+}
+
+// ============================================================
+// Stream - queries
+// ============================================================
+
+// TODO Use aaudio_clockid_t all the way down through the C++ streams.
+static clockid_t AAudioConvert_fromAAudioClockId(aaudio_clockid_t clockid)
+{
+ clockid_t hostClockId;
+ switch (clockid) {
+ case AAUDIO_CLOCK_MONOTONIC:
+ hostClockId = CLOCK_MONOTONIC;
+ break;
+ case AAUDIO_CLOCK_BOOTTIME:
+ hostClockId = CLOCK_BOOTTIME;
+ break;
+ default:
+ hostClockId = 0; // TODO review
+ }
+ return hostClockId;
+}
+
+aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid)
+{
+ clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
+ return AudioClock::getNanoseconds(hostClockId);
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream, aaudio_sample_rate_t *sampleRate)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sampleRate);
+ *sampleRate = audioStream->getSampleRate();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream, int32_t *samplesPerFrame)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(samplesPerFrame);
+ *samplesPerFrame = audioStream->getSamplesPerFrame();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(state);
+ *state = audioStream->getState();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream, aaudio_audio_format_t *format)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(format);
+ *format = audioStream->getFormat();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
+ aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ return audioStream->setBufferSize(requestedFrames, actualFrames);
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream, aaudio_size_frames_t *frames)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+ *frames = audioStream->getBufferSize();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream, int32_t *direction)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(direction);
+ *direction = audioStream->getDirection();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream,
+ aaudio_size_frames_t *framesPerBurst)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(framesPerBurst);
+ *framesPerBurst = audioStream->getFramesPerBurst();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream,
+ aaudio_size_frames_t *capacity)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(capacity);
+ *capacity = audioStream->getBufferCapacity();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(xRunCount);
+ *xRunCount = audioStream->getXRunCount();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream,
+ aaudio_device_id_t *deviceId)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(deviceId);
+ *deviceId = audioStream->getDeviceId();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
+ aaudio_sharing_mode_t *sharingMode)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sharingMode);
+ *sharingMode = audioStream->getSharingMode();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
+ aaudio_position_frames_t *frames)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+ *frames = audioStream->getFramesWritten();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream, aaudio_position_frames_t *frames)
+{
+ AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+ *frames = audioStream->getFramesRead();
+ return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
+ aaudio_clockid_t clockid,
+ aaudio_position_frames_t *framePosition,
+ aaudio_nanoseconds_t *timeNanoseconds)
+{
+ AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+ if (framePosition == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ } else if (timeNanoseconds == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ } else if (clockid != AAUDIO_CLOCK_MONOTONIC && clockid != AAUDIO_CLOCK_BOOTTIME) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+
+ clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
+ return audioStream->getTimestamp(hostClockId, framePosition, timeNanoseconds);
+}
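
Taken together, the entry points in this file give the handle-based builder-then-stream lifecycle for callers. Below is a minimal caller-side sketch using only the signatures defined above; the 48 kHz / stereo / 480-frame values are illustrative, the buffer assumes 16-bit PCM frames, and error handling is trimmed to the essentials.

    #include <stdint.h>
    #include <aaudio/AAudioDefinitions.h>
    #include <aaudio/AAudio.h>

    int32_t playOneBuffer() {
        AAudioStreamBuilder builder;
        AAudioStream stream;
        if (AAudio_createStreamBuilder(&builder) != AAUDIO_OK) return -1;

        AAudioStreamBuilder_setSampleRate(builder, 48000);       // illustrative values
        AAudioStreamBuilder_setSamplesPerFrame(builder, 2);

        aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &stream);
        AAudioStreamBuilder_delete(builder);                      // the builder is no longer needed
        if (result != AAUDIO_OK) return result;

        AAudioStream_requestStart(stream);

        int16_t silence[480 * 2] = {0};                            // 10 ms of stereo frames (16-bit PCM assumed)
        // Blocking write: wait up to 100 ms for the 480 frames to be accepted.
        AAudioStream_write(stream, silence, 480, 100 * AAUDIO_NANOS_PER_MILLISECOND);

        AAudioStream_requestStop(stream);
        return AAudioStream_close(stream);
    }
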
diff --git a/media/liboboe/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
similarity index 66%
rename from media/liboboe/src/core/AudioStream.cpp
rename to media/libaaudio/src/core/AudioStream.cpp
index cc654c3..77d3cc0 100644
--- a/media/liboboe/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -14,19 +14,19 @@
* limitations under the License.
*/
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <atomic>
#include <stdint.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
#include "AudioStreamBuilder.h"
#include "AudioStream.h"
#include "AudioClock.h"
-using namespace oboe;
+using namespace aaudio;
AudioStream::AudioStream() {
// mThread is a pthread_t of unknown size so we need memset.
@@ -34,7 +34,7 @@
setPeriodNanoseconds(0);
}
-oboe_result_t AudioStream::open(const AudioStreamBuilder& builder)
+aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
{
// TODO validate parameters.
// Copy parameters from the Builder because the Builder may be deleted after this call.
@@ -43,41 +43,41 @@
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
mSharingMode = builder.getSharingMode();
- return OBOE_OK;
+ return AAUDIO_OK;
}
AudioStream::~AudioStream() {
close();
}
-oboe_result_t AudioStream::waitForStateTransition(oboe_stream_state_t startingState,
- oboe_stream_state_t endingState,
- oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::waitForStateTransition(aaudio_stream_state_t startingState,
+ aaudio_stream_state_t endingState,
+ aaudio_nanoseconds_t timeoutNanoseconds)
{
- oboe_stream_state_t state = getState();
- oboe_stream_state_t nextState = state;
+ aaudio_stream_state_t state = getState();
+ aaudio_stream_state_t nextState = state;
if (state == startingState && state != endingState) {
- oboe_result_t result = waitForStateChange(state, &nextState, timeoutNanoseconds);
- if (result != OBOE_OK) {
+ aaudio_result_t result = waitForStateChange(state, &nextState, timeoutNanoseconds);
+ if (result != AAUDIO_OK) {
return result;
}
}
// It's OK if the expected transition has already occurred.
// But if we reach an unexpected state then that is an error.
if (nextState != endingState) {
- return OBOE_ERROR_UNEXPECTED_STATE;
+ return AAUDIO_ERROR_UNEXPECTED_STATE;
} else {
- return OBOE_OK;
+ return AAUDIO_OK;
}
}
-oboe_result_t AudioStream::waitForStateChange(oboe_stream_state_t currentState,
- oboe_stream_state_t *nextState,
- oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
+ aaudio_stream_state_t *nextState,
+ aaudio_nanoseconds_t timeoutNanoseconds)
{
// TODO replace this when similar functionality added to AudioTrack.cpp
- oboe_nanoseconds_t durationNanos = 20 * OBOE_NANOS_PER_MILLISECOND;
- oboe_stream_state_t state = getState();
+ aaudio_nanoseconds_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
+ aaudio_stream_state_t state = getState();
while (state == currentState && timeoutNanoseconds > 0) {
if (durationNanos > timeoutNanoseconds) {
durationNanos = timeoutNanoseconds;
@@ -85,8 +85,8 @@
AudioClock::sleepForNanos(durationNanos);
timeoutNanoseconds -= durationNanos;
- oboe_result_t result = updateState();
- if (result != OBOE_OK) {
+ aaudio_result_t result = updateState();
+ if (result != AAUDIO_OK) {
return result;
}
@@ -95,7 +95,7 @@
if (nextState != nullptr) {
*nextState = state;
}
- return (state == currentState) ? OBOE_ERROR_TIMEOUT : OBOE_OK;
+ return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
}
// This registers the app's background audio thread with the server before
@@ -104,7 +104,7 @@
void* AudioStream::wrapUserThread() {
void* procResult = nullptr;
mThreadRegistrationResult = registerThread();
- if (mThreadRegistrationResult == OBOE_OK) {
+ if (mThreadRegistrationResult == AAUDIO_OK) {
// Call application procedure. This may take a very long time.
procResult = mThreadProc(mThreadArg);
ALOGD("AudioStream::mThreadProc() returned");
@@ -120,15 +120,15 @@
return audioStream->wrapUserThread();
}
-oboe_result_t AudioStream::createThread(oboe_nanoseconds_t periodNanoseconds,
- oboe_audio_thread_proc_t *threadProc,
+aaudio_result_t AudioStream::createThread(aaudio_nanoseconds_t periodNanoseconds,
+ aaudio_audio_thread_proc_t *threadProc,
void* threadArg)
{
if (mHasThread) {
- return OBOE_ERROR_INVALID_STATE;
+ return AAUDIO_ERROR_INVALID_STATE;
}
if (threadProc == nullptr) {
- return OBOE_ERROR_NULL;
+ return AAUDIO_ERROR_NULL;
}
// Pass input parameters to the background thread.
mThreadProc = threadProc;
@@ -136,18 +136,18 @@
setPeriodNanoseconds(periodNanoseconds);
int err = pthread_create(&mThread, nullptr, AudioStream_internalThreadProc, this);
if (err != 0) {
- // TODO convert errno to oboe_result_t
- return OBOE_ERROR_INTERNAL;
+ // TODO convert errno to aaudio_result_t
+ return AAUDIO_ERROR_INTERNAL;
} else {
mHasThread = true;
- return OBOE_OK;
+ return AAUDIO_OK;
}
}
-oboe_result_t AudioStream::joinThread(void** returnArg, oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::joinThread(void** returnArg, aaudio_nanoseconds_t timeoutNanoseconds)
{
if (!mHasThread) {
- return OBOE_ERROR_INVALID_STATE;
+ return AAUDIO_ERROR_INVALID_STATE;
}
#if 0
// TODO implement equivalent of pthread_timedjoin_np()
@@ -157,7 +157,7 @@
int err = pthread_join(mThread, returnArg);
#endif
mHasThread = false;
- // TODO convert errno to oboe_result_t
- return err ? OBOE_ERROR_INTERNAL : mThreadRegistrationResult;
+ // TODO convert errno to aaudio_result_t
+ return err ? AAUDIO_ERROR_INTERNAL : mThreadRegistrationResult;
}
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
new file mode 100644
index 0000000..8e4aa05
--- /dev/null
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AUDIOSTREAM_H
+#define AAUDIO_AUDIOSTREAM_H
+
+#include <atomic>
+#include <stdint.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#include "AAudioUtilities.h"
+#include "MonotonicCounter.h"
+
+namespace aaudio {
+
+class AudioStreamBuilder;
+
+/**
+ * AAudio audio stream.
+ */
+class AudioStream {
+public:
+
+ AudioStream();
+
+ virtual ~AudioStream();
+
+
+ // =========== Begin ABSTRACT methods ===========================
+
+ /* Asynchronous requests.
+ * Use waitForStateChange() to wait for completion.
+ */
+ virtual aaudio_result_t requestStart() = 0;
+ virtual aaudio_result_t requestPause() = 0;
+ virtual aaudio_result_t requestFlush() = 0;
+ virtual aaudio_result_t requestStop() = 0;
+
+ // TODO use aaudio_clockid_t all the way down to AudioClock
+ virtual aaudio_result_t getTimestamp(clockid_t clockId,
+ aaudio_position_frames_t *framePosition,
+ aaudio_nanoseconds_t *timeNanoseconds) = 0;
+
+
+ virtual aaudio_result_t updateState() = 0;
+
+
+ // =========== End ABSTRACT methods ===========================
+
+ virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
+ aaudio_stream_state_t *nextState,
+ aaudio_nanoseconds_t timeoutNanoseconds);
+
+ /**
+ * Open the stream using the parameters in the builder.
+ * Allocate the necessary resources.
+ */
+ virtual aaudio_result_t open(const AudioStreamBuilder& builder);
+
+ /**
+ * Close the stream and deallocate any resources from the open() call.
+ * It is safe to call close() multiple times.
+ */
+ virtual aaudio_result_t close() {
+ return AAUDIO_OK;
+ }
+
+ virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames) {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ virtual aaudio_result_t createThread(aaudio_nanoseconds_t periodNanoseconds,
+ aaudio_audio_thread_proc_t *threadProc,
+ void *threadArg);
+
+ virtual aaudio_result_t joinThread(void **returnArg, aaudio_nanoseconds_t timeoutNanoseconds);
+
+ virtual aaudio_result_t registerThread() {
+ return AAUDIO_OK;
+ }
+
+ virtual aaudio_result_t unregisterThread() {
+ return AAUDIO_OK;
+ }
+
+ /**
+ * Internal function used to call the audio thread passed by the user.
+ * It is unfortunately public because it needs to be called by a static 'C' function.
+ */
+ void* wrapUserThread();
+
+ // ============== Queries ===========================
+
+ virtual aaudio_stream_state_t getState() const {
+ return mState;
+ }
+
+ virtual aaudio_size_frames_t getBufferSize() const {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ virtual aaudio_size_frames_t getBufferCapacity() const {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ virtual aaudio_size_frames_t getFramesPerBurst() const {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ virtual int32_t getXRunCount() const {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ bool isPlaying() const {
+ return mState == AAUDIO_STREAM_STATE_STARTING || mState == AAUDIO_STREAM_STATE_STARTED;
+ }
+
+ aaudio_sample_rate_t getSampleRate() const {
+ return mSampleRate;
+ }
+
+ aaudio_audio_format_t getFormat() const {
+ return mFormat;
+ }
+
+ int32_t getSamplesPerFrame() const {
+ return mSamplesPerFrame;
+ }
+
+ aaudio_device_id_t getDeviceId() const {
+ return mDeviceId;
+ }
+
+ aaudio_sharing_mode_t getSharingMode() const {
+ return mSharingMode;
+ }
+
+ aaudio_direction_t getDirection() const {
+ return mDirection;
+ }
+
+ aaudio_size_bytes_t getBytesPerFrame() const {
+ return mSamplesPerFrame * getBytesPerSample();
+ }
+
+ aaudio_size_bytes_t getBytesPerSample() const {
+ return AAudioConvert_formatToSizeInBytes(mFormat);
+ }
+
+ virtual aaudio_position_frames_t getFramesWritten() {
+ return mFramesWritten.get();
+ }
+
+ virtual aaudio_position_frames_t getFramesRead() {
+ return mFramesRead.get();
+ }
+
+
+ // ============== I/O ===========================
+ // A Stream will only implement read() or write() depending on its direction.
+ virtual aaudio_result_t write(const void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds) {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ virtual aaudio_result_t read(void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds) {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+protected:
+
+ virtual aaudio_position_frames_t incrementFramesWritten(aaudio_size_frames_t frames) {
+ return static_cast<aaudio_position_frames_t>(mFramesWritten.increment(frames));
+ }
+
+ virtual aaudio_position_frames_t incrementFramesRead(aaudio_size_frames_t frames) {
+ return static_cast<aaudio_position_frames_t>(mFramesRead.increment(frames));
+ }
+
+ /**
+ * Wait for a transition from one state to another.
+ * @return AAUDIO_OK if the endingState was observed, or AAUDIO_ERROR_UNEXPECTED_STATE
+ * if any state that was not the startingState or endingState was observed
+ * or AAUDIO_ERROR_TIMEOUT
+ */
+ virtual aaudio_result_t waitForStateTransition(aaudio_stream_state_t startingState,
+ aaudio_stream_state_t endingState,
+ aaudio_nanoseconds_t timeoutNanoseconds);
+
+ /**
+ * This should not be called after the open() call.
+ */
+ void setSampleRate(aaudio_sample_rate_t sampleRate) {
+ mSampleRate = sampleRate;
+ }
+
+ /**
+ * This should not be called after the open() call.
+ */
+ void setSamplesPerFrame(int32_t samplesPerFrame) {
+ mSamplesPerFrame = samplesPerFrame;
+ }
+
+ /**
+ * This should not be called after the open() call.
+ */
+ void setSharingMode(aaudio_sharing_mode_t sharingMode) {
+ mSharingMode = sharingMode;
+ }
+
+ /**
+ * This should not be called after the open() call.
+ */
+ void setFormat(aaudio_audio_format_t format) {
+ mFormat = format;
+ }
+
+ void setState(aaudio_stream_state_t state) {
+ mState = state;
+ }
+
+
+
+protected:
+ MonotonicCounter mFramesWritten;
+ MonotonicCounter mFramesRead;
+
+ void setPeriodNanoseconds(aaudio_nanoseconds_t periodNanoseconds) {
+ mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
+ }
+
+ aaudio_nanoseconds_t getPeriodNanoseconds() {
+ return mPeriodNanoseconds.load(std::memory_order_acquire);
+ }
+
+private:
+ // These do not change after open().
+ int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ aaudio_sample_rate_t mSampleRate = AAUDIO_UNSPECIFIED;
+ aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ aaudio_device_id_t mDeviceId = AAUDIO_UNSPECIFIED;
+ aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+ aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
+
+ // background thread ----------------------------------
+ bool mHasThread = false;
+ pthread_t mThread; // initialized in constructor
+
+ // These are set by the application thread and then read by the audio pthread.
+ std::atomic<aaudio_nanoseconds_t> mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+ // TODO make atomic?
+ aaudio_audio_thread_proc_t* mThreadProc = nullptr;
+ void* mThreadArg = nullptr;
+ aaudio_result_t mThreadRegistrationResult = AAUDIO_OK;
+
+
+};
+
+} /* namespace aaudio */
+
+#endif /* AAUDIO_AUDIOSTREAM_H */
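A sketch of the start-then-wait pattern this internal class is designed for, assuming a concrete subclass such as the legacy streams added later in this change; the two-second timeout is illustrative:

    // Sketch: stream points to a concrete aaudio::AudioStream implementation.
    aaudio_result_t startAndWait(aaudio::AudioStream *stream) {
        aaudio_result_t result = stream->requestStart();     // asynchronous request
        if (result != AAUDIO_OK) {
            return result;
        }
        aaudio_stream_state_t state = stream->getState();    // normally STARTING here
        aaudio_stream_state_t nextState = state;
        // Poll until the state machine leaves STARTING or the timeout expires.
        result = stream->waitForStateChange(state, &nextState,
                                            2 * AAUDIO_NANOS_PER_SECOND);
        if (result != AAUDIO_OK) {
            return result;
        }
        return (nextState == AAUDIO_STREAM_STATE_STARTED)
                ? AAUDIO_OK : AAUDIO_ERROR_UNEXPECTED_STATE;
    }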
diff --git a/media/liboboe/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
similarity index 75%
rename from media/liboboe/src/core/AudioStreamBuilder.cpp
rename to media/libaaudio/src/core/AudioStreamBuilder.cpp
index 37e1378..decd53c 100644
--- a/media/liboboe/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -14,15 +14,15 @@
* limitations under the License.
*/
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <new>
#include <stdint.h>
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
#include "client/AudioStreamInternal.h"
#include "core/AudioStream.h"
@@ -30,7 +30,7 @@
#include "legacy/AudioStreamRecord.h"
#include "legacy/AudioStreamTrack.h"
-using namespace oboe;
+using namespace aaudio;
/*
* AudioStreamBuilder
@@ -41,50 +41,50 @@
AudioStreamBuilder::~AudioStreamBuilder() {
}
-oboe_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
+aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
// TODO Is there a better place to put the code that decides which class to use?
AudioStream* audioStream = nullptr;
- const oboe_sharing_mode_t sharingMode = getSharingMode();
+ const aaudio_sharing_mode_t sharingMode = getSharingMode();
switch (getDirection()) {
- case OBOE_DIRECTION_INPUT:
+ case AAUDIO_DIRECTION_INPUT:
switch (sharingMode) {
- case OBOE_SHARING_MODE_LEGACY:
+ case AAUDIO_SHARING_MODE_LEGACY:
audioStream = new(std::nothrow) AudioStreamRecord();
break;
default:
ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
break;
}
break;
- case OBOE_DIRECTION_OUTPUT:
+ case AAUDIO_DIRECTION_OUTPUT:
switch (sharingMode) {
- case OBOE_SHARING_MODE_LEGACY:
+ case AAUDIO_SHARING_MODE_LEGACY:
audioStream = new(std::nothrow) AudioStreamTrack();
break;
- case OBOE_SHARING_MODE_EXCLUSIVE:
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
audioStream = new(std::nothrow) AudioStreamInternal();
break;
default:
ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
break;
}
break;
default:
ALOGE("AudioStreamBuilder(): bad direction = %d", getDirection());
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
break;
}
if (audioStream == nullptr) {
- return OBOE_ERROR_NO_MEMORY;
+ return AAUDIO_ERROR_NO_MEMORY;
}
ALOGD("AudioStreamBuilder(): created audioStream = %p", audioStream);
// TODO maybe move this out of build and pass the builder to the constructors
// Open the stream using the parameters from the builder.
- const oboe_result_t result = audioStream->open(*this);
- if (result != OBOE_OK) {
+ const aaudio_result_t result = audioStream->open(*this);
+ if (result != AAUDIO_OK) {
delete audioStream;
} else {
*streamPtr = audioStream;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
new file mode 100644
index 0000000..e72633d
--- /dev/null
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AUDIOSTREAMBUILDER_H
+#define AAUDIO_AUDIOSTREAMBUILDER_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStream.h"
+
+namespace aaudio {
+
+/**
+ * Factory class for an AudioStream.
+ */
+class AudioStreamBuilder {
+public:
+ AudioStreamBuilder();
+
+ ~AudioStreamBuilder();
+
+ int getSamplesPerFrame() const {
+ return mSamplesPerFrame;
+ }
+
+ /**
+ * This is also known as channelCount.
+ */
+ AudioStreamBuilder* setSamplesPerFrame(int samplesPerFrame) {
+ mSamplesPerFrame = samplesPerFrame;
+ return this;
+ }
+
+ aaudio_direction_t getDirection() const {
+ return mDirection;
+ }
+
+ AudioStreamBuilder* setDirection(aaudio_direction_t direction) {
+ mDirection = direction;
+ return this;
+ }
+
+ aaudio_sample_rate_t getSampleRate() const {
+ return mSampleRate;
+ }
+
+ AudioStreamBuilder* setSampleRate(aaudio_sample_rate_t sampleRate) {
+ mSampleRate = sampleRate;
+ return this;
+ }
+
+ aaudio_audio_format_t getFormat() const {
+ return mFormat;
+ }
+
+ AudioStreamBuilder *setFormat(aaudio_audio_format_t format) {
+ mFormat = format;
+ return this;
+ }
+
+ aaudio_sharing_mode_t getSharingMode() const {
+ return mSharingMode;
+ }
+
+ AudioStreamBuilder* setSharingMode(aaudio_sharing_mode_t sharingMode) {
+ mSharingMode = sharingMode;
+ return this;
+ }
+
+ aaudio_size_frames_t getBufferCapacity() const {
+ return mBufferCapacity;
+ }
+
+ AudioStreamBuilder* setBufferCapacity(aaudio_size_frames_t frames) {
+ mBufferCapacity = frames;
+ return this;
+ }
+
+ aaudio_device_id_t getDeviceId() const {
+ return mDeviceId;
+ }
+
+ AudioStreamBuilder* setDeviceId(aaudio_device_id_t deviceId) {
+ mDeviceId = deviceId;
+ return this;
+ }
+
+ aaudio_result_t build(AudioStream **streamPtr);
+
+private:
+ int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ aaudio_sample_rate_t mSampleRate = AAUDIO_UNSPECIFIED;
+ aaudio_device_id_t mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
+ aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+ aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
+ aaudio_size_frames_t mBufferCapacity = AAUDIO_UNSPECIFIED;
+};
+
+} /* namespace aaudio */
+
+#endif /* AAUDIO_AUDIOSTREAMBUILDER_H */
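A sketch of how the fluent setters and build() are meant to be chained; the parameter values are illustrative only:

    // Sketch: build and open a legacy output stream with the internal builder.
    aaudio::AudioStreamBuilder builder;
    aaudio::AudioStream *stream = nullptr;
    builder.setDirection(AAUDIO_DIRECTION_OUTPUT)
           ->setSharingMode(AAUDIO_SHARING_MODE_LEGACY)
           ->setSampleRate(48000)
           ->setSamplesPerFrame(2)
           ->setFormat(AAUDIO_FORMAT_PCM_I16);
    aaudio_result_t result = builder.build(&stream);   // build() also opens the stream
    if (result == AAUDIO_OK) {
        // ... use the stream ...
        stream->close();
        delete stream;   // the caller owns the stream returned by build()
    }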
diff --git a/media/libaaudio/src/core/README.md b/media/libaaudio/src/core/README.md
new file mode 100644
index 0000000..5ce41f3
--- /dev/null
+++ b/media/libaaudio/src/core/README.md
@@ -0,0 +1,2 @@
+The core folder contains the essential AAudio files common to all implementations.
+AAudioAudio.cpp contains the 'C' API.
diff --git a/media/liboboe/src/core/VersionExperiment.txt b/media/libaaudio/src/core/VersionExperiment.txt
similarity index 100%
rename from media/liboboe/src/core/VersionExperiment.txt
rename to media/libaaudio/src/core/VersionExperiment.txt
diff --git a/media/liboboe/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
similarity index 100%
rename from media/liboboe/src/fifo/FifoBuffer.cpp
rename to media/libaaudio/src/fifo/FifoBuffer.cpp
diff --git a/media/liboboe/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
similarity index 100%
rename from media/liboboe/src/fifo/FifoBuffer.h
rename to media/libaaudio/src/fifo/FifoBuffer.h
diff --git a/media/liboboe/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
similarity index 100%
rename from media/liboboe/src/fifo/FifoController.h
rename to media/libaaudio/src/fifo/FifoController.h
diff --git a/media/liboboe/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
similarity index 100%
rename from media/liboboe/src/fifo/FifoControllerBase.cpp
rename to media/libaaudio/src/fifo/FifoControllerBase.cpp
diff --git a/media/liboboe/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
similarity index 100%
rename from media/liboboe/src/fifo/FifoControllerBase.h
rename to media/libaaudio/src/fifo/FifoControllerBase.h
diff --git a/media/liboboe/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
similarity index 100%
rename from media/liboboe/src/fifo/FifoControllerIndirect.h
rename to media/libaaudio/src/fifo/FifoControllerIndirect.h
diff --git a/media/liboboe/src/fifo/README.md b/media/libaaudio/src/fifo/README.md
similarity index 83%
rename from media/liboboe/src/fifo/README.md
rename to media/libaaudio/src/fifo/README.md
index 61ffbae..5d0c471 100644
--- a/media/liboboe/src/fifo/README.md
+++ b/media/libaaudio/src/fifo/README.md
@@ -5,5 +5,5 @@
TODO The internal low-level implementation might be merged in some form with audio_utils fifo
and/or FMQ [after confirming that requirements are met].
-The higher-levels parts related to Oboe use of the FIFO such as API, fds, relative
+The higher-level parts related to AAudio use of the FIFO such as API, fds, relative
location of indices and data buffer, mapping, allocation of memory will probably be kept as-is.
diff --git a/media/liboboe/src/legacy/OboeLegacy.h b/media/libaaudio/src/legacy/AAudioLegacy.h
similarity index 83%
rename from media/liboboe/src/legacy/OboeLegacy.h
rename to media/libaaudio/src/legacy/AAudioLegacy.h
index 6803837..2ceb7d4 100644
--- a/media/liboboe/src/legacy/OboeLegacy.h
+++ b/media/libaaudio/src/legacy/AAudioLegacy.h
@@ -14,17 +14,17 @@
* limitations under the License.
*/
-#ifndef OBOE_LEGACY_H
-#define OBOE_LEGACY_H
+#ifndef AAUDIO_LEGACY_H
+#define AAUDIO_LEGACY_H
#include <stdint.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
/**
* Common code for legacy classes.
*/
/* AudioTrack uses a 32-bit frame counter that can wrap around in about a day. */
-typedef uint32_t oboe_wrapping_frames_t;
+typedef uint32_t aaudio_wrapping_frames_t;
-#endif /* OBOE_LEGACY_H */
+#endif /* AAUDIO_LEGACY_H */
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
new file mode 100644
index 0000000..17d0a54
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamRecord"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <utils/String16.h>
+#include <media/AudioRecord.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioClock.h"
+#include "AudioStreamRecord.h"
+
+using namespace android;
+using namespace aaudio;
+
+AudioStreamRecord::AudioStreamRecord()
+ : AudioStream()
+{
+}
+
+AudioStreamRecord::~AudioStreamRecord()
+{
+ const aaudio_stream_state_t state = getState();
+ bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
+ ALOGE_IF(bad, "stream not closed, in state %d", state);
+}
+
+aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
+{
+ aaudio_result_t result = AAUDIO_OK;
+
+ result = AudioStream::open(builder);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ // Try to create an AudioRecord
+
+ // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
+ int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
+ ? 2 : getSamplesPerFrame();
+ audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
+
+ AudioRecord::callback_t callback = nullptr;
+ audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
+
+ size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
+ : builder.getBufferCapacity();
+ // TODO implement an unspecified Android format then use that.
+ audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
+ ? AUDIO_FORMAT_PCM_FLOAT
+ : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
+
+ mAudioRecord = new AudioRecord(
+ AUDIO_SOURCE_DEFAULT,
+ getSampleRate(),
+ format,
+ channelMask,
+ mOpPackageName, // const String16& opPackageName TODO does not compile
+ frameCount,
+ callback,
+ nullptr, // void* user = nullptr,
+ 0, // uint32_t notificationFrames = 0,
+ AUDIO_SESSION_ALLOCATE,
+ AudioRecord::TRANSFER_DEFAULT,
+ flags
+ // int uid = -1,
+ // pid_t pid = -1,
+ // const audio_attributes_t* pAttributes = nullptr
+ );
+
+ // Did we get a valid track?
+ status_t status = mAudioRecord->initCheck();
+ if (status != OK) {
+ close();
+ ALOGE("AudioStreamRecord::open(), initCheck() returned %d", status);
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+
+ // Get the actual rate.
+ setSampleRate(mAudioRecord->getSampleRate());
+ setSamplesPerFrame(mAudioRecord->channelCount());
+ setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
+
+ setState(AAUDIO_STREAM_STATE_OPEN);
+
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamRecord::close()
+{
+ // TODO add close() or release() to AudioRecord API then call it from here
+ if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
+ mAudioRecord.clear();
+ setState(AAUDIO_STREAM_STATE_CLOSED);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamRecord::requestStart()
+{
+ if (mAudioRecord.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Get current position so we can detect when the track is playing.
+ status_t err = mAudioRecord->getPosition(&mPositionWhenStarting);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ err = mAudioRecord->start();
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else {
+ setState(AAUDIO_STREAM_STATE_STARTING);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamRecord::requestPause()
+{
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+}
+
+aaudio_result_t AudioStreamRecord::requestFlush() {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+}
+
+aaudio_result_t AudioStreamRecord::requestStop() {
+ if (mAudioRecord.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+ mAudioRecord->stop();
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamRecord::updateState()
+{
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_wrapping_frames_t position;
+ status_t err;
+ switch (getState()) {
+ // TODO add better state visibility to AudioRecord
+ case AAUDIO_STREAM_STATE_STARTING:
+ err = mAudioRecord->getPosition(&position);
+ if (err != OK) {
+ result = AAudioConvert_androidToAAudioResult(err);
+ } else if (position != mPositionWhenStarting) {
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STOPPING:
+ if (mAudioRecord->stopped()) {
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ }
+ break;
+ default:
+ break;
+ }
+ return result;
+}
+
+aaudio_result_t AudioStreamRecord::read(void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+{
+ aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
+ aaudio_size_bytes_t numBytes;
+ aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ // TODO add timeout to AudioRecord
+ bool blocking = (timeoutNanoseconds > 0);
+ ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
+ if (bytesRead == WOULD_BLOCK) {
+ return 0;
+ } else if (bytesRead < 0) {
+ return AAudioConvert_androidToAAudioResult(bytesRead);
+ }
+ aaudio_size_frames_t framesRead = (aaudio_size_frames_t)(bytesRead / bytesPerFrame);
+ return (aaudio_result_t) framesRead;
+}
+
+aaudio_result_t AudioStreamRecord::setBufferSize(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames)
+{
+ *actualFrames = getBufferCapacity();
+ return AAUDIO_OK;
+}
+
+aaudio_size_frames_t AudioStreamRecord::getBufferSize() const
+{
+ return getBufferCapacity(); // TODO implement in AudioRecord?
+}
+
+aaudio_size_frames_t AudioStreamRecord::getBufferCapacity() const
+{
+ return static_cast<aaudio_size_frames_t>(mAudioRecord->frameCount());
+}
+
+int32_t AudioStreamRecord::getXRunCount() const
+{
+ return AAUDIO_ERROR_UNIMPLEMENTED; // TODO implement when AudioRecord supports it
+}
+
+aaudio_size_frames_t AudioStreamRecord::getFramesPerBurst() const
+{
+ return 192; // TODO add query to AudioRecord.cpp
+}
+
+// TODO implement getTimestamp
+
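A sketch of a capture call against this class; read() returns the number of frames read, 0 when a non-blocking call would block, or a negative AAudio error. The record pointer is assumed to be an open aaudio::AudioStreamRecord:

    // Sketch: read one burst of input (192 frames matches the hard-coded burst above).
    int16_t buffer[2 * 192];   // stereo PCM_I16, one burst
    aaudio_result_t frames = record->read(buffer, 192,
                                          100 * AAUDIO_NANOS_PER_MILLISECOND);
    if (frames > 0) {
        // process 'frames' frames of input
    } else if (frames < 0) {
        // negative AAudio error converted from the AudioRecord status
    }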
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
new file mode 100644
index 0000000..a2ac9f3
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_RECORD_H
+#define LEGACY_AUDIO_STREAM_RECORD_H
+
+#include <media/AudioRecord.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AAudioLegacy.h"
+
+namespace aaudio {
+
+/**
+ * Internal stream that uses the legacy AudioRecord path.
+ */
+class AudioStreamRecord : public AudioStream {
+public:
+ AudioStreamRecord();
+
+ virtual ~AudioStreamRecord();
+
+ virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
+ virtual aaudio_result_t close() override;
+
+ virtual aaudio_result_t requestStart() override;
+ virtual aaudio_result_t requestPause() override;
+ virtual aaudio_result_t requestFlush() override;
+ virtual aaudio_result_t requestStop() override;
+
+ virtual aaudio_result_t getTimestamp(clockid_t clockId,
+ aaudio_position_frames_t *framePosition,
+ aaudio_nanoseconds_t *timeNanoseconds) override {
+ return AAUDIO_ERROR_UNIMPLEMENTED; // TODO
+ }
+
+ virtual aaudio_result_t read(void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds) override;
+
+ virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames) override;
+
+ virtual aaudio_size_frames_t getBufferSize() const override;
+
+ virtual aaudio_size_frames_t getBufferCapacity() const override;
+
+ virtual int32_t getXRunCount() const override;
+
+ virtual aaudio_size_frames_t getFramesPerBurst() const override;
+
+ virtual aaudio_result_t updateState() override;
+
+private:
+ android::sp<android::AudioRecord> mAudioRecord;
+ // TODO add 64-bit position reporting to AudioRecord and use it.
+ aaudio_wrapping_frames_t mPositionWhenStarting = 0;
+ android::String16 mOpPackageName;
+};
+
+} /* namespace aaudio */
+
+#endif /* LEGACY_AUDIO_STREAM_RECORD_H */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
new file mode 100644
index 0000000..b7d8664
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamTrack"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <media/AudioTrack.h>
+
+#include <aaudio/AAudio.h>
+#include "AudioClock.h"
+#include "AudioStreamTrack.h"
+
+
+using namespace android;
+using namespace aaudio;
+
+/*
+ * Create a stream that uses the AudioTrack.
+ */
+AudioStreamTrack::AudioStreamTrack()
+ : AudioStream()
+{
+}
+
+AudioStreamTrack::~AudioStreamTrack()
+{
+ const aaudio_stream_state_t state = getState();
+ bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
+ ALOGE_IF(bad, "stream not closed, in state %d", state);
+}
+
+aaudio_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
+{
+ aaudio_result_t result = AAUDIO_OK;
+
+ result = AudioStream::open(builder);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ // Try to create an AudioTrack
+ // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
+ int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
+ ? 2 : getSamplesPerFrame();
+ audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(samplesPerFrame);
+ ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
+ samplesPerFrame, channelMask);
+
+ AudioTrack::callback_t callback = nullptr;
+ // TODO add more performance options
+ audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
+ size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
+ : builder.getBufferCapacity();
+ // TODO implement an unspecified AudioTrack format then use that.
+ audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
+ ? AUDIO_FORMAT_PCM_FLOAT
+ : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
+
+ mAudioTrack = new AudioTrack(
+ (audio_stream_type_t) AUDIO_STREAM_MUSIC,
+ getSampleRate(),
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ callback,
+ nullptr, // user callback data
+ 0, // notificationFrames
+ AUDIO_SESSION_ALLOCATE,
+ AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
+ );
+
+ // Did we get a valid track?
+ status_t status = mAudioTrack->initCheck();
+ ALOGD("AudioStreamTrack::open(), initCheck() returned %d", status);
+ if (status != NO_ERROR) {
+ close();
+ ALOGE("AudioStreamTrack::open(), initCheck() returned %d", status);
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+
+ // Get the actual values from the AudioTrack.
+ setSamplesPerFrame(mAudioTrack->channelCount());
+ setSampleRate(mAudioTrack->getSampleRate());
+ setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format()));
+
+ setState(AAUDIO_STREAM_STATE_OPEN);
+
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::close()
+{
+ // TODO maybe add close() or release() to AudioTrack API then call it from here
+ if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
+ mAudioTrack.clear(); // TODO is this right?
+ setState(AAUDIO_STREAM_STATE_CLOSED);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::requestStart()
+{
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Get current position so we can detect when the track is playing.
+ status_t err = mAudioTrack->getPosition(&mPositionWhenStarting);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ err = mAudioTrack->start();
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else {
+ setState(AAUDIO_STREAM_STATE_STARTING);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::requestPause()
+{
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ } else if (getState() != AAUDIO_STREAM_STATE_STARTING
+ && getState() != AAUDIO_STREAM_STATE_STARTED) {
+ ALOGE("requestPause(), called when state is %s", AAudio_convertStreamStateToText(getState()));
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ setState(AAUDIO_STREAM_STATE_PAUSING);
+ mAudioTrack->pause();
+ status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::requestFlush() {
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ } else if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ setState(AAUDIO_STREAM_STATE_FLUSHING);
+ incrementFramesRead(getFramesWritten() - getFramesRead());
+ mAudioTrack->flush();
+ mFramesWritten.reset32();
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::requestStop() {
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+ incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
+ mAudioTrack->stop();
+ mFramesWritten.reset32();
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::updateState()
+{
+ status_t err;
+ aaudio_wrapping_frames_t position;
+ switch (getState()) {
+ // TODO add better state visibility to AudioTrack
+ case AAUDIO_STREAM_STATE_STARTING:
+ if (mAudioTrack->hasStarted()) {
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ }
+ break;
+ case AAUDIO_STREAM_STATE_PAUSING:
+ if (mAudioTrack->stopped()) {
+ err = mAudioTrack->getPosition(&position);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (position == mPositionWhenPausing) {
+ // Has stream really stopped advancing?
+ setState(AAUDIO_STREAM_STATE_PAUSED);
+ }
+ mPositionWhenPausing = position;
+ }
+ break;
+ case AAUDIO_STREAM_STATE_FLUSHING:
+ {
+ err = mAudioTrack->getPosition(&position);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (position == 0) {
+ // Advance frames read to match written.
+ setState(AAUDIO_STREAM_STATE_FLUSHED);
+ }
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STOPPING:
+ if (mAudioTrack->stopped()) {
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ }
+ break;
+ default:
+ break;
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::write(const void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds)
+{
+ aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
+ aaudio_size_bytes_t numBytes;
+ aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ // TODO add timeout to AudioTrack
+ bool blocking = timeoutNanoseconds > 0;
+ ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking);
+ if (bytesWritten == WOULD_BLOCK) {
+ return 0;
+ } else if (bytesWritten < 0) {
+ ALOGE("invalid write, returned %d", (int)bytesWritten);
+ return AAudioConvert_androidToAAudioResult(bytesWritten);
+ }
+ aaudio_size_frames_t framesWritten = (aaudio_size_frames_t)(bytesWritten / bytesPerFrame);
+ incrementFramesWritten(framesWritten);
+ return framesWritten;
+}
+
+aaudio_result_t AudioStreamTrack::setBufferSize(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames)
+{
+ ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
+ if (result < 0) { // setBufferSizeInFrames() returns the new size or a negative error
+ return AAudioConvert_androidToAAudioResult(result);
+ } else {
+ *actualFrames = result;
+ return AAUDIO_OK;
+ }
+}
+
+aaudio_size_frames_t AudioStreamTrack::getBufferSize() const
+{
+ return static_cast<aaudio_size_frames_t>(mAudioTrack->getBufferSizeInFrames());
+}
+
+aaudio_size_frames_t AudioStreamTrack::getBufferCapacity() const
+{
+ return static_cast<aaudio_size_frames_t>(mAudioTrack->frameCount());
+}
+
+int32_t AudioStreamTrack::getXRunCount() const
+{
+ return static_cast<int32_t>(mAudioTrack->getUnderrunCount());
+}
+
+int32_t AudioStreamTrack::getFramesPerBurst() const
+{
+ return 192; // TODO add query to AudioTrack.cpp
+}
+
+aaudio_position_frames_t AudioStreamTrack::getFramesRead() {
+ aaudio_wrapping_frames_t position;
+ status_t result;
+ switch (getState()) {
+ case AAUDIO_STREAM_STATE_STARTING:
+ case AAUDIO_STREAM_STATE_STARTED:
+ case AAUDIO_STREAM_STATE_STOPPING:
+ result = mAudioTrack->getPosition(&position);
+ if (result == OK) {
+ mFramesRead.update32(position);
+ }
+ break;
+ default:
+ break;
+ }
+ return AudioStream::getFramesRead();
+}
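The playback side follows the same convention; a sketch of a single write against this class, with track assumed to be an open aaudio::AudioStreamTrack:

    // Sketch: write one burst of silence (192 frames matches the hard-coded burst above).
    float silence[2 * 192] = {0.0f};   // stereo PCM_FLOAT
    aaudio_result_t frames = track->write(silence, 192,
                                          100 * AAUDIO_NANOS_PER_MILLISECOND);
    if (frames >= 0) {
        // write() already advanced the frames-written counter internally.
    } else {
        // negative AAudio error converted from the AudioTrack status
    }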
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
new file mode 100644
index 0000000..73d0cac
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_TRACK_H
+#define LEGACY_AUDIO_STREAM_TRACK_H
+
+#include <media/AudioTrack.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AAudioLegacy.h"
+
+namespace aaudio {
+
+
+/**
+ * Internal stream that uses the legacy AudioTrack path.
+ */
+class AudioStreamTrack : public AudioStream {
+public:
+ AudioStreamTrack();
+
+ virtual ~AudioStreamTrack();
+
+
+ virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
+ virtual aaudio_result_t close() override;
+
+ virtual aaudio_result_t requestStart() override;
+ virtual aaudio_result_t requestPause() override;
+ virtual aaudio_result_t requestFlush() override;
+ virtual aaudio_result_t requestStop() override;
+
+ virtual aaudio_result_t getTimestamp(clockid_t clockId,
+ aaudio_position_frames_t *framePosition,
+ aaudio_nanoseconds_t *timeNanoseconds) override {
+ return AAUDIO_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
+ }
+
+ virtual aaudio_result_t write(const void *buffer,
+ aaudio_size_frames_t numFrames,
+ aaudio_nanoseconds_t timeoutNanoseconds) override;
+
+ virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+ aaudio_size_frames_t *actualFrames) override;
+ virtual aaudio_size_frames_t getBufferSize() const override;
+ virtual aaudio_size_frames_t getBufferCapacity() const override;
+ virtual aaudio_size_frames_t getFramesPerBurst() const override;
+ virtual int32_t getXRunCount() const override;
+
+ virtual aaudio_position_frames_t getFramesRead() override;
+
+ virtual aaudio_result_t updateState() override;
+
+private:
+ android::sp<android::AudioTrack> mAudioTrack;
+ // TODO add 64-bit position reporting to AudioTrack and use it.
+ aaudio_wrapping_frames_t mPositionWhenStarting = 0;
+ aaudio_wrapping_frames_t mPositionWhenPausing = 0;
+};
+
+} /* namespace aaudio */
+
+#endif /* LEGACY_AUDIO_STREAM_TRACK_H */
diff --git a/media/libaaudio/src/legacy/README.md b/media/libaaudio/src/legacy/README.md
new file mode 100644
index 0000000..8805915
--- /dev/null
+++ b/media/libaaudio/src/legacy/README.md
@@ -0,0 +1,2 @@
+The legacy folder contains the classes that implement AAudio AudioStream on top of
+Android AudioTrack and AudioRecord.
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
new file mode 100644
index 0000000..34c1ae4
--- /dev/null
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <utils/Errors.h>
+
+#include "aaudio/AAudioDefinitions.h"
+#include "AAudioUtilities.h"
+
+using namespace android;
+
+aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format) {
+ aaudio_size_bytes_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ switch (format) {
+ case AAUDIO_FORMAT_PCM_I16:
+ size = sizeof(int16_t);
+ break;
+ case AAUDIO_FORMAT_PCM_I32:
+ case AAUDIO_FORMAT_PCM_I8_24:
+ size = sizeof(int32_t);
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ size = sizeof(float);
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+// TODO This is similar to a function in audio_utils. Consider using that instead.
+void AAudioConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination) {
+ for (int i = 0; i < numSamples; i++) {
+ float fval = source[i];
+ fval += 1.0; // to avoid discontinuity at 0.0 caused by truncation
+ fval *= 32768.0f;
+ int32_t sample = (int32_t) fval;
+ // clip to 16-bit range
+ if (sample < 0) sample = 0;
+ else if (sample > 0x0FFFF) sample = 0x0FFFF;
+ sample -= 32768; // center at zero
+ destination[i] = (int16_t) sample;
+ }
+}
+
+void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
+ for (int i = 0; i < numSamples; i++) {
+ destination[i] = source[i] * (1.0f / 32768.0f);
+ }
+}
+
+status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
+ // This covers the case for AAUDIO_OK and for positive results.
+ if (result >= 0) {
+ return result;
+ }
+ status_t status;
+ switch (result) {
+ case AAUDIO_ERROR_DISCONNECTED:
+ case AAUDIO_ERROR_INVALID_HANDLE:
+ status = DEAD_OBJECT;
+ break;
+ case AAUDIO_ERROR_INVALID_STATE:
+ status = INVALID_OPERATION;
+ break;
+ case AAUDIO_ERROR_UNEXPECTED_VALUE: // TODO redundant?
+ case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
+ status = BAD_VALUE;
+ break;
+ case AAUDIO_ERROR_WOULD_BLOCK:
+ status = WOULD_BLOCK;
+ break;
+ // TODO add more result codes
+ default:
+ status = UNKNOWN_ERROR;
+ break;
+ }
+ return status;
+}
+
+aaudio_result_t AAudioConvert_androidToAAudioResult(status_t status) {
+ // This covers the case for OK and for positive result.
+ if (status >= 0) {
+ return status;
+ }
+ aaudio_result_t result;
+ switch (status) {
+ case BAD_TYPE:
+ result = AAUDIO_ERROR_INVALID_HANDLE;
+ break;
+ case DEAD_OBJECT:
+ result = AAUDIO_ERROR_DISCONNECTED;
+ break;
+ case INVALID_OPERATION:
+ result = AAUDIO_ERROR_INVALID_STATE;
+ break;
+ case BAD_VALUE:
+ result = AAUDIO_ERROR_UNEXPECTED_VALUE;
+ break;
+ case WOULD_BLOCK:
+ result = AAUDIO_ERROR_WOULD_BLOCK;
+ break;
+ // TODO add more status codes
+ default:
+ result = AAUDIO_ERROR_INTERNAL;
+ break;
+ }
+ return result;
+}
+
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudioFormat) {
+ audio_format_t androidFormat;
+ switch (aaudioFormat) {
+ case AAUDIO_FORMAT_PCM_I16:
+ androidFormat = AUDIO_FORMAT_PCM_16_BIT;
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ androidFormat = AUDIO_FORMAT_PCM_FLOAT;
+ break;
+ case AAUDIO_FORMAT_PCM_I8_24:
+ androidFormat = AUDIO_FORMAT_PCM_8_24_BIT;
+ break;
+ case AAUDIO_FORMAT_PCM_I32:
+ androidFormat = AUDIO_FORMAT_PCM_32_BIT;
+ break;
+ default:
+ androidFormat = AUDIO_FORMAT_DEFAULT;
+ ALOGE("AAudioConvert_aaudioToAndroidDataFormat 0x%08X unrecognized", aaudioFormat);
+ break;
+ }
+ return androidFormat;
+}
+
+aaudio_audio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
+ aaudio_audio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
+ switch (androidFormat) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+ break;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ aaudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
+ break;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ aaudioFormat = AAUDIO_FORMAT_PCM_I32;
+ break;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ aaudioFormat = AAUDIO_FORMAT_PCM_I8_24;
+ break;
+ default:
+ aaudioFormat = AAUDIO_FORMAT_INVALID;
+ ALOGE("AAudioConvert_androidToAAudioDataFormat 0x%08X unrecognized", androidFormat);
+ break;
+ }
+ return aaudioFormat;
+}
+
+aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
+ aaudio_size_bytes_t bytesPerFrame,
+ aaudio_size_bytes_t *sizeInBytes) {
+ // TODO implement more elegantly
+ const int32_t maxChannels = 256; // ridiculously large
+ const aaudio_size_frames_t maxBytesPerFrame = maxChannels * sizeof(float);
+ // Prevent overflow by limiting multiplicands.
+ if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
+ ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ *sizeInBytes = numFrames * bytesPerFrame;
+ return AAUDIO_OK;
+}
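A sketch of the overflow-checked frame-to-byte conversion as the legacy streams use it before handing sizes to AudioTrack/AudioRecord; the counts are illustrative:

    // Sketch: convert a frame count to a byte count safely.
    aaudio_size_frames_t numFrames = 480;
    aaudio_size_bytes_t bytesPerFrame = 2 * sizeof(int16_t);   // stereo PCM_I16
    aaudio_size_bytes_t numBytes = 0;
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
    if (result != AAUDIO_OK) {
        // AAUDIO_ERROR_OUT_OF_RANGE if the multiplication would overflow
    }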
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
new file mode 100644
index 0000000..38696df
--- /dev/null
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_AAUDIO_UTILITIES_H
+#define UTILITY_AAUDIO_UTILITIES_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <hardware/audio.h>
+
+#include "aaudio/AAudioDefinitions.h"
+
+/**
+ * Convert an AAudio result into the closest matching Android status.
+ */
+android::status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result);
+
+/**
+ * Convert an Android status into the closest matching AAudio result.
+ */
+aaudio_result_t AAudioConvert_androidToAAudioResult(android::status_t status);
+
+void AAudioConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination);
+
+void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination);
+
+/**
+ * Calculate the number of bytes and prevent numeric overflow.
+ * @param numFrames frame count
+ * @param bytesPerFrame size of a frame in bytes
+ * @param sizeInBytes total size in bytes
+ * @return AAUDIO_OK or negative error, e.g. AAUDIO_ERROR_OUT_OF_RANGE
+ */
+aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
+ aaudio_size_bytes_t bytesPerFrame,
+ aaudio_size_bytes_t *sizeInBytes);
+
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudio_format);
+
+aaudio_audio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t format);
+
+/**
+ * @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
+ */
+aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
+
+#endif //UTILITY_AAUDIO_UTILITIES_H
diff --git a/media/liboboe/src/utility/AudioClock.h b/media/libaaudio/src/utility/AudioClock.h
similarity index 75%
rename from media/liboboe/src/utility/AudioClock.h
rename to media/libaaudio/src/utility/AudioClock.h
index 1a5c209..9ac21d3 100644
--- a/media/liboboe/src/utility/AudioClock.h
+++ b/media/libaaudio/src/utility/AudioClock.h
@@ -14,41 +14,41 @@
* limitations under the License.
*/
-#ifndef UTILITY_AUDIOCLOCK_H
-#define UTILITY_AUDIOCLOCK_H
+#ifndef UTILITY_AUDIO_CLOCK_H
+#define UTILITY_AUDIO_CLOCK_H
#include <stdint.h>
#include <time.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
class AudioClock {
public:
- static oboe_nanoseconds_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+ static aaudio_nanoseconds_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
struct timespec time;
int result = clock_gettime(clockId, &time);
if (result < 0) {
return -errno;
}
- return (time.tv_sec * OBOE_NANOS_PER_SECOND) + time.tv_nsec;
+ return (time.tv_sec * AAUDIO_NANOS_PER_SECOND) + time.tv_nsec;
}
/**
* Sleep until the specified absolute time.
- * Return immediately with OBOE_ERROR_ILLEGAL_ARGUMENT if a negative
+ * Return immediately with AAUDIO_ERROR_ILLEGAL_ARGUMENT if a negative
* nanoTime is specified.
*
* @param nanoTime time to wake up
* @param clockId CLOCK_MONOTONIC is default
* @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
*/
- static int sleepUntilNanoTime(oboe_nanoseconds_t nanoTime,
+ static int sleepUntilNanoTime(aaudio_nanoseconds_t nanoTime,
clockid_t clockId = CLOCK_MONOTONIC) {
if (nanoTime > 0) {
struct timespec time;
- time.tv_sec = nanoTime / OBOE_NANOS_PER_SECOND;
+ time.tv_sec = nanoTime / AAUDIO_NANOS_PER_SECOND;
// Calculate the fractional nanoseconds. Avoids expensive % operation.
- time.tv_nsec = nanoTime - (time.tv_sec * OBOE_NANOS_PER_SECOND);
+ time.tv_nsec = nanoTime - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
switch (err) {
case EINTR:
@@ -60,7 +60,7 @@
return 0 - err;
}
} else {
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
}
}
@@ -72,12 +72,12 @@
* @param clockId CLOCK_MONOTONIC is default
* @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
*/
- static int sleepForNanos(oboe_nanoseconds_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
+ static int sleepForNanos(aaudio_nanoseconds_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
if (nanoseconds > 0) {
struct timespec time;
- time.tv_sec = nanoseconds / OBOE_NANOS_PER_SECOND;
+ time.tv_sec = nanoseconds / AAUDIO_NANOS_PER_SECOND;
// Calculate the fractional nanoseconds. Avoids expensive % operation.
- time.tv_nsec = nanoseconds - (time.tv_sec * OBOE_NANOS_PER_SECOND);
+ time.tv_nsec = nanoseconds - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
const int flags = 0; // documented as relative sleep
int err = clock_nanosleep(clockId, flags, &time, nullptr);
switch (err) {
@@ -95,4 +95,4 @@
};
-#endif // UTILITY_AUDIOCLOCK_H
+#endif // UTILITY_AUDIO_CLOCK_H
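A sketch of a drift-free periodic loop built from these helpers, using absolute wake times rather than repeated relative sleeps; the period and iteration count are illustrative:

    // Sketch: wake up every 10 ms relative to the initial clock reading.
    aaudio_nanoseconds_t wakeTime = AudioClock::getNanoseconds();
    for (int i = 0; i < 10; i++) {
        wakeTime += 10 * AAUDIO_NANOS_PER_MILLISECOND;
        int err = AudioClock::sleepUntilNanoTime(wakeTime);
        if (err < 0) {
            break;   // negative errno-style error from clock_nanosleep
        }
        // do the periodic work here
    }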
diff --git a/media/liboboe/src/utility/HandleTracker.cpp b/media/libaaudio/src/utility/HandleTracker.cpp
similarity index 77%
rename from media/liboboe/src/utility/HandleTracker.cpp
rename to media/libaaudio/src/utility/HandleTracker.cpp
index bf5fb63..c4880b8 100644
--- a/media/liboboe/src/utility/HandleTracker.cpp
+++ b/media/libaaudio/src/utility/HandleTracker.cpp
@@ -15,17 +15,20 @@
*
*/
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <assert.h>
#include <new>
#include <stdint.h>
-#include <assert.h>
+#include <utils/Mutex.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include "HandleTracker.h"
+using android::Mutex;
+
// Handle format is: tgggiiii
// where each letter is 4 bits, t=type, g=generation, i=index
@@ -80,15 +83,17 @@
HandleTracker::~HandleTracker()
{
+ Mutex::Autolock _l(mLock);
delete[] mHandleAddresses;
delete[] mHandleHeaders;
+ mHandleAddresses = nullptr;
}
bool HandleTracker::isInitialized() const {
return mHandleAddresses != nullptr;
}
-handle_tracker_slot_t HandleTracker::allocateSlot() {
+handle_tracker_slot_t HandleTracker::allocateSlot_l() {
void **allocated = mNextFreeAddress;
if (allocated == nullptr) {
return SLOT_UNAVAILABLE;
@@ -98,7 +103,7 @@
return (allocated - mHandleAddresses);
}
-handle_tracker_generation_t HandleTracker::nextGeneration(handle_tracker_slot_t index) {
+handle_tracker_generation_t HandleTracker::nextGeneration_l(handle_tracker_slot_t index) {
handle_tracker_generation_t generation = (mHandleHeaders[index] + 1) & GENERATION_MASK;
// Avoid generation zero so that 0x0 is not a valid handle.
if (generation == GENERATION_INVALID) {
@@ -107,24 +112,26 @@
return generation;
}
-oboe_handle_t HandleTracker::put(handle_tracker_type_t type, void *address)
+aaudio_handle_t HandleTracker::put(handle_tracker_type_t type, void *address)
{
if (type < 0 || type >= HANDLE_TRACKER_MAX_TYPES) {
- return static_cast<oboe_handle_t>(OBOE_ERROR_OUT_OF_RANGE);
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_OUT_OF_RANGE);
}
if (!isInitialized()) {
- return static_cast<oboe_handle_t>(OBOE_ERROR_NO_MEMORY);
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_NO_MEMORY);
}
+ Mutex::Autolock _l(mLock);
+
// Find an empty slot.
- handle_tracker_slot_t index = allocateSlot();
+ handle_tracker_slot_t index = allocateSlot_l();
if (index == SLOT_UNAVAILABLE) {
ALOGE("HandleTracker::put() no room for more handles");
- return static_cast<oboe_handle_t>(OBOE_ERROR_NO_FREE_HANDLES);
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_NO_FREE_HANDLES);
}
// Cycle the generation counter so stale handles can be detected.
- handle_tracker_generation_t generation = nextGeneration(index); // reads header table
+ handle_tracker_generation_t generation = nextGeneration_l(index); // reads header table
handle_tracker_header_t inputHeader = buildHeader(type, generation);
// These two writes may need to be observed by other threads or cores during get().
@@ -133,48 +140,55 @@
// TODO use store release to enforce memory order with get()
// Generate a handle.
- oboe_handle_t handle = buildHandle(inputHeader, index);
+ aaudio_handle_t handle = buildHandle(inputHeader, index);
ALOGV("HandleTracker::put(%p) returns 0x%08x", address, handle);
return handle;
}
handle_tracker_slot_t HandleTracker::handleToIndex(handle_tracker_type_t type,
- oboe_handle_t handle) const
+ aaudio_handle_t handle) const
{
// Validate the handle.
handle_tracker_slot_t index = extractIndex(handle);
if (index >= mMaxHandleCount) {
ALOGE("HandleTracker::handleToIndex() invalid handle = 0x%08X", handle);
- return static_cast<oboe_handle_t>(OBOE_ERROR_INVALID_HANDLE);
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_INVALID_HANDLE);
}
handle_tracker_generation_t handleGeneration = extractGeneration(handle);
handle_tracker_header_t inputHeader = buildHeader(type, handleGeneration);
+ // We do not need to synchronize this access to mHandleHeaders because it is constant for
+ // the lifetime of the handle.
if (inputHeader != mHandleHeaders[index]) {
ALOGE("HandleTracker::handleToIndex() inputHeader = 0x%08x != mHandleHeaders[%d] = 0x%08x",
inputHeader, index, mHandleHeaders[index]);
- return static_cast<oboe_handle_t>(OBOE_ERROR_INVALID_HANDLE);
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_INVALID_HANDLE);
}
return index;
}
-handle_tracker_address_t HandleTracker::get(handle_tracker_type_t type, oboe_handle_t handle) const
+handle_tracker_address_t HandleTracker::get(handle_tracker_type_t type, aaudio_handle_t handle) const
{
if (!isInitialized()) {
return nullptr;
}
handle_tracker_slot_t index = handleToIndex(type, handle);
if (index >= 0) {
+ // We do not need to synchronize this access to mHandleHeaders because this slot
+ // is allocated and, therefore, not part of the linked list of free slots.
return mHandleAddresses[index];
} else {
return nullptr;
}
}
-handle_tracker_address_t HandleTracker::remove(handle_tracker_type_t type, oboe_handle_t handle) {
+handle_tracker_address_t HandleTracker::remove(handle_tracker_type_t type, aaudio_handle_t handle) {
if (!isInitialized()) {
return nullptr;
}
+
+ Mutex::Autolock _l(mLock);
+
handle_tracker_slot_t index = handleToIndex(type, handle);
if (index >= 0) {
handle_tracker_address_t address = mHandleAddresses[index];
@@ -194,9 +208,9 @@
}
}
-oboe_handle_t HandleTracker::buildHandle(handle_tracker_header_t typeGeneration,
+aaudio_handle_t HandleTracker::buildHandle(handle_tracker_header_t typeGeneration,
handle_tracker_slot_t index) {
- return (oboe_handle_t)((typeGeneration << GENERATION_SHIFT) | (index & INDEX_MASK));
+ return (aaudio_handle_t)((typeGeneration << GENERATION_SHIFT) | (index & INDEX_MASK));
}
handle_tracker_header_t HandleTracker::buildHeader(handle_tracker_type_t type,
@@ -206,12 +220,12 @@
| (generation & GENERATION_MASK));
}
-handle_tracker_slot_t HandleTracker::extractIndex(oboe_handle_t handle)
+handle_tracker_slot_t HandleTracker::extractIndex(aaudio_handle_t handle)
{
return handle & INDEX_MASK;
}
-handle_tracker_generation_t HandleTracker::extractGeneration(oboe_handle_t handle)
+handle_tracker_generation_t HandleTracker::extractGeneration(aaudio_handle_t handle)
{
return (handle >> GENERATION_SHIFT) & GENERATION_MASK;
}
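The handle layout comment near the top of HandleTracker.cpp ("tgggiiii": 4 bits of type, 12 bits of generation, 16 bits of index) is easiest to see with a standalone example. The sketch below only illustrates the bit packing; the constant names and exact widths are assumptions for illustration, not the libaaudio values.

// Standalone sketch of the "tgggiiii" handle layout described above.
// The shift and mask constants here are illustrative, not libaaudio's.
#include <cstdint>
#include <cstdio>

using handle_t = int32_t;

constexpr int      kIndexBits      = 16;                        // iiii
constexpr int      kGenerationBits = 12;                        // ggg
constexpr uint32_t kIndexMask      = (1u << kIndexBits) - 1;
constexpr uint32_t kGenerationMask = (1u << kGenerationBits) - 1;

handle_t buildHandle(uint32_t type, uint32_t generation, uint32_t index) {
    // Header = type nibble followed by the generation field.
    uint32_t header = ((type & 0xF) << kGenerationBits) | (generation & kGenerationMask);
    return static_cast<handle_t>((header << kIndexBits) | (index & kIndexMask));
}

int main() {
    handle_t h = buildHandle(3 /*type*/, 7 /*generation*/, 42 /*index*/);
    printf("handle = 0x%08X, index = %u, generation = %u\n",
           static_cast<uint32_t>(h),
           h & kIndexMask,
           (h >> kIndexBits) & kGenerationMask);
    return 0;
}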
diff --git a/media/liboboe/src/utility/HandleTracker.h b/media/libaaudio/src/utility/HandleTracker.h
similarity index 74%
rename from media/liboboe/src/utility/HandleTracker.h
rename to media/libaaudio/src/utility/HandleTracker.h
index 4c08321..c80860c 100644
--- a/media/liboboe/src/utility/HandleTracker.h
+++ b/media/libaaudio/src/utility/HandleTracker.h
@@ -14,10 +14,11 @@
* limitations under the License.
*/
-#ifndef UTILITY_HANDLETRACKER_H
-#define UTILITY_HANDLETRACKER_H
+#ifndef UTILITY_HANDLE_TRACKER_H
+#define UTILITY_HANDLE_TRACKER_H
#include <stdint.h>
+#include <utils/Mutex.h>
typedef int32_t handle_tracker_type_t; // what kind of handle
typedef int32_t handle_tracker_slot_t; // index in allocation table
@@ -53,11 +54,13 @@
/**
* Store a pointer and return a handle that can be used to retrieve the pointer.
*
+ * It is safe to call put() or remove() from multiple threads.
+ *
* @param expectedType the type of the object to be tracked
* @param address pointer to be converted to a handle
* @return a valid handle or a negative error
*/
- oboe_handle_t put(handle_tracker_type_t expectedType, handle_tracker_address_t address);
+ aaudio_handle_t put(handle_tracker_type_t expectedType, handle_tracker_address_t address);
/**
* Get the original pointer associated with the handle.
@@ -69,37 +72,50 @@
* @param handle to be converted to a pointer
* @return address associated with handle or nullptr
*/
- handle_tracker_address_t get(handle_tracker_type_t expectedType, oboe_handle_t handle) const;
+ handle_tracker_address_t get(handle_tracker_type_t expectedType, aaudio_handle_t handle) const;
/**
* Free up the storage associated with the handle.
* Subsequent attempts to use the handle will fail.
*
+ * Do NOT remove() a handle while get() is being called for the same handle from another thread.
+ *
* @param expectedType should match the type we passed to put()
* @param handle to be removed from tracking
* @return address associated with handle or nullptr if not found
*/
- handle_tracker_address_t remove(handle_tracker_type_t expectedType, oboe_handle_t handle);
+ handle_tracker_address_t remove(handle_tracker_type_t expectedType, aaudio_handle_t handle);
private:
const int32_t mMaxHandleCount; // size of array
- // This is const after initialization.
+ // This address is const after initialization.
handle_tracker_address_t * mHandleAddresses; // address of objects or a free linked list node
- // This is const after initialization.
+ // This address is const after initialization.
handle_tracker_header_t * mHandleHeaders; // combination of type and generation
- handle_tracker_address_t * mNextFreeAddress; // head of the linked list of free nodes in mHandleAddresses
+ // head of the linked list of free nodes in mHandleAddresses
+ handle_tracker_address_t * mNextFreeAddress;
+
+ // This Mutex protects the linked list of free nodes.
+ // The list is managed using mHandleAddresses and mNextFreeAddress.
+ // The data in mHandleHeaders is only changed by put() and remove().
+ android::Mutex mLock;
/**
* Pull slot off of a list of empty slots.
* @return index or a negative error
*/
- handle_tracker_slot_t allocateSlot();
+ handle_tracker_slot_t allocateSlot_l();
+
+ /**
+ * Increment the generation for the slot, avoiding zero.
+ */
+ handle_tracker_generation_t nextGeneration_l(handle_tracker_slot_t index);
/**
* Validate the handle and return the corresponding index.
* @return slot index or a negative error
*/
- handle_tracker_slot_t handleToIndex(oboe_handle_t handle, handle_tracker_type_t type) const;
+ handle_tracker_slot_t handleToIndex(handle_tracker_type_t type, aaudio_handle_t handle) const;
/**
* Construct a handle from a header and an index.
@@ -107,7 +123,7 @@
* @param index slot index returned from allocateSlot
* @return handle or a negative error
*/
- oboe_handle_t buildHandle(handle_tracker_header_t header, handle_tracker_slot_t index);
+ static aaudio_handle_t buildHandle(handle_tracker_header_t header, handle_tracker_slot_t index);
/**
* Combine a type and a generation field into a header.
@@ -120,20 +136,15 @@
* Does not validate the handle.
* @return index associated with a handle
*/
- static handle_tracker_slot_t extractIndex(oboe_handle_t handle);
+ static handle_tracker_slot_t extractIndex(aaudio_handle_t handle);
/**
* Extract the generation from a handle.
* Does not validate the handle.
* @return generation associated with a handle
*/
- static handle_tracker_generation_t extractGeneration(oboe_handle_t handle);
-
- /**
- * Increment the generation for the slot, avoiding zero.
- */
- handle_tracker_generation_t nextGeneration(handle_tracker_slot_t index);
+ static handle_tracker_generation_t extractGeneration(aaudio_handle_t handle);
};
-#endif //UTILITY_HANDLETRACKER_H
+#endif //UTILITY_HANDLE_TRACKER_H
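For reference, a minimal usage sketch of the contract documented in this header: put() and remove() are safe to call from multiple threads, while remove() must not race a get() for the same handle. It assumes only the interface shown in this diff; the type constant and helper names are invented for the example.

// Minimal usage sketch of HandleTracker, assuming the header in this diff.
#include <aaudio/AAudioDefinitions.h>
#include "HandleTracker.h"

static HandleTracker sTracker(64 /* max handles */);
static constexpr handle_tracker_type_t kTypeStream = 1;   // arbitrary type tag

aaudio_handle_t registerStream(void *stream) {
    // put() and remove() may be called concurrently from multiple threads.
    return sTracker.put(kTypeStream, stream);
}

void *lookupStream(aaudio_handle_t handle) {
    // Returns nullptr for a stale, mistyped, or out-of-range handle.
    return sTracker.get(kTypeStream, handle);
}

void unregisterStream(aaudio_handle_t handle) {
    // Caller must ensure no other thread is still inside get() for this handle.
    sTracker.remove(kTypeStream, handle);
}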
diff --git a/media/liboboe/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
similarity index 95%
rename from media/liboboe/src/utility/MonotonicCounter.h
rename to media/libaaudio/src/utility/MonotonicCounter.h
index befad21..81d7f89 100644
--- a/media/liboboe/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef UTILITY_MONOTONICCOUNTER_H
-#define UTILITY_MONOTONICCOUNTER_H
+#ifndef UTILITY_MONOTONIC_COUNTER_H
+#define UTILITY_MONOTONIC_COUNTER_H
#include <stdint.h>
@@ -88,4 +88,4 @@
};
-#endif //UTILITY_MONOTONICCOUNTER_H
+#endif //UTILITY_MONOTONIC_COUNTER_H
diff --git a/media/libaaudio/src/utility/README.md b/media/libaaudio/src/utility/README.md
new file mode 100644
index 0000000..0ac74ea
--- /dev/null
+++ b/media/libaaudio/src/utility/README.md
@@ -0,0 +1,3 @@
+The utility folder contains classes and functions that may be shared between the AAudio client and server.
+They might also be useful outside AAudio.
+They generally do not depend on AAudio functionality.
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
new file mode 100644
index 0000000..24dad4a
--- /dev/null
+++ b/media/libaaudio/tests/Android.mk
@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src/core \
+ frameworks/av/media/libaaudio/src/utility
+LOCAL_SRC_FILES := test_aaudio_api.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+ libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_MODULE := test_aaudio_api
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src/core \
+ frameworks/av/media/libaaudio/src/utility
+LOCAL_SRC_FILES:= test_handle_tracker.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+ libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_MODULE := test_handle_tracker
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/src/core \
+ frameworks/av/media/libaaudio/src/fifo \
+ frameworks/av/media/libaaudio/src/utility
+LOCAL_SRC_FILES:= test_marshalling.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+ libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_MODULE := test_marshalling
+include $(BUILD_NATIVE_TEST)
diff --git a/media/libaaudio/tests/test_aaudio_api.cpp b/media/libaaudio/tests/test_aaudio_api.cpp
new file mode 100644
index 0000000..7db3688
--- /dev/null
+++ b/media/libaaudio/tests/test_aaudio_api.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for AAudio 'C' API.
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <gtest/gtest.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "AAudioUtilities.h"
+
+#define DEFAULT_STATE_TIMEOUT (500 * AAUDIO_NANOS_PER_MILLISECOND)
+
+// Test AAudioStreamBuilder
+TEST(test_aaudio_api, aaudio_stream_builder) {
+ const aaudio_sample_rate_t requestedSampleRate1 = 48000;
+ const aaudio_sample_rate_t requestedSampleRate2 = 44100;
+ const int32_t requestedSamplesPerFrame = 2;
+ const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM16;
+
+ aaudio_sample_rate_t sampleRate = 0;
+ int32_t samplesPerFrame = 0;
+ aaudio_audio_format_t actualDataFormat;
+ AAudioStreamBuilder aaudioBuilder1;
+ AAudioStreamBuilder aaudioBuilder2;
+
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to define the stream.
+ result = AAudio_createStreamBuilder(&aaudioBuilder1);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Request stream properties.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSampleRate(aaudioBuilder1, requestedSampleRate1));
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder1, requestedSamplesPerFrame));
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setFormat(aaudioBuilder1, requestedDataFormat));
+
+ // Check to make sure builder saved the properties.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder1, &sampleRate));
+ EXPECT_EQ(requestedSampleRate1, sampleRate);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSamplesPerFrame(aaudioBuilder1, &samplesPerFrame));
+ EXPECT_EQ(requestedSamplesPerFrame, samplesPerFrame);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getFormat(aaudioBuilder1, &actualDataFormat));
+ EXPECT_EQ(requestedDataFormat, actualDataFormat);
+
+ result = AAudioStreamBuilder_getSampleRate(0x0BADCAFE, &sampleRate); // ridiculous token
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_HANDLE, result);
+
+ // Create a second builder and make sure they do not collide.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder2));
+ ASSERT_NE(aaudioBuilder1, aaudioBuilder2);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSampleRate(aaudioBuilder2, requestedSampleRate2));
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder1, &sampleRate));
+ EXPECT_EQ(requestedSampleRate1, sampleRate);
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder2, &sampleRate));
+ EXPECT_EQ(requestedSampleRate2, sampleRate);
+
+ // Delete the builder.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder1));
+
+ // Now it should no longer be valid.
+ // Note that test assumes we are using the HandleTracker. If we use plain pointers
+ // then it will be difficult to detect this kind of error.
+ result = AAudioStreamBuilder_getSampleRate(aaudioBuilder1, &sampleRate); // stale token
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_HANDLE, result);
+
+ // Second builder should still be valid.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder2, &sampleRate));
+ EXPECT_EQ(requestedSampleRate2, sampleRate);
+
+ // Delete the second builder.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder2));
+
+ // Now it should no longer be valid. Assumes HandleTracker is used.
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_HANDLE, AAudioStreamBuilder_getSampleRate(aaudioBuilder2, &sampleRate));
+}
+
+// Test creating a default stream with everything unspecified.
+TEST(test_aaudio_api, aaudio_stream_unspecified) {
+ AAudioStreamBuilder aaudioBuilder;
+ AAudioStream aaudioStream;
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to define the stream.
+ result = AAudio_createStreamBuilder(&aaudioBuilder);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+ // Cleanup
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder));
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+// Test Writing to an AAudioStream
+void runtest_aaudio_stream(aaudio_sharing_mode_t requestedSharingMode) {
+ const aaudio_sample_rate_t requestedSampleRate = 48000;
+ const int32_t requestedSamplesPerFrame = 2;
+ const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM16;
+
+ aaudio_sample_rate_t actualSampleRate = -1;
+ int32_t actualSamplesPerFrame = -1;
+ aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_INVALID;
+ aaudio_sharing_mode_t actualSharingMode;
+ aaudio_size_frames_t framesPerBurst = -1;
+ int writeLoops = 0;
+
+ aaudio_size_frames_t framesWritten = 0;
+ aaudio_size_frames_t framesPrimed = 0;
+ aaudio_position_frames_t framesTotal = 0;
+ aaudio_position_frames_t aaudioFramesRead = 0;
+ aaudio_position_frames_t aaudioFramesRead1 = 0;
+ aaudio_position_frames_t aaudioFramesRead2 = 0;
+ aaudio_position_frames_t aaudioFramesWritten = 0;
+
+ aaudio_nanoseconds_t timeoutNanos;
+
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ AAudioStreamBuilder aaudioBuilder;
+ AAudioStream aaudioStream;
+
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to define the stream.
+ result = AAudio_createStreamBuilder(&aaudioBuilder);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Request stream properties.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate));
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame));
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat));
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode));
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getState(aaudioStream, &state));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+ // Check to see what kind of stream we actually got.
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getSampleRate(aaudioStream, &actualSampleRate));
+ ASSERT_TRUE(actualSampleRate >= 44100 && actualSampleRate <= 96000); // TODO what is range?
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getSamplesPerFrame(aaudioStream, &actualSamplesPerFrame));
+ ASSERT_TRUE(actualSamplesPerFrame >= 1 && actualSamplesPerFrame <= 16); // TODO what is max?
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getSharingMode(aaudioStream, &actualSharingMode));
+ ASSERT_TRUE(actualSharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE
+ || actualSharingMode == AAUDIO_SHARING_MODE_LEGACY);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getFormat(aaudioStream, &actualDataFormat));
+ EXPECT_NE(AAUDIO_FORMAT_INVALID, actualDataFormat);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesPerBurst(aaudioStream, &framesPerBurst));
+ ASSERT_TRUE(framesPerBurst >= 16 && framesPerBurst <= 1024); // TODO what is min/max?
+
+ // Allocate a buffer for the audio data.
+ // TODO handle possibility of other data formats
+ ASSERT_TRUE(actualDataFormat == AAUDIO_FORMAT_PCM16);
+ size_t dataSizeSamples = framesPerBurst * actualSamplesPerFrame;
+ int16_t *data = new int16_t[dataSizeSamples];
+ ASSERT_TRUE(nullptr != data);
+ memset(data, 0, sizeof(int16_t) * dataSizeSamples);
+
+ // Prime the buffer.
+ timeoutNanos = 0;
+ do {
+ framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+ // There should be some room for priming the buffer.
+ framesTotal += framesWritten;
+ ASSERT_GE(framesWritten, 0);
+ ASSERT_LE(framesWritten, framesPerBurst);
+ } while (framesWritten > 0);
+ ASSERT_TRUE(framesTotal > 0);
+
+ // Start/write/pause more than once to see if it fails after the first time.
+ // Write some data and measure the rate to see if the timing is OK.
+ for (int numLoops = 0; numLoops < 2; numLoops++) {
+ // Start and wait for server to respond.
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_STARTING,
+ &state,
+ DEFAULT_STATE_TIMEOUT));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, state);
+
+ // Write some data while we are running. Read counter should be advancing.
+ writeLoops = 1 * actualSampleRate / framesPerBurst; // 1 second
+ ASSERT_LT(2, writeLoops); // detect absurdly high framesPerBurst
+ timeoutNanos = 10 * AAUDIO_NANOS_PER_SECOND * framesPerBurst / actualSampleRate; // bursts
+ framesWritten = 1;
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+ aaudioFramesRead1 = aaudioFramesRead;
+ aaudio_nanoseconds_t beginTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+ do {
+ framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+ ASSERT_GE(framesWritten, 0);
+ ASSERT_LE(framesWritten, framesPerBurst);
+
+ framesTotal += framesWritten;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesWritten(aaudioStream, &aaudioFramesWritten));
+ EXPECT_EQ(framesTotal, aaudioFramesWritten);
+
+ // Try to get a more accurate measure of the sample rate.
+ if (beginTime == 0) {
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+ if (aaudioFramesRead > aaudioFramesRead1) { // is read pointer advancing
+ beginTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+ aaudioFramesRead1 = aaudioFramesRead;
+ }
+ }
+ } while (framesWritten > 0 && writeLoops-- > 0);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead2));
+ aaudio_nanoseconds_t endTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+ ASSERT_GT(aaudioFramesRead2, 0);
+ ASSERT_GT(aaudioFramesRead2, aaudioFramesRead1);
+ ASSERT_LE(aaudioFramesRead2, aaudioFramesWritten);
+
+ // TODO why is legacy so inaccurate?
+ const double rateTolerance = 200.0; // arbitrary tolerance for sample rate
+ if (requestedSharingMode != AAUDIO_SHARING_MODE_LEGACY) {
+ // Calculate approximate sample rate and compare with stream rate.
+ double seconds = (endTime - beginTime) / (double) AAUDIO_NANOS_PER_SECOND;
+ double measuredRate = (aaudioFramesRead2 - aaudioFramesRead1) / seconds;
+ ASSERT_NEAR(actualSampleRate, measuredRate, rateTolerance);
+ }
+
+ // Request async pause and wait for server to say that it has completed the pause.
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestPause(aaudioStream));
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_PAUSING,
+ &state,
+ DEFAULT_STATE_TIMEOUT));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_PAUSED, state);
+ }
+
+ // Make sure the read counter is not advancing when we are paused.
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+ ASSERT_GE(aaudioFramesRead, aaudioFramesRead2); // monotonic increase
+
+ // Use this to sleep by waiting for something that won't happen.
+ AAudioStream_waitForStateChange(aaudioStream, AAUDIO_STREAM_STATE_PAUSED, &state, timeoutNanos);
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead2));
+ EXPECT_EQ(aaudioFramesRead, aaudioFramesRead2);
+
+ // ------------------- TEST FLUSH -----------------
+ // Prime the buffer.
+ timeoutNanos = 0;
+ writeLoops = 100;
+ do {
+ framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+ framesTotal += framesWritten;
+ } while (framesWritten > 0 && writeLoops-- > 0);
+ EXPECT_EQ(0, framesWritten);
+
+ // Flush and wait for server to respond.
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestFlush(aaudioStream));
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_FLUSHING,
+ &state,
+ DEFAULT_STATE_TIMEOUT));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_FLUSHED, state);
+
+ // After a flush, the read counter should be caught up with the write counter.
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesWritten(aaudioStream, &aaudioFramesWritten));
+ EXPECT_EQ(framesTotal, aaudioFramesWritten);
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+ EXPECT_EQ(aaudioFramesRead, aaudioFramesWritten);
+
+ // The buffer should be empty after a flush so we should be able to write.
+ framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+ // There should be some room for priming the buffer.
+ ASSERT_TRUE(framesWritten > 0 && framesWritten <= framesPerBurst);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+// Test Writing to an AAudioStream using LEGACY sharing mode.
+TEST(test_aaudio_api, aaudio_stream_legacy) {
+ runtest_aaudio_stream(AAUDIO_SHARING_MODE_LEGACY);
+}
+
+// Test Writing to an AAudioStream using EXCLUSIVE sharing mode.
+TEST(test_aaudio_api, aaudio_stream_exclusive) {
+ runtest_aaudio_stream(AAUDIO_SHARING_MODE_EXCLUSIVE);
+}
+
+#define AAUDIO_THREAD_ANSWER 1826375
+#define AAUDIO_THREAD_DURATION_MSEC 500
+
+static void *TestAAudioStreamThreadProc(void *arg) {
+ AAudioStream aaudioStream = (AAudioStream) reinterpret_cast<size_t>(arg);
+ aaudio_stream_state_t state;
+
+ // Use this to sleep by waiting for something that won't happen.
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_getState(aaudioStream, &state));
+ AAudioStream_waitForStateChange(aaudioStream, AAUDIO_STREAM_STATE_PAUSED, &state,
+ AAUDIO_THREAD_DURATION_MSEC * AAUDIO_NANOS_PER_MILLISECOND);
+ return reinterpret_cast<void *>(AAUDIO_THREAD_ANSWER);
+}
+
+// Test creating a stream related thread.
+TEST(test_aaudio_api, aaudio_stream_thread_basic) {
+ AAudioStreamBuilder aaudioBuilder;
+ AAudioStream aaudioStream;
+ aaudio_result_t result = AAUDIO_OK;
+ void *threadResult;
+
+ // Use an AAudioStreamBuilder to define the stream.
+ result = AAudio_createStreamBuilder(&aaudioBuilder);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+ // Start a thread.
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_createThread(aaudioStream,
+ 10 * AAUDIO_NANOS_PER_MILLISECOND,
+ TestAAudioStreamThreadProc,
+ reinterpret_cast<void *>(aaudioStream)));
+ // Thread already started.
+ ASSERT_NE(AAUDIO_OK, AAudioStream_createThread(aaudioStream, // should fail!
+ 10 * AAUDIO_NANOS_PER_MILLISECOND,
+ TestAAudioStreamThreadProc,
+ reinterpret_cast<void *>(aaudioStream)));
+
+ // Wait for the thread to finish.
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_joinThread(aaudioStream,
+ &threadResult, 2 * AAUDIO_THREAD_DURATION_MSEC * AAUDIO_NANOS_PER_MILLISECOND));
+ // The thread returns a special answer.
+ ASSERT_EQ(AAUDIO_THREAD_ANSWER, (int)reinterpret_cast<size_t>(threadResult));
+
+ // Thread should already be joined.
+ ASSERT_NE(AAUDIO_OK, AAudioStream_joinThread(aaudioStream, // should fail!
+ &threadResult, 2 * AAUDIO_THREAD_DURATION_MSEC * AAUDIO_NANOS_PER_MILLISECOND));
+
+ // Cleanup
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder));
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
diff --git a/media/liboboe/tests/test_handle_tracker.cpp b/media/libaaudio/tests/test_handle_tracker.cpp
similarity index 91%
rename from media/liboboe/tests/test_handle_tracker.cpp
rename to media/libaaudio/tests/test_handle_tracker.cpp
index a146e76..e51c39c 100644
--- a/media/liboboe/tests/test_handle_tracker.cpp
+++ b/media/libaaudio/tests/test_handle_tracker.cpp
@@ -14,18 +14,18 @@
* limitations under the License.
*/
-// Unit tests for Oboe Handle Tracker
+// Unit tests for AAudio Handle Tracker
#include <stdlib.h>
#include <math.h>
#include <gtest/gtest.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include "HandleTracker.h"
// Test adding one address.
-TEST(test_handle_tracker, oboe_handle_tracker) {
+TEST(test_handle_tracker, aaudio_handle_tracker) {
const int MAX_HANDLES = 4;
HandleTracker tracker(MAX_HANDLES);
handle_tracker_type_t type = 3; // arbitrary generic type
@@ -40,7 +40,7 @@
EXPECT_EQ(nullptr, found);
// create a valid handle and use it to lookup the object again
- oboe_handle_t dataHandle = tracker.put(type, &data);
+ aaudio_handle_t dataHandle = tracker.put(type, &data);
ASSERT_TRUE(dataHandle > 0);
found = tracker.get(type, dataHandle);
EXPECT_EQ(&data, found);
@@ -61,12 +61,12 @@
}
// Test filling the tracker.
-TEST(test_handle_tracker, oboe_full_up) {
+TEST(test_handle_tracker, aaudio_full_up) {
const int MAX_HANDLES = 5;
HandleTracker tracker(MAX_HANDLES);
handle_tracker_type_t type = 4; // arbitrary generic type
int data[MAX_HANDLES];
- oboe_handle_t handles[MAX_HANDLES];
+ aaudio_handle_t handles[MAX_HANDLES];
handle_tracker_address_t found;
// repeat the test several times to see if it breaks
@@ -81,7 +81,7 @@
}
// Now that it is full, try to add one more.
- oboe_handle_t handle = tracker.put(type, &data[0]);
+ aaudio_handle_t handle = tracker.put(type, &data[0]);
EXPECT_TRUE(handle < 0);
for (int i = 0; i < MAX_HANDLES; i++) {
diff --git a/media/liboboe/tests/test_marshalling.cpp b/media/libaaudio/tests/test_marshalling.cpp
similarity index 82%
rename from media/liboboe/tests/test_marshalling.cpp
rename to media/libaaudio/tests/test_marshalling.cpp
index 8f4cc2c..b1f77c0 100644
--- a/media/liboboe/tests/test_marshalling.cpp
+++ b/media/libaaudio/tests/test_marshalling.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-// Unit tests for Oboe Marshalling of RingBuffer information.
+// Unit tests for AAudio Marshalling of RingBuffer information.
#include <stdlib.h>
#include <math.h>
@@ -25,14 +25,14 @@
#include <gtest/gtest.h>
#include <sys/mman.h>
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
#include <binding/AudioEndpointParcelable.h>
using namespace android;
-using namespace oboe;
+using namespace aaudio;
// Test adding one value.
-TEST(test_marshalling, oboe_one_read_write) {
+TEST(test_marshalling, aaudio_one_read_write) {
Parcel parcel;
size_t pos = parcel.dataPosition();
const int arbitraryValue = 235;
@@ -44,7 +44,7 @@
}
// Test SharedMemoryParcel.
-TEST(test_marshalling, oboe_shared_memory) {
+TEST(test_marshalling, aaudio_shared_memory) {
SharedMemoryParcelable sharedMemoryA;
SharedMemoryParcelable sharedMemoryB;
const size_t memSizeBytes = 840;
@@ -52,10 +52,10 @@
ASSERT_LE(0, fd);
sharedMemoryA.setup(fd, memSizeBytes);
void *region1;
- EXPECT_EQ(OBOE_OK, sharedMemoryA.resolve(0, 16, &region1)); // fits in region
- EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(-2, 16, &region1)); // offset is negative
- EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(0, memSizeBytes + 8, &region1)); // size too big
- EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(memSizeBytes - 8, 16, &region1)); // goes past the end
+ EXPECT_EQ(AAUDIO_OK, sharedMemoryA.resolve(0, 16, &region1)); // fits in region
+ EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(-2, 16, &region1)); // offset is negative
+ EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(0, memSizeBytes + 8, &region1)); // size too big
+ EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(memSizeBytes - 8, 16, &region1)); // goes past the end
int32_t *buffer1 = (int32_t *)region1;
buffer1[0] = 98735; // arbitrary value
@@ -69,14 +69,14 @@
// should see same value at two different addresses
void *region2;
- EXPECT_EQ(OBOE_OK, sharedMemoryB.resolve(0, 16, &region2));
+ EXPECT_EQ(AAUDIO_OK, sharedMemoryB.resolve(0, 16, &region2));
int32_t *buffer2 = (int32_t *)region2;
EXPECT_NE(buffer1, buffer2);
EXPECT_EQ(buffer1[0], buffer2[0]);
}
// Test SharedRegionParcel.
-TEST(test_marshalling, oboe_shared_region) {
+TEST(test_marshalling, aaudio_shared_region) {
SharedMemoryParcelable sharedMemories[2];
SharedRegionParcelable sharedRegionA;
SharedRegionParcelable sharedRegionB;
@@ -89,7 +89,7 @@
sharedRegionA.setup(0, regionOffset1, regionSize1);
void *region1;
- EXPECT_EQ(OBOE_OK, sharedRegionA.resolve(sharedMemories, &region1));
+ EXPECT_EQ(AAUDIO_OK, sharedRegionA.resolve(sharedMemories, &region1));
int32_t *buffer1 = (int32_t *)region1;
buffer1[0] = 336677; // arbitrary value
@@ -102,13 +102,13 @@
// should see same value
void *region2;
- EXPECT_EQ(OBOE_OK, sharedRegionB.resolve(sharedMemories, &region2));
+ EXPECT_EQ(AAUDIO_OK, sharedRegionB.resolve(sharedMemories, &region2));
int32_t *buffer2 = (int32_t *)region2;
EXPECT_EQ(buffer1[0], buffer2[0]);
}
// Test RingBufferParcelable.
-TEST(test_marshalling, oboe_ring_buffer_parcelable) {
+TEST(test_marshalling, aaudio_ring_buffer_parcelable) {
SharedMemoryParcelable sharedMemories[2];
RingBufferParcelable ringBufferA;
RingBufferParcelable ringBufferB;
@@ -136,7 +136,7 @@
// setup A
RingBufferDescriptor descriptorA;
- EXPECT_EQ(OBOE_OK, ringBufferA.resolve(sharedMemories, &descriptorA));
+ EXPECT_EQ(AAUDIO_OK, ringBufferA.resolve(sharedMemories, &descriptorA));
descriptorA.dataAddress[0] = 95;
descriptorA.dataAddress[1] = 57;
descriptorA.readCounterAddress[0] = 17;
@@ -152,7 +152,7 @@
ringBufferB.readFromParcel(&parcel);
RingBufferDescriptor descriptorB;
- EXPECT_EQ(OBOE_OK, ringBufferB.resolve(sharedMemories, &descriptorB));
+ EXPECT_EQ(AAUDIO_OK, ringBufferB.resolve(sharedMemories, &descriptorB));
// A and B should match
EXPECT_EQ(descriptorA.dataAddress[0], descriptorB.dataAddress[0]);
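These marshalling tests all follow the same Parcel round trip: serialize, rewind with setDataPosition(0), deserialize into a second object, and compare. A minimal sketch of that pattern with a plain int32 payload follows; parcelables do the same thing through their writeToParcel()/readFromParcel() methods.

// Sketch of the Parcel round-trip pattern used by these tests,
// using only basic android::Parcel calls.
#include <binder/Parcel.h>

using android::Parcel;

bool roundTripInt(int32_t value) {
    Parcel parcel;
    parcel.writeInt32(value);      // serialize
    parcel.setDataPosition(0);     // rewind before reading
    int32_t readBack = 0;
    parcel.readInt32(&readBack);   // deserialize
    return readBack == value;      // compare with the original
}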
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
index 5e00b77..617eb35 100644
--- a/media/libaudiohal/Android.mk
+++ b/media/libaudiohal/Android.mk
@@ -46,7 +46,8 @@
android.hardware.audio.common@2.0-util \
android.hardware.audio.effect@2.0 \
android.hidl.memory@1.0 \
- libmedia_helper
+ libmedia_helper \
+ libmediautils
endif # USE_LEGACY_LOCAL_AUDIO_HAL
diff --git a/media/libaudiohal/EffectBufferHalLocal.cpp b/media/libaudiohal/EffectBufferHalLocal.cpp
index 7e6ee85..9fe2c7b 100644
--- a/media/libaudiohal/EffectBufferHalLocal.cpp
+++ b/media/libaudiohal/EffectBufferHalLocal.cpp
@@ -75,10 +75,10 @@
void EffectBufferHalLocal::commit() {
}
-void EffectBufferHalLocal::update(size_t size) {
+void EffectBufferHalLocal::update(size_t) {
}
-void EffectBufferHalLocal::commit(size_t size) {
+void EffectBufferHalLocal::commit(size_t) {
}
} // namespace android
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.cpp b/media/libaudiohal/EffectsFactoryHalHidl.cpp
index 1ab5dad..ad12654 100644
--- a/media/libaudiohal/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/EffectsFactoryHalHidl.cpp
@@ -17,7 +17,9 @@
#define LOG_TAG "EffectsFactoryHalHidl"
//#define LOG_NDEBUG 0
+#include <android/hidl/memory/1.0/IAllocator.h>
#include <cutils/native_handle.h>
+#include <hidl/ServiceManagement.h>
#include <media/EffectsFactoryApi.h>
#include "ConversionHelperHidl.h"
@@ -45,6 +47,10 @@
EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory"){
mEffectsFactory = IEffectsFactory::getService("audio_effects_factory");
+ // TODO: Waiting should not be needed (b/34772726).
+ // Also remove include of IAllocator.h and ServiceManagement.h
+ android::hardware::details::waitForHwService(
+ hidl::memory::V1_0::IAllocator::descriptor, "ashmem");
}
EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
diff --git a/media/libaudiohal/StreamHalHidl.cpp b/media/libaudiohal/StreamHalHidl.cpp
index 9ee8fa8..4054aaa 100644
--- a/media/libaudiohal/StreamHalHidl.cpp
+++ b/media/libaudiohal/StreamHalHidl.cpp
@@ -14,12 +14,11 @@
* limitations under the License.
*/
-#include <time.h>
-
#define LOG_TAG "StreamHalHidl"
//#define LOG_NDEBUG 0
#include <android/hardware/audio/2.0/IStreamOutCallback.h>
+#include <mediautils/SchedulingPolicyService.h>
#include <utils/Log.h>
#include "DeviceHalHidl.h"
@@ -28,6 +27,7 @@
using ::android::hardware::audio::common::V2_0::AudioChannelMask;
using ::android::hardware::audio::common::V2_0::AudioFormat;
+using ::android::hardware::audio::common::V2_0::ThreadInfo;
using ::android::hardware::audio::V2_0::AudioDrain;
using ::android::hardware::audio::V2_0::IStreamOutCallback;
using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
@@ -35,18 +35,18 @@
using ::android::hardware::audio::V2_0::MmapPosition;
using ::android::hardware::audio::V2_0::ParameterValue;
using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::audio::V2_0::ThreadPriority;
using ::android::hardware::audio::V2_0::TimeSpec;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
+using ReadCommand = ::android::hardware::audio::V2_0::IStreamIn::ReadCommand;
namespace android {
StreamHalHidl::StreamHalHidl(IStream *stream)
: ConversionHelperHidl("Stream"),
- mHalThreadPriority(static_cast<int>(ThreadPriority::NORMAL)),
- mStream(stream) {
+ mStream(stream),
+ mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT) {
}
StreamHalHidl::~StreamHalHidl() {
@@ -188,6 +188,19 @@
return OK;
}
+bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
+ if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
+ return true;
+ }
+ int err = requestPriority(
+ threadPid, threadId,
+ mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
+ ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
+ mHalThreadPriority, threadPid, threadId, err);
+ // Audio will still work, but latency will be higher and sometimes unacceptable.
+ return err == 0;
+}
+
namespace {
/* Notes on callback ownership.
@@ -241,8 +254,7 @@
} // namespace
StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mEfGroup(nullptr),
- mGetPresentationPositionNotSupported(false), mPPosFromWrite{ 0, OK, 0, { 0, 0 } } {
+ : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
}
StreamOutHalHidl::~StreamOutHalHidl() {
@@ -265,7 +277,15 @@
status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
if (mStream == 0) return NO_INIT;
- return processReturn("getLatency", mStream->getLatency(), latency);
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *latency = writeStatus.reply.latencyMs;
+ });
+ } else {
+ return processReturn("getLatency", mStream->getLatency(), latency);
+ }
}
status_t StreamOutHalHidl::setVolume(float left, float right) {
@@ -288,10 +308,30 @@
return status;
}
- const size_t availBytes = mDataMQ->availableToWrite();
- if (bytes > availBytes) { bytes = availBytes; }
- if (!mDataMQ->write(static_cast<const uint8_t*>(buffer), bytes)) {
- ALOGW("data message queue write failed");
+ return callWriterThread(
+ WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
+ [&] (const WriteStatus& writeStatus) {
+ *written = writeStatus.reply.written;
+ });
+}
+
+status_t StreamOutHalHidl::callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
+ if (!mCommandMQ->write(&cmd)) {
+ ALOGE("command message queue write failed for \"%s\"", cmdName);
+ return -EAGAIN;
+ }
+ if (data != nullptr) {
+ size_t availableToWrite = mDataMQ->availableToWrite();
+ if (dataSize > availableToWrite) {
+ ALOGW("truncating write data from %d to %d due to insufficient data queue space",
+ (int32_t)dataSize, (int32_t)availableToWrite);
+ dataSize = availableToWrite;
+ }
+ if (!mDataMQ->write(data, dataSize)) {
+ ALOGE("data message queue write failed for \"%s\"", cmdName);
+ }
}
mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
@@ -301,24 +341,18 @@
status_t ret = mEfGroup->wait(
static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState, NS_PER_SEC);
if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
- WriteStatus writeStatus =
- { Result::NOT_INITIALIZED, 0, Result::NOT_INITIALIZED, 0, { 0, 0 } };
- mStatusMQ->read(&writeStatus);
- if (writeStatus.writeRetval == Result::OK) {
- status = OK;
- *written = writeStatus.written;
- mPPosFromWrite.status = processReturn(
- "get_presentation_position", writeStatus.presentationPositionRetval);
- if (mPPosFromWrite.status == OK) {
- mPPosFromWrite.frames = writeStatus.frames;
- mPPosFromWrite.ts.tv_sec = writeStatus.timeStamp.tvSec;
- mPPosFromWrite.ts.tv_nsec = writeStatus.timeStamp.tvNSec;
- }
- mPPosFromWrite.obtained = getCurrentTimeMs();
- } else {
- status = processReturn("write", writeStatus.writeRetval);
+ WriteStatus writeStatus;
+ writeStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&writeStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
}
- return status;
+ if (writeStatus.retval == Result::OK) {
+ ret = OK;
+ callback(writeStatus);
+ } else {
+ ret = processReturn(cmdName, writeStatus.retval);
+ }
+ return ret;
}
if (ret == -EAGAIN) {
// This normally retries no more than once.
@@ -327,35 +361,41 @@
return ret;
}
-uint64_t StreamOutHalHidl::getCurrentTimeMs() {
- struct timespec timeNow;
- clock_gettime(CLOCK_MONOTONIC, &timeNow);
- return timeNow.tv_sec * 1000000 + timeNow.tv_nsec / 1000;
-}
-
status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
std::unique_ptr<DataMQ> tempDataMQ;
std::unique_ptr<StatusMQ> tempStatusMQ;
Result retval;
+ pid_t halThreadPid, halThreadTid;
Return<void> ret = mStream->prepareForWriting(
- 1, bufferSize, ThreadPriority(mHalThreadPriority),
+ 1, bufferSize,
[&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ) {
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
retval = r;
if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
tempDataMQ.reset(new DataMQ(dataMQ));
tempStatusMQ.reset(new StatusMQ(statusMQ));
if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
}
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
}
});
if (!ret.isOk() || retval != Result::OK) {
return processReturn("prepareForWriting", ret, retval);
}
- if (!tempDataMQ || !tempDataMQ->isValid() || !tempStatusMQ || !tempStatusMQ->isValid()
- || !mEfGroup) {
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for writing is invalid");
ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
@@ -364,8 +404,12 @@
ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
return NO_INIT;
}
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
mDataMQ = std::move(tempDataMQ);
mStatusMQ = std::move(tempStatusMQ);
+ mWriterClient = gettid();
return OK;
}
@@ -443,31 +487,27 @@
status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
if (mStream == 0) return NO_INIT;
- if (mGetPresentationPositionNotSupported) return INVALID_OPERATION;
- if (getCurrentTimeMs() - mPPosFromWrite.obtained <= 1000) {
- // No more than 1 ms passed since the last write, use cached result to avoid binder calls.
- if (mPPosFromWrite.status == OK) {
- *frames = mPPosFromWrite.frames;
- timestamp->tv_sec = mPPosFromWrite.ts.tv_sec;
- timestamp->tv_nsec = mPPosFromWrite.ts.tv_nsec;
- }
- return mPPosFromWrite.status;
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *frames = writeStatus.reply.presentationPosition.frames;
+ timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
+ timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getPresentationPosition(
+ [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ timestamp->tv_sec = hidlTimeStamp.tvSec;
+ timestamp->tv_nsec = hidlTimeStamp.tvNSec;
+ }
+ });
+ return processReturn("getPresentationPosition", ret, retval);
}
-
- Result retval;
- Return<void> ret = mStream->getPresentationPosition(
- [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- timestamp->tv_sec = hidlTimeStamp.tvSec;
- timestamp->tv_nsec = hidlTimeStamp.tvNSec;
- }
- });
- if (ret.isOk() && retval == Result::NOT_SUPPORTED) {
- mGetPresentationPositionNotSupported = true;
- }
- return processReturn("getPresentationPosition", ret, retval);
}
void StreamOutHalHidl::onWriteReady() {
@@ -493,7 +533,7 @@
StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mEfGroup(nullptr) {
+ : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
}
StreamInHalHidl::~StreamInHalHidl() {
@@ -525,33 +565,53 @@
}
status_t status;
- if (!mDataMQ) {
- if ((status = prepareForReading(bytes)) != OK) return status;
- // Trigger the first read.
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
+ if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
+ return status;
}
+ ReadParameters params;
+ params.command = ReadCommand::READ;
+ params.params.read = bytes;
+ return callReaderThread(params, "read",
+ [&](const ReadStatus& readStatus) {
+ const size_t availToRead = mDataMQ->availableToRead();
+ if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
+ ALOGE("data message queue read failed for \"read\"");
+ }
+ ALOGW_IF(availToRead != readStatus.reply.read,
+ "HAL read report inconsistent: mq = %d, status = %d",
+ (int32_t)availToRead, (int32_t)readStatus.reply.read);
+ *read = readStatus.reply.read;
+ });
+}
+
+status_t StreamInHalHidl::callReaderThread(
+ const ReadParameters& params, const char* cmdName,
+ StreamInHalHidl::ReaderCallback callback) {
+ if (!mCommandMQ->write(&params)) {
+ ALOGW("command message queue write failed");
+ return -EAGAIN;
+ }
+ mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
+
// TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
uint32_t efState = 0;
retry:
status_t ret = mEfGroup->wait(
static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState, NS_PER_SEC);
if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
- ReadStatus readStatus = { Result::NOT_INITIALIZED, 0 };
- const size_t availToRead = mDataMQ->availableToRead();
- if (bytes > availToRead) { bytes = availToRead; }
- mDataMQ->read(static_cast<uint8_t*>(buffer), bytes);
- mStatusMQ->read(&readStatus);
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
- if (readStatus.retval == Result::OK) {
- ALOGW_IF(availToRead != readStatus.read,
- "HAL read report inconsistent: mq = %d, status = %d",
- (int32_t)availToRead, (int32_t)readStatus.read);
- *read = readStatus.read;
- } else {
- status = processReturn("read", readStatus.retval);
+ ReadStatus readStatus;
+ readStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&readStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
}
- return status;
+ if (readStatus.retval == Result::OK) {
+ ret = OK;
+ callback(readStatus);
+ } else {
+ ret = processReturn(cmdName, readStatus.retval);
+ }
+ return ret;
}
if (ret == -EAGAIN) {
// This normally retries no more than once.
@@ -561,28 +621,40 @@
}
status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
std::unique_ptr<DataMQ> tempDataMQ;
std::unique_ptr<StatusMQ> tempStatusMQ;
Result retval;
+ pid_t halThreadPid, halThreadTid;
Return<void> ret = mStream->prepareForReading(
- 1, bufferSize, ThreadPriority(mHalThreadPriority),
+ 1, bufferSize,
[&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ) {
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
retval = r;
if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
tempDataMQ.reset(new DataMQ(dataMQ));
tempStatusMQ.reset(new StatusMQ(statusMQ));
if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
}
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
}
});
if (!ret.isOk() || retval != Result::OK) {
return processReturn("prepareForReading", ret, retval);
}
- if (!tempDataMQ || !tempDataMQ->isValid() || !tempStatusMQ || !tempStatusMQ->isValid()
- || !mEfGroup) {
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for reading");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for reading is invalid");
ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
@@ -591,8 +663,12 @@
ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
return NO_INIT;
}
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
mDataMQ = std::move(tempDataMQ);
mStatusMQ = std::move(tempStatusMQ);
+ mReaderClient = gettid();
return OK;
}
@@ -603,16 +679,26 @@
status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
if (mStream == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mStream->getCapturePosition(
- [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- *time = hidlTime;
- }
- });
- return processReturn("getCapturePosition", ret, retval);
+ if (mReaderClient == gettid() && mCommandMQ) {
+ ReadParameters params;
+ params.command = ReadCommand::GET_CAPTURE_POSITION;
+ return callReaderThread(params, "getCapturePosition",
+ [&](const ReadStatus& readStatus) {
+ *frames = readStatus.reply.capturePosition.frames;
+ *time = readStatus.reply.capturePosition.time;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getCapturePosition(
+ [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ *time = hidlTime;
+ }
+ });
+ return processReturn("getCapturePosition", ret, retval);
+ }
}
} // namespace android
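The new callWriterThread()/callReaderThread() helpers implement a simple round trip over the HIDL fast message queues: post a command, wake the HAL thread via the event flag, block until a status arrives, then hand the status to a per-command callback. The sketch below models that protocol with standard C++ primitives standing in for the MessageQueue and EventFlag types; the class, names, and simulated HAL side are illustrative only.

// Simplified model of the command/status round trip in callWriterThread().
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <optional>
#include <queue>
#include <thread>

struct WriteStatus { int retval; size_t written; };

class WriterChannel {
  public:
    // Client side: post a command, wake the peer, wait for its status.
    int call(int command, const std::function<void(const WriteStatus&)>& onStatus) {
        std::unique_lock<std::mutex> lock(mLock);
        mCommands.push(command);                  // "command MQ" write
        mCond.notify_all();                       // "NOT_EMPTY" wake
        mCond.wait(lock, [this] { return mStatus.has_value(); });  // "NOT_FULL" wait
        WriteStatus status = *mStatus;
        mStatus.reset();
        if (status.retval != 0) return status.retval;
        onStatus(status);                         // copy out written/latency/etc.
        return 0;
    }

    // HAL side: take one command and publish a status for it.
    void serveOne() {
        std::unique_lock<std::mutex> lock(mLock);
        mCond.wait(lock, [this] { return !mCommands.empty(); });
        mCommands.pop();
        mStatus = WriteStatus{0, 256};            // pretend 256 bytes were written
        mCond.notify_all();
    }

  private:
    std::mutex mLock;
    std::condition_variable mCond;
    std::queue<int> mCommands;
    std::optional<WriteStatus> mStatus;
};

int main() {
    WriterChannel channel;
    std::thread hal([&channel] { channel.serveOne(); });
    channel.call(/*WRITE*/ 0, [](const WriteStatus& s) {
        printf("written = %zu\n", s.written);
    });
    hal.join();
    return 0;
}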
diff --git a/media/libaudiohal/StreamHalHidl.h b/media/libaudiohal/StreamHalHidl.h
index 8b5867e..a7df276 100644
--- a/media/libaudiohal/StreamHalHidl.h
+++ b/media/libaudiohal/StreamHalHidl.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_H
#define ANDROID_HARDWARE_STREAM_HAL_HIDL_H
+#include <atomic>
+
#include <android/hardware/audio/2.0/IStream.h>
#include <android/hardware/audio/2.0/IStreamIn.h>
#include <android/hardware/audio/2.0/IStreamOut.h>
@@ -32,7 +34,9 @@
using ::android::hardware::EventFlag;
using ::android::hardware::MessageQueue;
using ::android::hardware::Return;
+using ReadParameters = ::android::hardware::audio::V2_0::IStreamIn::ReadParameters;
using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
+using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;
using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
namespace android {
@@ -97,10 +101,12 @@
// The destructor automatically closes the stream.
virtual ~StreamHalHidl();
- int mHalThreadPriority;
+ bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
private:
+ const int HAL_THREAD_PRIORITY_DEFAULT = -1;
IStream *mStream;
+ int mHalThreadPriority;
};
class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
@@ -155,28 +161,27 @@
private:
friend class DeviceHalHidl;
+ typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
wp<StreamOutHalInterfaceCallback> mCallback;
sp<IStreamOut> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
std::unique_ptr<DataMQ> mDataMQ;
std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mWriterClient;
EventFlag* mEfGroup;
- bool mGetPresentationPositionNotSupported;
- struct {
- uint64_t obtained;
- status_t status;
- uint64_t frames;
- struct timespec ts;
- } mPPosFromWrite;
// Can not be constructed directly by clients.
StreamOutHalHidl(const sp<IStreamOut>& stream);
virtual ~StreamOutHalHidl();
- uint64_t getCurrentTimeMs();
+ using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
+ status_t callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, WriterCallback callback);
status_t prepareForWriting(size_t bufferSize);
};
@@ -200,12 +205,15 @@
private:
friend class DeviceHalHidl;
+ typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
sp<IStreamIn> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
std::unique_ptr<DataMQ> mDataMQ;
std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mReaderClient;
EventFlag* mEfGroup;
// Can not be constructed directly by clients.
@@ -213,6 +221,9 @@
virtual ~StreamInHalHidl();
+ using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
+ status_t callReaderThread(
+ const ReadParameters& params, const char* cmdName, ReaderCallback callback);
status_t prepareForReading(size_t bufferSize);
};
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index 0f4f092..bfc43a6 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -36,7 +36,8 @@
FLAGS,
GETDRMTRACKINFO,
SETUID,
- NAME
+ NAME,
+ GETMETRICS
};
class BpMediaExtractor : public BpInterface<IMediaExtractor> {
@@ -94,6 +95,16 @@
return NULL;
}
+ virtual status_t getMetrics(Parcel * reply) {
+ Parcel data;
+ data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+ status_t ret = remote()->transact(GETMETRICS, data, reply);
+ if (ret == NO_ERROR) {
+ return OK;
+ }
+ return UNKNOWN_ERROR;
+ }
+
virtual uint32_t flags() const {
ALOGV("flags NOT IMPLEMENTED");
return 0;
@@ -169,6 +180,11 @@
}
return UNKNOWN_ERROR;
}
+ case GETMETRICS: {
+ CHECK_INTERFACE(IMediaExtractor, data, reply);
+ status_t ret = getMetrics(reply);
+ return ret;
+ }
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/OMXBuffer.cpp b/media/libmedia/OMXBuffer.cpp
index c8995c9..6d54a13 100644
--- a/media/libmedia/OMXBuffer.cpp
+++ b/media/libmedia/OMXBuffer.cpp
@@ -90,6 +90,13 @@
case kBufferTypeANWBuffer:
{
+ if (mGraphicBuffer == NULL) {
+ return parcel->writeBool(false);
+ }
+ status_t err = parcel->writeBool(true);
+ if (err != OK) {
+ return err;
+ }
return parcel->write(*mGraphicBuffer);
}
@@ -130,15 +137,21 @@
case kBufferTypeANWBuffer:
{
- sp<GraphicBuffer> buffer = new GraphicBuffer();
-
- status_t err = parcel->read(*buffer);
-
+ bool notNull;
+ status_t err = parcel->readBool(¬Null);
if (err != OK) {
return err;
}
-
- mGraphicBuffer = buffer;
+ if (notNull) {
+ sp<GraphicBuffer> buffer = new GraphicBuffer();
+ status_t err = parcel->read(*buffer);
+ if (err != OK) {
+ return err;
+ }
+ mGraphicBuffer = buffer;
+ } else {
+ mGraphicBuffer = nullptr;
+ }
break;
}
@@ -166,7 +179,7 @@
mMem = std::move(source.mMem);
mGraphicBuffer = std::move(source.mGraphicBuffer);
mNativeHandle = std::move(source.mNativeHandle);
- mHidlMemory = source.mHidlMemory; // TODO(b/34093434): Use move when available
+ mHidlMemory = std::move(source.mHidlMemory);
return *this;
}
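The OMXBuffer change switches to a presence-flag encoding for the possibly-null GraphicBuffer: write a bool first, and read the payload only when the flag was set. A small sketch of the same pattern with a plain int payload (the helper names are invented for the example):

// Nullable-payload parceling pattern, sketched with an int32_t payload.
#include <binder/Parcel.h>

using android::OK;
using android::Parcel;
using android::status_t;

status_t writeOptionalInt(Parcel *parcel, const int32_t *value) {
    if (value == nullptr) {
        return parcel->writeBool(false);       // nothing follows
    }
    status_t err = parcel->writeBool(true);    // payload follows
    if (err != OK) return err;
    return parcel->writeInt32(*value);
}

status_t readOptionalInt(const Parcel *parcel, bool *present, int32_t *value) {
    status_t err = parcel->readBool(present);
    if (err != OK) return err;
    return *present ? parcel->readInt32(value) : OK;
}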
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 25c29f2..bead69a 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -111,6 +111,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO),
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT_PCM),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ),
TERMINATOR
};
@@ -122,6 +123,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_MMAP_NOIRQ),
TERMINATOR
};
@@ -186,6 +188,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_SBC),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_HD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC4),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LDAC),
TERMINATOR
};
@@ -211,6 +214,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_6),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_CALL_MONO),
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 3199495..2d4c475 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1318,16 +1318,33 @@
}
sp<IMediaPlayerClient> c;
+ sp<Client> nextClient;
+ status_t errStartNext = NO_ERROR;
{
Mutex::Autolock l(client->mLock);
c = client->mClient;
if (msg == MEDIA_PLAYBACK_COMPLETE && client->mNextClient != NULL) {
+ nextClient = client->mNextClient;
+
if (client->mAudioOutput != NULL)
client->mAudioOutput->switchToNextOutput();
- client->mNextClient->start();
- if (client->mNextClient->mClient != NULL) {
- client->mNextClient->mClient->notify(
- MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+
+ errStartNext = nextClient->start();
+ }
+ }
+
+ if (nextClient != NULL) {
+ sp<IMediaPlayerClient> nc;
+ {
+ Mutex::Autolock l(nextClient->mLock);
+ nc = nextClient->mClient;
+ }
+ if (nc != NULL) {
+ if (errStartNext == NO_ERROR) {
+ nc->notify(MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+ } else {
+ nc->notify(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, 0, obj);
+ ALOGE("gapless: start playback for next track failed, err(%d)", errStartNext);
}
}
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 9ce65c4..d00e377 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -275,6 +275,7 @@
}
status_t StagefrightRecorder::setNextOutputFile(int fd) {
+ Mutex::Autolock autolock(mLock);
// Only support MPEG4
if (mOutputFormat != OUTPUT_FORMAT_MPEG_4) {
ALOGE("Only MP4 file format supports setting next output file");
@@ -290,6 +291,10 @@
// start with a clean, empty file
ftruncate(fd, 0);
int nextFd = dup(fd);
+ if (mWriter == NULL) {
+ ALOGE("setNextOutputFile failed. Writer has been freed");
+ return INVALID_OPERATION;
+ }
return mWriter->setNextFd(nextFd);
}
@@ -851,6 +856,8 @@
}
status_t StagefrightRecorder::prepare() {
+ ALOGV("prepare");
+ Mutex::Autolock autolock(mLock);
if (mVideoSource == VIDEO_SOURCE_SURFACE) {
return prepareInternal();
}
@@ -859,6 +866,7 @@
status_t StagefrightRecorder::start() {
ALOGV("start");
+ Mutex::Autolock autolock(mLock);
if (mOutputFd < 0) {
ALOGE("Output file descriptor is invalid");
return INVALID_OPERATION;
@@ -1867,6 +1875,7 @@
status_t StagefrightRecorder::stop() {
ALOGV("stop");
+ Mutex::Autolock autolock(mLock);
status_t err = OK;
if (mCaptureFpsEnable && mCameraSourceTimeLapse != NULL) {
@@ -1984,6 +1993,7 @@
status_t StagefrightRecorder::dump(
int fd, const Vector<String16>& args) const {
ALOGV("dump");
+ Mutex::Autolock autolock(mLock);
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 870c5d0..b7d0b0e 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -43,7 +43,6 @@
struct StagefrightRecorder : public MediaRecorderBase {
explicit StagefrightRecorder(const String16 &opPackageName);
virtual ~StagefrightRecorder();
-
virtual status_t init();
virtual status_t setAudioSource(audio_source_t as);
virtual status_t setVideoSource(video_source vs);
@@ -73,6 +72,7 @@
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const;
private:
+ mutable Mutex mLock;
sp<hardware::ICamera> mCamera;
sp<ICameraRecordingProxy> mCameraProxy;
sp<IGraphicBufferProducer> mPreviewSurface;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 1d62498..0a0a8aa 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -634,6 +634,11 @@
sp<MediaCodecBuffer> buffer;
mCodec->getOutputBuffer(index, &buffer);
+ if (buffer == NULL) {
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+
if (index >= mOutputBuffers.size()) {
for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
mOutputBuffers.add();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 42e95da..9350440 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -405,10 +405,10 @@
msg->setInt32("flags", flags);
sp<AMessage> response;
- msg->postAndAwaitResponse(&response);
+ status_t postStatus = msg->postAndAwaitResponse(&response);
int32_t err;
- if (!response->findInt32("err", &err)) {
+ if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
err = INVALID_OPERATION;
} else if (err == OK && isOffloaded != NULL) {
int32_t offload;
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
index f019df5..7c2523d 100644
--- a/media/libnbaio/NBLog.cpp
+++ b/media/libnbaio/NBLog.cpp
@@ -105,6 +105,7 @@
if (!mEnabled) {
return;
}
+ LOG_ALWAYS_FATAL_IF(string == NULL, "Attempted to log NULL string");
size_t length = strlen(string);
if (length > Entry::kMaxLength) {
length = Entry::kMaxLength;
@@ -147,16 +148,136 @@
}
struct timespec ts;
if (!clock_gettime(CLOCK_MONOTONIC, &ts)) {
- log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec));
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
}
}
-void NBLog::Writer::logTimestamp(const struct timespec& ts)
+void NBLog::Writer::logTimestamp(const struct timespec &ts)
{
if (!mEnabled) {
return;
}
- log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec));
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
+}
+
+void NBLog::Writer::logInteger(const int x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_INTEGER, &x, sizeof(x));
+}
+
+void NBLog::Writer::logFloat(const float x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_FLOAT, &x, sizeof(x));
+}
+
+void NBLog::Writer::logPID()
+{
+ if (!mEnabled) {
+ return;
+ }
+ pid_t id = ::getpid();
+ // TODO: append process name to pid
+ // const char* path = sprintf("/proc/%d/status", id);
+ // FILE* f = fopen(path);
+ // size_t length = 30
+ // char buffer[length];
+ // getline(&buffer, &length, f);
+ // char* pidTag = sprintf("")
+ log(EVENT_PID, &id, sizeof(pid_t));
+}
+
+void NBLog::Writer::logStart(const char *fmt)
+{
+ if (!mEnabled) {
+ return;
+ }
+ size_t length = strlen(fmt);
+ if (length > Entry::kMaxLength) {
+ length = Entry::kMaxLength;
+ }
+ log(EVENT_START_FMT, fmt, length);
+}
+
+void NBLog::Writer::logEnd()
+{
+ if (!mEnabled) {
+ return;
+ }
+ Entry entry = Entry(EVENT_END_FMT, NULL, 0);
+ log(&entry, true);
+}
+
+void NBLog::Writer::logFormat(const char *fmt, ...)
+{
+ if (!mEnabled) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, fmt);
+ Writer::logVFormat(fmt, ap);
+ va_end(ap);
+}
+
+void NBLog::Writer::logVFormat(const char *fmt, va_list argp)
+{
+ if (!mEnabled) {
+ return;
+ }
+ Writer::logStart(fmt);
+ int i;
+ double f;
+ char* s;
+ struct timespec t;
+ Writer::logTimestamp();
+ for (const char *p = fmt; *p != '\0'; p++) {
+ // TODO: implement more complex formatting such as %.3f
+ if (*p != '%') {
+ continue;
+ }
+ switch(*++p) {
+ case 's': // string
+ s = va_arg(argp, char *);
+ Writer::log(s);
+ break;
+
+ case 't': // timestamp
+ t = va_arg(argp, struct timespec);
+ Writer::logTimestamp(t);
+ break;
+
+ case 'd': // integer
+ i = va_arg(argp, int);
+ Writer::logInteger(i);
+ break;
+
+ case 'f': // float
+ f = va_arg(argp, double); // float arguments are promoted to double in vararg lists
+ Writer::logFloat((float)f);
+ break;
+
+ case 'p': // pid
+ Writer::logPID();
+ break;
+
+ // the "%\0" case finishes parsing
+ case '\0':
+ --p;
+ break;
+
+ default:
+ ALOGW("NBLog Writer parsed invalid format specifier: %c", *p);
+ break;
+ // the '%' case is handled using the formatted string in the reader
+ }
+ }
+ Writer::logEnd();
}
void NBLog::Writer::log(Event event, const void *data, size_t length)
@@ -173,6 +294,10 @@
switch (event) {
case EVENT_STRING:
case EVENT_TIMESTAMP:
+ case EVENT_INTEGER:
+ case EVENT_FLOAT:
+ case EVENT_PID:
+ case EVENT_START_FMT:
break;
case EVENT_RESERVED:
default:
@@ -257,12 +382,43 @@
Writer::logTimestamp();
}
-void NBLog::LockedWriter::logTimestamp(const struct timespec& ts)
+void NBLog::LockedWriter::logTimestamp(const struct timespec &ts)
{
Mutex::Autolock _l(mLock);
Writer::logTimestamp(ts);
}
+void NBLog::LockedWriter::logInteger(const int x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logInteger(x);
+}
+
+void NBLog::LockedWriter::logFloat(const float x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logFloat(x);
+}
+
+void NBLog::LockedWriter::logPID()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logPID();
+}
+
+void NBLog::LockedWriter::logStart(const char *fmt)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logStart(fmt);
+}
+
+
+void NBLog::LockedWriter::logEnd()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logEnd();
+}
+
bool NBLog::LockedWriter::isEnabled() const
{
Mutex::Autolock _l(mLock);
@@ -420,6 +576,22 @@
(int) (ts.tv_nsec / 1000000));
deferredTimestamp = true;
} break;
+ case EVENT_INTEGER:
+ appendInt(&body, data);
+ break;
+ case EVENT_FLOAT:
+ appendFloat(&body, data);
+ break;
+ case EVENT_PID:
+ appendPID(&body, data);
+ break;
+ case EVENT_START_FMT:
+ advance += handleFormat((const char*) &copy[i + 2], length,
+ &copy[i + Entry::kOverhead + length], &timestamp, &body);
+ break;
+ case EVENT_END_FMT:
+ body.appendFormat("warning: got to end format event");
+ break;
case EVENT_RESERVED:
default:
body.appendFormat("warning: unknown event %d", event);
@@ -437,7 +609,7 @@
}
}
-void NBLog::Reader::dumpLine(const String8& timestamp, String8& body)
+void NBLog::Reader::dumpLine(const String8 ×tamp, String8 &body)
{
if (mFd >= 0) {
dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
@@ -452,4 +624,104 @@
return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
}
+void NBLog::appendTimestamp(String8 *body, const void *data) {
+ struct timespec ts;
+ memcpy(&ts, data, sizeof(struct timespec));
+ body->appendFormat("[%d.%03d]", (int) ts.tv_sec,
+ (int) (ts.tv_nsec / 1000000));
+}
+
+void NBLog::appendInt(String8 *body, const void *data) {
+ int x = *((int*) data);
+ body->appendFormat("<%d>", x);
+}
+
+void NBLog::appendFloat(String8 *body, const void *data) {
+ float f;
+ memcpy(&f, data, sizeof(float));
+ body->appendFormat("<%f>", f);
+}
+
+void NBLog::appendPID(String8 *body, const void* data) {
+ pid_t id = *((pid_t*) data);
+ body->appendFormat("<PID: %d>", id);
+}
+
+int NBLog::handleFormat(const char *fmt, size_t fmt_length, const uint8_t *data,
+ String8 *timestamp, String8 *body) {
+ if (data[0] != EVENT_TIMESTAMP) {
+ ALOGW("NBLog Reader Expected timestamp event %d, got %d", EVENT_TIMESTAMP, data[0]);
+ }
+ struct timespec ts;
+ memcpy(&ts, &data[2], sizeof(ts));
+ timestamp->clear();
+ timestamp->appendFormat("[%d.%03d]", (int) ts.tv_sec,
+ (int) (ts.tv_nsec / 1000000));
+ size_t data_offset = Entry::kOverhead + sizeof ts;
+
+ for (size_t fmt_offset = 0; fmt_offset < fmt_length; ++fmt_offset) {
+ if (fmt[fmt_offset] != '%') {
+ body->append(&fmt[fmt_offset], 1); // TODO optimize to write consecutive strings at once
+ continue;
+ }
+ if (fmt[++fmt_offset] == '%') {
+ body->append("%");
+ continue;
+ }
+ if (fmt_offset == fmt_length) {
+ continue;
+ }
+
+ NBLog::Event event = (NBLog::Event) data[data_offset];
+ size_t length = data[data_offset + 1];
+
+ // TODO check length for event type is correct
+ if (length != data[data_offset + length + 2]) {
+ ALOGW("NBLog Reader received different lengths %zu and %d for event %d", length,
+ data[data_offset + length + 2], event);
+ body->append("<invalid entry>");
+ ++fmt_offset;
+ continue;
+ }
+
+ // TODO: implement more complex formatting such as %.3f
+ void * datum = (void*) &data[data_offset + 2]; // pointer to the current event data
+ switch(fmt[fmt_offset])
+ {
+ case 's': // string
+ ALOGW_IF(event != EVENT_STRING, "NBLog Reader incompatible event for string specifier: %d", event);
+ body->append((const char*) datum, length);
+ break;
+
+ case 't': // timestamp
+ ALOGW_IF(event != EVENT_TIMESTAMP, "NBLog Reader incompatible event for timestamp specifier: %d", event);
+ appendTimestamp(body, datum);
+ break;
+
+ case 'd': // integer
+ ALOGW_IF(event != EVENT_INTEGER, "NBLog Reader incompatible event for integer specifier: %d", event);
+ appendInt(body, datum);
+
+ break;
+
+ case 'f': // float
+ ALOGW_IF(event != EVENT_FLOAT, "NBLog Reader incompatible event for float specifier: %d", event);
+ appendFloat(body, datum);
+ break;
+
+ case 'p': // pid
+ ALOGW_IF(event != EVENT_PID, "NBLog Reader incompatible event for pid specifier: %d", event);
+ appendPID(body, datum);
+ break;
+
+ default:
+ ALOGW("NBLog Reader encountered unknown character %c", fmt[fmt_offset]);
+ }
+
+ data_offset += length + Entry::kOverhead;
+
+ }
+ return data_offset + Entry::kOverhead; // data offset + size of END_FMT event
+}
+
} // namespace android
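A hedged usage sketch of the new formatted-logging entry points added above (the shared-memory arguments and the logged values are assumptions; only the %d/%f/%s/%p specifiers that logVFormat() handles are used):

// Writer side: one logFormat() call emits EVENT_START_FMT, a timestamp,
// one event per argument (EVENT_INTEGER/EVENT_FLOAT/EVENT_STRING/EVENT_PID)
// and a closing EVENT_END_FMT, which Reader::handleFormat() reassembles.
#include <cstddef>
#include <media/nbaio/NBLog.h>

void logMixerCycle(void* shared, size_t size) {      // 'shared'/'size' are assumed to
    android::NBLog::Writer writer(shared, size);     // describe an existing log region
    int cycle = 42;                                   // illustrative values
    double elapsedMs = 1.25;
    writer.logFormat("cycle %d took %f ms in %s (pid %p)",
                     cycle, elapsedMs, "FastMixer");
    // %p consumes no vararg: logVFormat() calls logPID() itself.
    // All of these calls are no-ops if the writer is not enabled.
}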
diff --git a/media/liboboe/README.md b/media/liboboe/README.md
deleted file mode 100644
index 80894c6..0000000
--- a/media/liboboe/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Oboe Audio input/output API
diff --git a/media/liboboe/examples/write_sine/src/write_sine.cpp b/media/liboboe/examples/write_sine/src/write_sine.cpp
deleted file mode 100644
index 084665c..0000000
--- a/media/liboboe/examples/write_sine/src/write_sine.cpp
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Play sine waves using Oboe.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-#include "SineGenerator.h"
-
-#define SAMPLE_RATE 48000
-#define NUM_SECONDS 10
-
-static const char *getSharingModeText(oboe_sharing_mode_t mode) {
- const char *modeText = "unknown";
- switch (mode) {
- case OBOE_SHARING_MODE_EXCLUSIVE:
- modeText = "EXCLUSIVE";
- break;
- case OBOE_SHARING_MODE_LEGACY:
- modeText = "LEGACY";
- break;
- case OBOE_SHARING_MODE_SHARED:
- modeText = "SHARED";
- break;
- case OBOE_SHARING_MODE_PUBLIC_MIX:
- modeText = "PUBLIC_MIX";
- break;
- default:
- break;
- }
- return modeText;
-}
-
-int main(int argc, char **argv)
-{
- (void)argc; // unused
-
- oboe_result_t result = OBOE_OK;
-
- const int requestedSamplesPerFrame = 2;
- int actualSamplesPerFrame = 0;
- const int requestedSampleRate = SAMPLE_RATE;
- int actualSampleRate = 0;
- const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
- oboe_audio_format_t actualDataFormat = OBOE_AUDIO_FORMAT_PCM16;
-
- const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_EXCLUSIVE;
- //const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_LEGACY;
- oboe_sharing_mode_t actualSharingMode = OBOE_SHARING_MODE_LEGACY;
-
- OboeStreamBuilder oboeBuilder = OBOE_STREAM_BUILDER_NONE;
- OboeStream oboeStream = OBOE_STREAM_NONE;
- oboe_stream_state_t state = OBOE_STREAM_STATE_UNINITIALIZED;
- oboe_size_frames_t framesPerBurst = 0;
- oboe_size_frames_t framesToPlay = 0;
- oboe_size_frames_t framesLeft = 0;
- int32_t xRunCount = 0;
- int16_t *data = nullptr;
-
- SineGenerator sineOsc1;
- SineGenerator sineOsc2;
-
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
-
- printf("%s - Play a sine wave using Oboe\n", argv[0]);
-
- // Use an OboeStreamBuilder to contain requested parameters.
- result = Oboe_createStreamBuilder(&oboeBuilder);
- if (result != OBOE_OK) {
- goto finish;
- }
-
- // Request stream properties.
- result = OboeStreamBuilder_setSampleRate(oboeBuilder, requestedSampleRate);
- if (result != OBOE_OK) {
- goto finish;
- }
- result = OboeStreamBuilder_setSamplesPerFrame(oboeBuilder, requestedSamplesPerFrame);
- if (result != OBOE_OK) {
- goto finish;
- }
- result = OboeStreamBuilder_setFormat(oboeBuilder, requestedDataFormat);
- if (result != OBOE_OK) {
- goto finish;
- }
- result = OboeStreamBuilder_setSharingMode(oboeBuilder, requestedSharingMode);
- if (result != OBOE_OK) {
- goto finish;
- }
-
- // Create an OboeStream using the Builder.
- result = OboeStreamBuilder_openStream(oboeBuilder, &oboeStream);
- printf("oboeStream 0x%08x\n", oboeStream);
- if (result != OBOE_OK) {
- goto finish;
- }
-
- result = OboeStream_getState(oboeStream, &state);
- printf("after open, state = %s\n", Oboe_convertStreamStateToText(state));
-
- // Check to see what kind of stream we actually got.
- result = OboeStream_getSampleRate(oboeStream, &actualSampleRate);
- printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
-
- sineOsc1.setup(440.0, actualSampleRate);
- sineOsc2.setup(660.0, actualSampleRate);
-
- result = OboeStream_getSamplesPerFrame(oboeStream, &actualSamplesPerFrame);
- printf("SamplesPerFrame: requested = %d, actual = %d\n",
- requestedSamplesPerFrame, actualSamplesPerFrame);
-
- result = OboeStream_getSharingMode(oboeStream, &actualSharingMode);
- printf("SharingMode: requested = %s, actual = %s\n",
- getSharingModeText(requestedSharingMode),
- getSharingModeText(actualSharingMode));
-
- // This is the number of frames that are read in one chunk by a DMA controller
- // or a DSP or a mixer.
- result = OboeStream_getFramesPerBurst(oboeStream, &framesPerBurst);
- printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - OboeStream_getFramesPerBurst() returned %d\n", result);
- goto finish;
- }
- // Some DMA might use very short bursts of 16 frames. We don't need to write such small
- // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
- while (framesPerBurst < 48) {
- framesPerBurst *= 2;
- }
- printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
-
- OboeStream_getFormat(oboeStream, &actualDataFormat);
- printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
- // TODO handle other data formats
-
- // Allocate a buffer for the audio data.
- data = new int16_t[framesPerBurst * actualSamplesPerFrame];
- if (data == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = OBOE_ERROR_NO_MEMORY;
- goto finish;
- }
-
- // Start the stream.
- printf("call OboeStream_requestStart()\n");
- result = OboeStream_requestStart(oboeStream);
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - OboeStream_requestStart() returned %d\n", result);
- goto finish;
- }
-
- result = OboeStream_getState(oboeStream, &state);
- printf("after start, state = %s\n", Oboe_convertStreamStateToText(state));
-
- // Play for a while.
- framesToPlay = actualSampleRate * NUM_SECONDS;
- framesLeft = framesToPlay;
- while (framesLeft > 0) {
- // Render sine waves to left and right channels.
- sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerBurst);
- if (actualSamplesPerFrame > 1) {
- sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerBurst);
- }
-
- // Write audio data to the stream.
- oboe_nanoseconds_t timeoutNanos = 100 * OBOE_NANOS_PER_MILLISECOND;
- int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
- int actual = OboeStream_write(oboeStream, data, minFrames, timeoutNanos);
- if (actual < 0) {
- fprintf(stderr, "ERROR - OboeStream_write() returned %zd\n", actual);
- goto finish;
- } else if (actual == 0) {
- fprintf(stderr, "WARNING - OboeStream_write() returned %zd\n", actual);
- goto finish;
- }
- framesLeft -= actual;
- }
-
- result = OboeStream_getXRunCount(oboeStream, &xRunCount);
- printf("OboeStream_getXRunCount %d\n", xRunCount);
-
-finish:
- delete[] data;
- OboeStream_close(oboeStream);
- OboeStreamBuilder_delete(oboeBuilder);
- printf("exiting - Oboe result = %d = %s\n", result, Oboe_convertResultToText(result));
- return (result != OBOE_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
-}
-
diff --git a/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp b/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp
deleted file mode 100644
index aedcc6e..0000000
--- a/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Play sine waves using an Oboe background thread.
-
-#include <assert.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-#include <time.h>
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-#include "SineGenerator.h"
-
-#define NUM_SECONDS 10
-#define SHARING_MODE OBOE_SHARING_MODE_EXCLUSIVE
-//#define SHARING_MODE OBOE_SHARING_MODE_LEGACY
-
-// Prototype for a callback.
-typedef int audio_callback_proc_t(float *outputBuffer,
- oboe_size_frames_t numFrames,
- void *userContext);
-
-static void *SimpleOboePlayerThreadProc(void *arg);
-
-/**
- * Simple wrapper for Oboe that opens a default stream and then calls
- * a callback function to fill the output buffers.
- */
-class SimpleOboePlayer {
-public:
- SimpleOboePlayer() {}
- virtual ~SimpleOboePlayer() {
- close();
- };
-
- void setSharingMode(oboe_sharing_mode_t requestedSharingMode) {
- mRequestedSharingMode = requestedSharingMode;
- }
-
- /** Also known as "sample rate"
- */
- int32_t getFramesPerSecond() {
- return mFramesPerSecond;
- }
-
- int32_t getSamplesPerFrame() {
- return mSamplesPerFrame;
- }
-
- /**
- * Open a stream
- */
- oboe_result_t open(audio_callback_proc_t *proc, void *userContext) {
- mCallbackProc = proc;
- mUserContext = userContext;
- oboe_result_t result = OBOE_OK;
-
- // Use an OboeStreamBuilder to contain requested parameters.
- result = Oboe_createStreamBuilder(&mBuilder);
- if (result != OBOE_OK) return result;
-
- result = OboeStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
- if (result != OBOE_OK) goto finish1;
-
- // Open an OboeStream using the Builder.
- result = OboeStreamBuilder_openStream(mBuilder, &mStream);
- if (result != OBOE_OK) goto finish1;
-
- // Check to see what kind of stream we actually got.
- result = OboeStream_getSampleRate(mStream, &mFramesPerSecond);
- printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
- if (result != OBOE_OK) goto finish2;
- result = OboeStream_getSamplesPerFrame(mStream, &mSamplesPerFrame);
- printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
- if (result != OBOE_OK) goto finish2;
-
- // This is the number of frames that are read in one chunk by a DMA controller
- // or a DSP or a mixer.
- result = OboeStream_getFramesPerBurst(mStream, &mFramesPerBurst);
- if (result != OBOE_OK) goto finish2;
- // Some DMA might use very short bursts. We don't need to write such small
- // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
- while (mFramesPerBurst < 48) {
- mFramesPerBurst *= 2;
- }
- printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
-
- result = OboeStream_getFormat(mStream, &mDataFormat);
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - OboeStream_getFormat() returned %d\n", result);
- goto finish2;
- }
-
- // Allocate a buffer for the audio data.
- mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
- if (mOutputBuffer == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = OBOE_ERROR_NO_MEMORY;
- }
-
- // If needed allocate a buffer for converting float to int16_t.
- if (mDataFormat == OBOE_AUDIO_FORMAT_PCM16) {
- mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
- if (mConversionBuffer == nullptr) {
- fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
- result = OBOE_ERROR_NO_MEMORY;
- }
- }
- return result;
-
- finish2:
- OboeStream_close(mStream);
- mStream = OBOE_HANDLE_INVALID;
- finish1:
- OboeStreamBuilder_delete(mBuilder);
- mBuilder = OBOE_HANDLE_INVALID;
- return result;
- }
-
- oboe_result_t close() {
- stop();
- OboeStream_close(mStream);
- mStream = OBOE_HANDLE_INVALID;
- OboeStreamBuilder_delete(mBuilder);
- mBuilder = OBOE_HANDLE_INVALID;
- delete mOutputBuffer;
- mOutputBuffer = nullptr;
- delete mConversionBuffer;
- mConversionBuffer = nullptr;
- return OBOE_OK;
- }
-
- // Start a thread that will call the callback proc.
- oboe_result_t start() {
- mEnabled = true;
- oboe_nanoseconds_t nanosPerBurst = mFramesPerBurst * OBOE_NANOS_PER_SECOND
- / mFramesPerSecond;
- return OboeStream_createThread(mStream, nanosPerBurst,
- SimpleOboePlayerThreadProc,
- this);
- }
-
- // Tell the thread to stop.
- oboe_result_t stop() {
- mEnabled = false;
- return OboeStream_joinThread(mStream, nullptr, 2 * OBOE_NANOS_PER_SECOND);
- }
-
- oboe_result_t callbackLoop() {
- int32_t framesWritten = 0;
- int32_t xRunCount = 0;
- oboe_result_t result = OBOE_OK;
-
- result = OboeStream_requestStart(mStream);
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - OboeStream_requestStart() returned %d\n", result);
- return result;
- }
-
- // Give up after several burst periods have passed.
- const int burstsPerTimeout = 8;
- oboe_nanoseconds_t nanosPerTimeout =
- burstsPerTimeout * mFramesPerBurst * OBOE_NANOS_PER_SECOND
- / mFramesPerSecond;
-
- while (mEnabled && result >= 0) {
- // Call application's callback function to fill the buffer.
- if (mCallbackProc(mOutputBuffer, mFramesPerBurst, mUserContext)) {
- mEnabled = false;
- }
- // if needed, convert from float to int16_t PCM
- if (mConversionBuffer != nullptr) {
- int32_t numSamples = mFramesPerBurst * mSamplesPerFrame;
- for (int i = 0; i < numSamples; i++) {
- mConversionBuffer[i] = (int16_t)(32767.0 * mOutputBuffer[i]);
- }
- // Write the application data to stream.
- result = OboeStream_write(mStream, mConversionBuffer, mFramesPerBurst, nanosPerTimeout);
- } else {
- // Write the application data to stream.
- result = OboeStream_write(mStream, mOutputBuffer, mFramesPerBurst, nanosPerTimeout);
- }
- framesWritten += result;
- if (result < 0) {
- fprintf(stderr, "ERROR - OboeStream_write() returned %zd\n", result);
- }
- }
-
- result = OboeStream_getXRunCount(mStream, &xRunCount);
- printf("OboeStream_getXRunCount %d\n", xRunCount);
-
- result = OboeStream_requestStop(mStream);
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - OboeStream_requestStart() returned %d\n", result);
- return result;
- }
-
- return result;
- }
-
-private:
- OboeStreamBuilder mBuilder = OBOE_HANDLE_INVALID;
- OboeStream mStream = OBOE_HANDLE_INVALID;
- float * mOutputBuffer = nullptr;
- int16_t * mConversionBuffer = nullptr;
-
- audio_callback_proc_t * mCallbackProc = nullptr;
- void * mUserContext = nullptr;
- oboe_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
- int32_t mSamplesPerFrame = 0;
- int32_t mFramesPerSecond = 0;
- oboe_size_frames_t mFramesPerBurst = 0;
- oboe_audio_format_t mDataFormat = OBOE_AUDIO_FORMAT_PCM16;
-
- volatile bool mEnabled = false; // used to request that callback exit its loop
-};
-
-static void *SimpleOboePlayerThreadProc(void *arg) {
- SimpleOboePlayer *player = (SimpleOboePlayer *) arg;
- player->callbackLoop();
- return nullptr;
-}
-
-// Application data that gets passed to the callback.
-typedef struct SineThreadedData_s {
- SineGenerator sineOsc1;
- SineGenerator sineOsc2;
- int32_t samplesPerFrame = 0;
-} SineThreadedData_t;
-
-// Callback function that fills the audio output buffer.
-int MyCallbackProc(float *outputBuffer, int32_t numFrames, void *userContext) {
- SineThreadedData_t *data = (SineThreadedData_t *) userContext;
- // Render sine waves to left and right channels.
- data->sineOsc1.render(&outputBuffer[0], data->samplesPerFrame, numFrames);
- if (data->samplesPerFrame > 1) {
- data->sineOsc2.render(&outputBuffer[1], data->samplesPerFrame, numFrames);
- }
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- (void)argc; // unused
- SimpleOboePlayer player;
- SineThreadedData_t myData;
- oboe_result_t result;
-
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine wave using an Oboe Thread\n", argv[0]);
-
- result = player.open(MyCallbackProc, &myData);
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - player.open() returned %d\n", result);
- goto error;
- }
- printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
- printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
- myData.sineOsc1.setup(440.0, 48000);
- myData.sineOsc1.setSweep(300.0, 2000.0, 5.0);
- myData.sineOsc2.setup(660.0, 48000);
- myData.sineOsc2.setSweep(400.0, 3000.0, 7.0);
- myData.samplesPerFrame = player.getSamplesPerFrame();
-
- result = player.start();
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - player.start() returned %d\n", result);
- goto error;
- }
-
- printf("Sleep for %d seconds while audio plays in a background thread.\n", NUM_SECONDS);
- {
- // FIXME sleep is not an NDK API
- // sleep(NUM_SECONDS);
- const struct timespec request = { .tv_sec = NUM_SECONDS, .tv_nsec = 0 };
- (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
- }
- printf("Woke up now.\n");
-
- result = player.stop();
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - player.stop() returned %d\n", result);
- goto error;
- }
- result = player.close();
- if (result != OBOE_OK) {
- fprintf(stderr, "ERROR - player.close() returned %d\n", result);
- goto error;
- }
-
- printf("SUCCESS\n");
- return EXIT_SUCCESS;
-error:
- player.close();
- printf("exiting - Oboe result = %d = %s\n", result, Oboe_convertResultToText(result));
- return EXIT_FAILURE;
-}
-
diff --git a/media/liboboe/examples/write_sine/static/README.md b/media/liboboe/examples/write_sine/static/README.md
deleted file mode 100644
index 768f4cb..0000000
--- a/media/liboboe/examples/write_sine/static/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Makefile for building simple command line examples.
-They link with Oboe as a static library.
diff --git a/media/liboboe/include/oboe/OboeAudio.h b/media/liboboe/include/oboe/OboeAudio.h
deleted file mode 100644
index 52e3f69..0000000
--- a/media/liboboe/include/oboe/OboeAudio.h
+++ /dev/null
@@ -1,574 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This is the 'C' ABI for Oboe.
- */
-#ifndef OBOE_OBOEAUDIO_H
-#define OBOE_OBOEAUDIO_H
-
-#include "OboeDefinitions.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef oboe_handle_t OboeStream;
-typedef oboe_handle_t OboeStreamBuilder;
-
-#define OBOE_STREAM_NONE ((OboeStream)OBOE_HANDLE_INVALID)
-#define OBOE_STREAM_BUILDER_NONE ((OboeStreamBuilder)OBOE_HANDLE_INVALID)
-
-/* OBOE_API will probably get defined in a Makefile for a specific platform. */
-#ifndef OBOE_API
-#define OBOE_API /* for exporting symbols */
-#endif
-
-// ============================================================
-// Audio System
-// ============================================================
-
-/**
- * @return time in the same clock domain as the timestamps
- */
-OBOE_API oboe_nanoseconds_t Oboe_getNanoseconds(oboe_clockid_t clockid);
-
-/**
- * The text is the ASCII symbol corresponding to the returnCode,
- * or an English message saying the returnCode is unrecognized.
- * This is intended for developers to use when debugging.
- * It is not for display to users.
- *
- * @return pointer to a text representation of an Oboe result code.
- */
-OBOE_API const char * Oboe_convertResultToText(oboe_result_t returnCode);
-
-/**
- * The text is the ASCII symbol corresponding to the stream state,
- * or an English message saying the state is unrecognized.
- * This is intended for developers to use when debugging.
- * It is not for display to users.
- *
- * @return pointer to a text representation of an Oboe state.
- */
-OBOE_API const char * Oboe_convertStreamStateToText(oboe_stream_state_t state);
-
-// ============================================================
-// StreamBuilder
-// ============================================================
-
-/**
- * Create a StreamBuilder that can be used to open a Stream.
- *
- * The deviceId is initially unspecified, meaning that the current default device will be used.
- *
- * The default direction is OBOE_DIRECTION_OUTPUT.
- * The default sharing mode is OBOE_SHARING_MODE_LEGACY.
- * The data format, samplesPerFrames and sampleRate are unspecified and will be
- * chosen by the device when it is opened.
- *
- * OboeStreamBuilder_delete() must be called when you are done using the builder.
- */
-OBOE_API oboe_result_t Oboe_createStreamBuilder(OboeStreamBuilder *builder);
-
-/**
- * Request an audio device identified by an ID.
- * The ID is platform specific.
- * On Android, for example, the ID could be obtained from the Java AudioManager.
- *
- * By default, the primary device will be used.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param deviceId platform specific identifier or OBOE_DEVICE_UNSPECIFIED
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
- oboe_device_id_t deviceId);
-/**
- * Passes back requested device ID.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getDeviceId(OboeStreamBuilder builder,
- oboe_device_id_t *deviceId);
-
-/**
- * Request a sample rate in Hz.
- * The stream may be opened with a different sample rate.
- * So the application should query for the actual rate after the stream is opened.
- *
- * Technically, this should be called the "frame rate" or "frames per second",
- * because it refers to the number of complete frames transferred per second.
- * But it is traditionally called "sample rate". So we use that term.
- *
- * Default is OBOE_UNSPECIFIED.
- *
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
- oboe_sample_rate_t sampleRate);
-
-/**
- * Returns sample rate in Hertz (samples per second).
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
- oboe_sample_rate_t *sampleRate);
-
-
-/**
- * Request a number of samples per frame.
- * The stream may be opened with a different value.
- * So the application should query for the actual value after the stream is opened.
- *
- * Default is OBOE_UNSPECIFIED.
- *
- * Note, this quantity is sometimes referred to as "channel count".
- *
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setSamplesPerFrame(OboeStreamBuilder builder,
- int32_t samplesPerFrame);
-
-/**
- * Note, this quantity is sometimes referred to as "channel count".
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getSamplesPerFrame(OboeStreamBuilder builder,
- int32_t *samplesPerFrame);
-
-
-/**
- * Request a sample data format, for example OBOE_AUDIO_FORMAT_PCM16.
- * The application should query for the actual format after the stream is opened.
- *
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setFormat(OboeStreamBuilder builder,
- oboe_audio_format_t format);
-
-/**
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getFormat(OboeStreamBuilder builder,
- oboe_audio_format_t *format);
-
-/**
- * Request a mode for sharing the device.
- * The requested sharing mode may not be available.
- * So the application should query for the actual mode after the stream is opened.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param sharingMode OBOE_SHARING_MODE_LEGACY or OBOE_SHARING_MODE_EXCLUSIVE
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setSharingMode(OboeStreamBuilder builder,
- oboe_sharing_mode_t sharingMode);
-
-/**
- * Return requested sharing mode.
- * @return OBOE_OK or a negative error
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getSharingMode(OboeStreamBuilder builder,
- oboe_sharing_mode_t *sharingMode);
-
-/**
- * Request the direction for a stream. The default is OBOE_DIRECTION_OUTPUT.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param direction OBOE_DIRECTION_OUTPUT or OBOE_DIRECTION_INPUT
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setDirection(OboeStreamBuilder builder,
- oboe_direction_t direction);
-
-/**
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param direction pointer to a variable to be set to the currently requested direction.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getDirection(OboeStreamBuilder builder,
- oboe_direction_t *direction);
-
-/**
- * Open a stream based on the options in the StreamBuilder.
- *
- * OboeStream_close must be called when finished with the stream to recover
- * the memory and to free the associated resources.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param stream pointer to a variable to receive the new stream handle
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_openStream(OboeStreamBuilder builder,
- OboeStream *stream);
-
-/**
- * Delete the resources associated with the StreamBuilder.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_delete(OboeStreamBuilder builder);
-
-// ============================================================
-// Stream Control
-// ============================================================
-
-/**
- * Free the resources associated with a stream created by OboeStreamBuilder_openStream()
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_close(OboeStream stream);
-
-/**
- * Asynchronously request to start playing the stream. For output streams, one should
- * write to the stream to fill the buffer before starting.
- * Otherwise it will underflow.
- * After this call the state will be in OBOE_STREAM_STATE_STARTING or OBOE_STREAM_STATE_STARTED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_requestStart(OboeStream stream);
-
-/**
- * Asynchronous request for the stream to pause.
- * Pausing a stream will freeze the data flow but not flush any buffers.
- * Use OboeStream_Start() to resume playback after a pause.
- * After this call the state will be in OBOE_STREAM_STATE_PAUSING or OBOE_STREAM_STATE_PAUSED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_requestPause(OboeStream stream);
-
-/**
- * Asynchronous request for the stream to flush.
- * Flushing will discard any pending data.
- * This call only works if the stream is pausing or paused. TODO review
- * Frame counters are not reset by a flush. They may be advanced.
- * After this call the state will be in OBOE_STREAM_STATE_FLUSHING or OBOE_STREAM_STATE_FLUSHED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_requestFlush(OboeStream stream);
-
-/**
- * Asynchronous request for the stream to stop.
- * The stream will stop after all of the data currently buffered has been played.
- * After this call the state will be in OBOE_STREAM_STATE_STOPPING or OBOE_STREAM_STATE_STOPPED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_requestStop(OboeStream stream);
-
-/**
- * Query the current state, eg. OBOE_STREAM_STATE_PAUSING
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param state pointer to a variable that will be set to the current state
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getState(OboeStream stream, oboe_stream_state_t *state);
-
-/**
- * Wait until the current state no longer matches the input state.
- *
- * <pre><code>
- * oboe_stream_state_t currentState;
- * oboe_result_t result = OboeStream_getState(stream, &currentState);
- * while (result == OBOE_OK && currentState != OBOE_STREAM_STATE_PAUSING) {
- * result = OboeStream_waitForStateChange(
- * stream, currentState, &currentState, MY_TIMEOUT_NANOS);
- * }
- * </code></pre>
- *
- * @param stream A handle provided by OboeStreamBuilder_openStream()
- * @param inputState The state we want to avoid.
- * @param nextState Pointer to a variable that will be set to the new state.
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_waitForStateChange(OboeStream stream,
- oboe_stream_state_t inputState,
- oboe_stream_state_t *nextState,
- oboe_nanoseconds_t timeoutNanoseconds);
-
-// ============================================================
-// Stream I/O
-// ============================================================
-
-/**
- * Read data from the stream.
- *
- * The call will wait until the read is complete or until it runs out of time.
- * If timeoutNanos is zero then this call will not wait.
- *
- * Note that timeoutNanoseconds is a relative duration in wall clock time.
- * Time will not stop if the thread is asleep.
- * So it will be implemented using CLOCK_BOOTTIME.
- *
- * This call is "strong non-blocking" unless it has to wait for data.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param buffer The address of the first sample.
- * @param numFrames Number of frames to read. Only complete frames will be written.
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return The number of frames actually read or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_read(OboeStream stream,
- void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds);
-
-/**
- * Write data to the stream.
- *
- * The call will wait until the write is complete or until it runs out of time.
- * If timeoutNanos is zero then this call will not wait.
- *
- * Note that timeoutNanoseconds is a relative duration in wall clock time.
- * Time will not stop if the thread is asleep.
- * So it will be implemented using CLOCK_BOOTTIME.
- *
- * This call is "strong non-blocking" unless it has to wait for room in the buffer.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param buffer The address of the first sample.
- * @param numFrames Number of frames to write. Only complete frames will be written.
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return The number of frames actually written or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_write(OboeStream stream,
- const void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds);
-
-
-// ============================================================
-// High priority audio threads
-// ============================================================
-
-typedef void *(oboe_audio_thread_proc_t)(void *);
-
-/**
- * Create a thread associated with a stream. The thread has special properties for
- * low latency audio performance. This thread can be used to implement a callback API.
- *
- * Only one thread may be associated with a stream.
- *
- * Note that this API is in flux.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param periodNanoseconds the estimated period at which the audio thread will need to wake up
- * @param startRoutine your thread entry point
- * @param arg an argument that will be passed to your thread entry point
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
- oboe_nanoseconds_t periodNanoseconds,
- oboe_audio_thread_proc_t *threadProc,
- void *arg);
-
-/**
- * Wait until the thread exits or an error occurs.
- * The thread handle will be deleted.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param returnArg a pointer to a variable to receive the return value
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_joinThread(OboeStream stream,
- void **returnArg,
- oboe_nanoseconds_t timeoutNanoseconds);
-
-// ============================================================
-// Stream - queries
-// ============================================================
-
-
-/**
- * This can be used to adjust the latency of the buffer by changing
- * the threshold where blocking will occur.
- * By combining this with OboeStream_getUnderrunCount(), the latency can be tuned
- * at run-time for each device.
- *
- * This cannot be set higher than OboeStream_getBufferCapacity().
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param requestedFrames requested number of frames that can be filled without blocking
- * @param actualFrames receives final number of frames
- * @return OBOE_OK or a negative error
- */
-OBOE_API oboe_result_t OboeStream_setBufferSize(OboeStream stream,
- oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames);
-
-/**
- * Query the maximum number of frames that can be filled without blocking.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer size
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getBufferSize(OboeStream stream, oboe_size_frames_t *frames);
-
-/**
- * Query the number of frames that are read or written by the endpoint at one time.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the burst size
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFramesPerBurst(OboeStream stream, oboe_size_frames_t *frames);
-
-/**
- * Query maximum buffer capacity in frames.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer capacity
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getBufferCapacity(OboeStream stream, oboe_size_frames_t *frames);
-
-/**
- * An XRun is an Underrun or an Overrun.
- * During playing, an underrun will occur if the stream is not written in time
- * and the system runs out of valid data.
- * During recording, an overrun will occur if the stream is not read in time
- * and there is no place to put the incoming data so it is discarded.
- *
- * An underrun or overrun can cause an audible "pop" or "glitch".
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param xRunCount pointer to variable to receive the underrun or overrun count
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getXRunCount(OboeStream stream, int32_t *xRunCount);
-
-/**
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param sampleRate pointer to variable to receive the actual sample rate
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getSampleRate(OboeStream stream, oboe_sample_rate_t *sampleRate);
-
-/**
- * The samplesPerFrame is also known as channelCount.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param samplesPerFrame pointer to variable to receive the actual samples per frame
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getSamplesPerFrame(OboeStream stream, int32_t *samplesPerFrame);
-
-/**
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param deviceId pointer to variable to receive the actual device ID
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getDeviceId(OboeStream stream, oboe_device_id_t *deviceId);
-
-/**
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param format pointer to variable to receive the actual data format
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFormat(OboeStream stream, oboe_audio_format_t *format);
-
-/**
- * Provide actual sharing mode.
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param sharingMode pointer to variable to receive the actual sharing mode
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getSharingMode(OboeStream stream,
- oboe_sharing_mode_t *sharingMode);
-
-/**
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param direction pointer to a variable to be set to the current direction.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getDirection(OboeStream stream, oboe_direction_t *direction);
-
-/**
- * Passes back the number of frames that have been written since the stream was created.
- * For an output stream, this will be advanced by the application calling write().
- * For an input stream, this will be advanced by the device or service.
- *
- * The frame position is monotonically increasing.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFramesWritten(OboeStream stream,
- oboe_position_frames_t *frames);
-
-/**
- * Passes back the number of frames that have been read since the stream was created.
- * For an output stream, this will be advanced by the device or service.
- * For an input stream, this will be advanced by the application calling read().
- *
- * The frame position is monotonically increasing.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFramesRead(OboeStream stream, oboe_position_frames_t *frames);
-
-/**
- * Passes back the time at which a particular frame was presented.
- * This can be used to synchronize audio with video or MIDI.
- * It can also be used to align a recorded stream with a playback stream.
- *
- * Timestamps are only valid when the stream is in OBOE_STREAM_STATE_STARTED.
- * OBOE_ERROR_INVALID_STATE will be returned if the stream is not started.
- * Note that because requestStart() is asynchronous, timestamps will not be valid until
- * a short time after calling requestStart().
- * So OBOE_ERROR_INVALID_STATE should not be considered a fatal error.
- * Just try calling again later.
- *
- * If an error occurs, then the position and time will not be modified.
- *
- * The position and time passed back are monotonically increasing.
- *
- * @param stream A handle provided by OboeStreamBuilder_openStream()
- * @param clockid OBOE_CLOCK_MONOTONIC or OBOE_CLOCK_BOOTTIME
- * @param framePosition pointer to a variable to receive the position
- * @param timeNanoseconds pointer to a variable to receive the time
- * @return OBOE_OK or a negative error
- */
-OBOE_API oboe_result_t OboeStream_getTimestamp(OboeStream stream,
- oboe_clockid_t clockid,
- oboe_position_frames_t *framePosition,
- oboe_nanoseconds_t *timeNanoseconds);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif //OBOE_OBOEAUDIO_H
diff --git a/media/liboboe/include/oboe/OboeDefinitions.h b/media/liboboe/include/oboe/OboeDefinitions.h
deleted file mode 100644
index 9d56a24..0000000
--- a/media/liboboe/include/oboe/OboeDefinitions.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_OBOEDEFINITIONS_H
-#define OBOE_OBOEDEFINITIONS_H
-
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef int32_t oboe_handle_t; // negative handles are error codes
-typedef int32_t oboe_result_t;
-/**
- * A platform specific identifier for a device.
- */
-typedef int32_t oboe_device_id_t;
-typedef int32_t oboe_sample_rate_t;
-/** This is used for small quantities such as the number of frames in a buffer. */
-typedef int32_t oboe_size_frames_t;
-/** This is used for small quantities such as the number of bytes in a frame. */
-typedef int32_t oboe_size_bytes_t;
-/**
- * This is used for large quantities, such as the number of frames that have
- * been played since a stream was started.
- * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
- */
-typedef int64_t oboe_position_frames_t;
-
-typedef int64_t oboe_nanoseconds_t;
-
-/**
- * This is used to represent a value that has not been specified.
- * For example, an application could use OBOE_UNSPECIFIED to indicate
- * that it did not care what the specific value of a parameter was
- * and would accept whatever it was given.
- */
-#define OBOE_UNSPECIFIED 0
-#define OBOE_DEVICE_UNSPECIFIED ((oboe_device_id_t) -1)
-#define OBOE_NANOS_PER_MICROSECOND ((int64_t)1000)
-#define OBOE_NANOS_PER_MILLISECOND (OBOE_NANOS_PER_MICROSECOND * 1000)
-#define OBOE_MILLIS_PER_SECOND 1000
-#define OBOE_NANOS_PER_SECOND (OBOE_NANOS_PER_MILLISECOND * OBOE_MILLIS_PER_SECOND)
-
-#define OBOE_HANDLE_INVALID ((oboe_handle_t)-1)
-
-enum oboe_direction_t {
- OBOE_DIRECTION_OUTPUT,
- OBOE_DIRECTION_INPUT,
- OBOE_DIRECTION_COUNT // This should always be last.
-};
-
-enum oboe_audio_format_t {
- OBOE_AUDIO_FORMAT_INVALID = -1,
- OBOE_AUDIO_FORMAT_UNSPECIFIED = 0,
- OBOE_AUDIO_FORMAT_PCM16, // TODO rename to _PCM_I16
- OBOE_AUDIO_FORMAT_PCM_FLOAT,
- OBOE_AUDIO_FORMAT_PCM824, // TODO rename to _PCM_I8_24
- OBOE_AUDIO_FORMAT_PCM32 // TODO rename to _PCM_I32
-};
-
-enum {
- OBOE_OK,
- OBOE_ERROR_BASE = -900, // TODO review
- OBOE_ERROR_DISCONNECTED,
- OBOE_ERROR_ILLEGAL_ARGUMENT,
- OBOE_ERROR_INCOMPATIBLE,
- OBOE_ERROR_INTERNAL, // an underlying API returned an error code
- OBOE_ERROR_INVALID_STATE,
- OBOE_ERROR_UNEXPECTED_STATE,
- OBOE_ERROR_UNEXPECTED_VALUE,
- OBOE_ERROR_INVALID_HANDLE,
- OBOE_ERROR_INVALID_QUERY,
- OBOE_ERROR_UNIMPLEMENTED,
- OBOE_ERROR_UNAVAILABLE,
- OBOE_ERROR_NO_FREE_HANDLES,
- OBOE_ERROR_NO_MEMORY,
- OBOE_ERROR_NULL,
- OBOE_ERROR_TIMEOUT,
- OBOE_ERROR_WOULD_BLOCK,
- OBOE_ERROR_INVALID_ORDER,
- OBOE_ERROR_OUT_OF_RANGE
-};
-
-typedef enum {
- OBOE_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
- OBOE_CLOCK_BOOTTIME, // Clock since booted, runs all the time.
- OBOE_CLOCK_COUNT // This should always be last.
-} oboe_clockid_t;
-
-typedef enum
-{
- OBOE_STREAM_STATE_UNINITIALIZED = 0,
- OBOE_STREAM_STATE_OPEN,
- OBOE_STREAM_STATE_STARTING,
- OBOE_STREAM_STATE_STARTED,
- OBOE_STREAM_STATE_PAUSING,
- OBOE_STREAM_STATE_PAUSED,
- OBOE_STREAM_STATE_FLUSHING,
- OBOE_STREAM_STATE_FLUSHED,
- OBOE_STREAM_STATE_STOPPING,
- OBOE_STREAM_STATE_STOPPED,
- OBOE_STREAM_STATE_CLOSING,
- OBOE_STREAM_STATE_CLOSED,
-} oboe_stream_state_t;
-
-// TODO review API
-typedef enum {
- /**
- * This will use an AudioTrack object for playing audio
- * and an AudioRecord for recording data.
- */
- OBOE_SHARING_MODE_LEGACY,
- /**
- * This will be the only stream using a particular source or sink.
- * This mode will provide the lowest possible latency.
- * You should close EXCLUSIVE streams immediately when you are not using them.
- */
- OBOE_SHARING_MODE_EXCLUSIVE,
- /**
- * Multiple applications will be mixed by the Oboe Server.
- * This will have higher latency than the EXCLUSIVE mode.
- */
- OBOE_SHARING_MODE_SHARED,
- /**
- * Multiple applications will do their own mixing into a memory mapped buffer.
- * It may be possible for malicious applications to read the data produced by
- * other apps. So do not use this for private data such as telephony or messaging.
- */
- OBOE_SHARING_MODE_PUBLIC_MIX,
- OBOE_SHARING_MODE_COUNT // This should always be last.
-} oboe_sharing_mode_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // OBOE_OBOEDEFINITIONS_H
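Aside on the deleted header above: its comment for oboe_position_frames_t ("At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours") is the rationale for using int64_t. A tiny illustrative check of that arithmetic, not part of the removed file:

    // Illustration only: verify the 32-bit wraparound estimate from OboeDefinitions.h.
    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr int64_t kMaxInt32Frames = INT32_MAX;   // 2^31 - 1 frames
        constexpr int64_t kFramesPerSecond = 48000;
        const double hours = static_cast<double>(kMaxInt32Frames) / kFramesPerSecond / 3600.0;
        std::printf("signed 32-bit frame counter wraps after %.1f hours\n", hours);  // ~12.4
        return 0;
    }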
diff --git a/media/liboboe/liboboe.map.txt b/media/liboboe/liboboe.map.txt
deleted file mode 100644
index 9be7fe1..0000000
--- a/media/liboboe/liboboe.map.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-LIBOBOE {
- global:
- Oboe_getNanoseconds;
- Oboe_convertResultToText;
- Oboe_convertStreamStateToText;
- Oboe_createStreamBuilder;
- OboeStreamBuilder_setDeviceId;
- OboeStreamBuilder_setSampleRate;
- OboeStreamBuilder_getSampleRate;
- OboeStreamBuilder_setSamplesPerFrame;
- OboeStreamBuilder_getSamplesPerFrame;
- OboeStreamBuilder_setFormat;
- OboeStreamBuilder_getFormat;
- OboeStreamBuilder_setSharingMode;
- OboeStreamBuilder_getSharingMode;
- OboeStreamBuilder_setDirection;
- OboeStreamBuilder_getDirection;
- OboeStreamBuilder_openStream;
- OboeStreamBuilder_delete;
- OboeStream_close;
- OboeStream_requestStart;
- OboeStream_requestPause;
- OboeStream_requestFlush;
- OboeStream_requestStop;
- OboeStream_getState;
- OboeStream_waitForStateChange;
- OboeStream_read;
- OboeStream_write;
- OboeStream_createThread;
- OboeStream_joinThread;
- OboeStream_setBufferSize;
- OboeStream_getBufferSize;
- OboeStream_getFramesPerBurst;
- OboeStream_getBufferCapacity;
- OboeStream_getXRunCount;
- OboeStream_getSampleRate;
- OboeStream_getSamplesPerFrame;
- OboeStream_getFormat;
- OboeStream_getSharingMode;
- OboeStream_getDirection;
- OboeStream_getFramesWritten;
- OboeStream_getFramesRead;
- OboeStream_getTimestamp;
- local:
- *;
-};
diff --git a/media/liboboe/src/binding/IOboeAudioService.cpp b/media/liboboe/src/binding/IOboeAudioService.cpp
deleted file mode 100644
index a3437b2..0000000
--- a/media/liboboe/src/binding/IOboeAudioService.cpp
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <oboe/OboeDefinitions.h>
-
-#include "binding/AudioEndpointParcelable.h"
-#include "binding/OboeStreamRequest.h"
-#include "binding/OboeStreamConfiguration.h"
-#include "binding/IOboeAudioService.h"
-
-namespace android {
-
-/**
- * This is used by the Oboe Client to talk to the Oboe Service.
- *
- * The order of parameters in the Parcels must match the code in OboeAudioService.cpp.
- */
-class BpOboeAudioService : public BpInterface<IOboeAudioService>
-{
-public:
- explicit BpOboeAudioService(const sp<IBinder>& impl)
- : BpInterface<IOboeAudioService>(impl)
- {
- }
-
- virtual oboe_handle_t openStream(oboe::OboeStreamRequest &request,
- oboe::OboeStreamConfiguration &configuration) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- request.writeToParcel(&data);
- status_t err = remote()->transact(OPEN_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- oboe_handle_t stream;
- reply.readInt32(&stream);
- configuration.readFromParcel(&reply);
- return stream;
- }
-
- virtual oboe_result_t closeStream(int32_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- oboe_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual oboe_result_t getStreamDescription(oboe_handle_t streamHandle,
- AudioEndpointParcelable &parcelable) {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- parcelable.readFromParcel(&reply);
- parcelable.dump();
- oboe_result_t result = parcelable.validate();
- if (result != OBOE_OK) {
- return result;
- }
- reply.readInt32(&result);
- return result;
- }
-
- // TODO should we wait for a reply?
- virtual oboe_result_t startStream(oboe_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(START_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- oboe_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual oboe_result_t pauseStream(oboe_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- oboe_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual oboe_result_t flushStream(oboe_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- oboe_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual void tickle() override { // TODO remove after service thread implemented
- Parcel data;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- remote()->transact(TICKLE, data, nullptr);
- }
-
- virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId,
- oboe_nanoseconds_t periodNanoseconds)
- override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- data.writeInt32((int32_t) clientThreadId);
- data.writeInt64(periodNanoseconds);
- status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- oboe_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId)
- override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- data.writeInt32((int32_t) clientThreadId);
- status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
- if (err != NO_ERROR) {
- return OBOE_ERROR_INTERNAL; // TODO consider another error
- }
- // parse reply
- oboe_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
-};
-
-// Implement an interface to the service.
-// This is here so that you don't have to link with the liboboe static library.
-IMPLEMENT_META_INTERFACE(OboeAudioService, "IOboeAudioService");
-
-// The order of parameters in the Parcels must match the code in BpOboeAudioService
-
-status_t BnOboeAudioService::onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags) {
- OboeStream stream;
- OboeStreamRequest request;
- OboeStreamConfiguration configuration;
- pid_t pid;
- oboe_nanoseconds_t nanoseconds;
- oboe_result_t result;
- ALOGV("BnOboeAudioService::onTransact(%i) %i", code, flags);
- data.checkInterface(this);
-
- switch(code) {
- case OPEN_STREAM: {
- request.readFromParcel(&data);
- stream = openStream(request, configuration);
- ALOGD("BnOboeAudioService::onTransact OPEN_STREAM 0x%08X", stream);
- reply->writeInt32(stream);
- configuration.writeToParcel(reply);
- return NO_ERROR;
- } break;
-
- case CLOSE_STREAM: {
- data.readInt32(&stream);
- ALOGD("BnOboeAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
- result = closeStream(stream);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case GET_STREAM_DESCRIPTION: {
- data.readInt32(&stream);
- ALOGD("BnOboeAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
- oboe::AudioEndpointParcelable parcelable;
- result = getStreamDescription(stream, parcelable);
- if (result != OBOE_OK) {
- return -1; // FIXME
- }
- parcelable.dump();
- result = parcelable.validate();
- if (result != OBOE_OK) {
- return -1; // FIXME
- }
- parcelable.writeToParcel(reply);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case START_STREAM: {
- data.readInt32(&stream);
- result = startStream(stream);
- ALOGD("BnOboeAudioService::onTransact START_STREAM 0x%08X, result = %d",
- stream, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case PAUSE_STREAM: {
- data.readInt32(&stream);
- result = pauseStream(stream);
- ALOGD("BnOboeAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
- stream, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case FLUSH_STREAM: {
- data.readInt32(&stream);
- result = flushStream(stream);
- ALOGD("BnOboeAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
- stream, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case REGISTER_AUDIO_THREAD: {
- data.readInt32(&stream);
- data.readInt32(&pid);
- data.readInt64(&nanoseconds);
- result = registerAudioThread(stream, pid, nanoseconds);
- ALOGD("BnOboeAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
- stream, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case UNREGISTER_AUDIO_THREAD: {
- data.readInt32(&stream);
- data.readInt32(&pid);
- result = unregisterAudioThread(stream, pid);
- ALOGD("BnOboeAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
- stream, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case TICKLE: {
- ALOGV("BnOboeAudioService::onTransact TICKLE");
- tickle();
- return NO_ERROR;
- } break;
-
- default:
- // ALOGW("BnOboeAudioService::onTransact not handled %u", code);
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} /* namespace android */
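The proxy/stub pair removed above follows the standard libbinder pattern: BpOboeAudioService marshals arguments into a Parcel, calls remote()->transact() with a command code, and unmarshals the reply, while BnOboeAudioService::onTransact() reads fields back in exactly the order the proxy wrote them. A minimal sketch of the same shape for a made-up one-method interface (IPingService and everything in it is hypothetical, shown only to illustrate the pattern):

    // Hypothetical IPingService -- same Bp/Bn structure as the deleted IOboeAudioService.
    #include <binder/IInterface.h>
    #include <binder/Parcel.h>

    namespace android {

    class IPingService : public IInterface {
    public:
        DECLARE_META_INTERFACE(PingService);
        enum { PING = IBinder::FIRST_CALL_TRANSACTION };
        virtual int32_t ping(int32_t token) = 0;
    };

    class BpPingService : public BpInterface<IPingService> {
    public:
        explicit BpPingService(const sp<IBinder>& impl) : BpInterface<IPingService>(impl) {}

        virtual int32_t ping(int32_t token) override {
            Parcel data, reply;
            data.writeInterfaceToken(IPingService::getInterfaceDescriptor());
            data.writeInt32(token);                        // marshal arguments in a fixed order
            if (remote()->transact(PING, data, &reply) != NO_ERROR) {
                return -1;                                 // transport failure
            }
            int32_t result;
            reply.readInt32(&result);                      // unmarshal the reply in the same order
            return result;
        }
    };

    IMPLEMENT_META_INTERFACE(PingService, "IPingService");

    // A concrete service derives from BnPingService and implements ping().
    class BnPingService : public BnInterface<IPingService> {
    public:
        virtual status_t onTransact(uint32_t code, const Parcel& data,
                                    Parcel* reply, uint32_t flags = 0) override {
            data.checkInterface(this);
            switch (code) {
            case PING: {
                int32_t token;
                data.readInt32(&token);                    // must mirror the proxy's write order
                reply->writeInt32(ping(token));
                return NO_ERROR;
            }
            default:
                return BBinder::onTransact(code, data, reply, flags);
            }
        }
    };

    } /* namespace android */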
diff --git a/media/liboboe/src/binding/IOboeAudioService.h b/media/liboboe/src/binding/IOboeAudioService.h
deleted file mode 100644
index 4b4c99c..0000000
--- a/media/liboboe/src/binding/IOboeAudioService.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef BINDING_IOBOEAUDIOSERVICE_H
-#define BINDING_IOBOEAUDIOSERVICE_H
-
-#include <stdint.h>
-#include <utils/RefBase.h>
-#include <binder/TextOutput.h>
-#include <binder/IInterface.h>
-
-#include <oboe/OboeAudio.h>
-
-#include "binding/OboeServiceDefinitions.h"
-#include "binding/AudioEndpointParcelable.h"
-#include "binding/OboeStreamRequest.h"
-#include "binding/OboeStreamConfiguration.h"
-
-//using android::status_t;
-//using android::IInterface;
-//using android::BnInterface;
-
-using oboe::AudioEndpointParcelable;
-using oboe::OboeStreamRequest;
-using oboe::OboeStreamConfiguration;
-
-namespace android {
-
-// Interface (our AIDL) - Shared by server and client
-class IOboeAudioService : public IInterface {
-public:
-
- DECLARE_META_INTERFACE(OboeAudioService);
-
- virtual oboe_handle_t openStream(OboeStreamRequest &request,
- OboeStreamConfiguration &configuration) = 0;
-
- virtual oboe_result_t closeStream(int32_t streamHandle) = 0;
-
- /* Get an immutable description of the in-memory queues
- * used to communicate with the underlying HAL or Service.
- */
- virtual oboe_result_t getStreamDescription(oboe_handle_t streamHandle,
- AudioEndpointParcelable &parcelable) = 0;
-
- /**
- * Start the flow of data.
- */
- virtual oboe_result_t startStream(oboe_handle_t streamHandle) = 0;
-
- /**
- * Stop the flow of data such that start() can resume without loss of data.
- */
- virtual oboe_result_t pauseStream(oboe_handle_t streamHandle) = 0;
-
- /**
- * Discard any data held by the underlying HAL or Service.
- */
- virtual oboe_result_t flushStream(oboe_handle_t streamHandle) = 0;
-
- /**
- * Manage the specified thread as a low latency audio thread.
- */
- virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId,
- oboe_nanoseconds_t periodNanoseconds) = 0;
-
- virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle,
- pid_t clientThreadId) = 0;
-
- /**
- * Poke server instead of running a background thread.
- * Cooperative multi-tasking for early development only.
- * TODO remove tickle() when service has its own thread.
- */
- virtual void tickle() { };
-
-};
-
-class BnOboeAudioService : public BnInterface<IOboeAudioService> {
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags = 0);
-
-};
-
-} /* namespace android */
-
-#endif //BINDING_IOBOEAUDIOSERVICE_H
diff --git a/media/liboboe/src/binding/OboeServiceMessage.h b/media/liboboe/src/binding/OboeServiceMessage.h
deleted file mode 100644
index aa13571..0000000
--- a/media/liboboe/src/binding/OboeServiceMessage.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_OBOE_SERVICE_MESSAGE_H
-#define OBOE_OBOE_SERVICE_MESSAGE_H
-
-#include <stdint.h>
-
-#include <oboe/OboeDefinitions.h>
-
-namespace oboe {
-
-// TODO move this an "include" folder for the service.
-
-struct OboeMessageTimestamp {
- oboe_position_frames_t position;
- int64_t deviceOffset; // add to client position to get device position
- oboe_nanoseconds_t timestamp;
-};
-
-typedef enum oboe_service_event_e : uint32_t {
- OBOE_SERVICE_EVENT_STARTED,
- OBOE_SERVICE_EVENT_PAUSED,
- OBOE_SERVICE_EVENT_FLUSHED,
- OBOE_SERVICE_EVENT_CLOSED,
- OBOE_SERVICE_EVENT_DISCONNECTED
-} oboe_service_event_t;
-
-struct OboeMessageEvent {
- oboe_service_event_t event;
- int32_t data1;
- int64_t data2;
-};
-
-typedef struct OboeServiceMessage_s {
- enum class code : uint32_t {
- NOTHING,
- TIMESTAMP,
- EVENT,
- };
-
- code what;
- union {
- OboeMessageTimestamp timestamp;
- OboeMessageEvent event;
- };
-} OboeServiceMessage;
-
-
-} /* namespace oboe */
-
-#endif //OBOE_OBOE_SERVICE_MESSAGE_H
diff --git a/media/liboboe/src/binding/OboeStreamConfiguration.cpp b/media/liboboe/src/binding/OboeStreamConfiguration.cpp
deleted file mode 100644
index 4b8b5b2..0000000
--- a/media/liboboe/src/binding/OboeStreamConfiguration.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdint.h>
-
-#include <sys/mman.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
-
-#include <oboe/OboeDefinitions.h>
-
-#include "binding/OboeStreamConfiguration.h"
-
-using android::NO_ERROR;
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
-using namespace oboe;
-
-OboeStreamConfiguration::OboeStreamConfiguration() {}
-OboeStreamConfiguration::~OboeStreamConfiguration() {}
-
-status_t OboeStreamConfiguration::writeToParcel(Parcel* parcel) const {
- parcel->writeInt32(mDeviceId);
- parcel->writeInt32(mSampleRate);
- parcel->writeInt32(mSamplesPerFrame);
- parcel->writeInt32((int32_t) mAudioFormat);
- return NO_ERROR; // TODO check for errors above
-}
-
-status_t OboeStreamConfiguration::readFromParcel(const Parcel* parcel) {
- int32_t temp;
- parcel->readInt32(&mDeviceId);
- parcel->readInt32(&mSampleRate);
- parcel->readInt32(&mSamplesPerFrame);
- parcel->readInt32(&temp);
- mAudioFormat = (oboe_audio_format_t) temp;
- return NO_ERROR; // TODO check for errors above
-}
-
-oboe_result_t OboeStreamConfiguration::validate() {
- // Validate results of the open.
- if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
- ALOGE("OboeStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
- return OBOE_ERROR_INTERNAL;
- }
-
- if (mSamplesPerFrame < 1 || mSamplesPerFrame >= 32) { // TODO review limits
- ALOGE("OboeStreamConfiguration.validate() invalid samplesPerFrame = %d", mSamplesPerFrame);
- return OBOE_ERROR_INTERNAL;
- }
-
- switch (mAudioFormat) {
- case OBOE_AUDIO_FORMAT_PCM16:
- case OBOE_AUDIO_FORMAT_PCM_FLOAT:
- case OBOE_AUDIO_FORMAT_PCM824:
- case OBOE_AUDIO_FORMAT_PCM32:
- break;
- default:
- ALOGE("OboeStreamConfiguration.validate() invalid audioFormat = %d", mAudioFormat);
- return OBOE_ERROR_INTERNAL;
- }
- return OBOE_OK;
-}
-
-void OboeStreamConfiguration::dump() {
- ALOGD("OboeStreamConfiguration mSampleRate = %d -----", mSampleRate);
- ALOGD("OboeStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
- ALOGD("OboeStreamConfiguration mAudioFormat = %d", (int)mAudioFormat);
-}
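The two methods in the file removed above are deliberately symmetric: writeToParcel() and readFromParcel() touch the same fields in the same order, with the enum cast through int32_t on both sides so the wire format stays stable. The same discipline in a stripped-down, hypothetical parcelable (not part of liboboe):

    // Hypothetical two-field parcelable; shows only the write/read ordering contract.
    #include <cstdint>
    #include <binder/Parcel.h>
    #include <binder/Parcelable.h>

    class PointParcelable : public android::Parcelable {
    public:
        android::status_t writeToParcel(android::Parcel* parcel) const override {
            parcel->writeInt32(mX);              // field order defines the wire format...
            parcel->writeInt32(mY);
            return android::NO_ERROR;
        }

        android::status_t readFromParcel(const android::Parcel* parcel) override {
            parcel->readInt32(&mX);              // ...so reads must follow the exact same order
            parcel->readInt32(&mY);
            return android::NO_ERROR;
        }

    private:
        int32_t mX = 0;
        int32_t mY = 0;
    };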
diff --git a/media/liboboe/src/binding/OboeStreamConfiguration.h b/media/liboboe/src/binding/OboeStreamConfiguration.h
deleted file mode 100644
index 6bc1924..0000000
--- a/media/liboboe/src/binding/OboeStreamConfiguration.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef BINDING_OBOE_STREAM_CONFIGURATION_H
-#define BINDING_OBOE_STREAM_CONFIGURATION_H
-
-#include <stdint.h>
-
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
-#include <oboe/OboeDefinitions.h>
-
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
-namespace oboe {
-
-class OboeStreamConfiguration : public Parcelable {
-public:
- OboeStreamConfiguration();
- virtual ~OboeStreamConfiguration();
-
- oboe_device_id_t getDeviceId() const {
- return mDeviceId;
- }
-
- void setDeviceId(oboe_device_id_t deviceId) {
- mDeviceId = deviceId;
- }
-
- oboe_sample_rate_t getSampleRate() const {
- return mSampleRate;
- }
-
- void setSampleRate(oboe_sample_rate_t sampleRate) {
- mSampleRate = sampleRate;
- }
-
- int32_t getSamplesPerFrame() const {
- return mSamplesPerFrame;
- }
-
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
- oboe_audio_format_t getAudioFormat() const {
- return mAudioFormat;
- }
-
- void setAudioFormat(oboe_audio_format_t audioFormat) {
- mAudioFormat = audioFormat;
- }
-
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
- oboe_result_t validate();
-
- void dump();
-
-protected:
- oboe_device_id_t mDeviceId = OBOE_DEVICE_UNSPECIFIED;
- oboe_sample_rate_t mSampleRate = OBOE_UNSPECIFIED;
- int32_t mSamplesPerFrame = OBOE_UNSPECIFIED;
- oboe_audio_format_t mAudioFormat = OBOE_AUDIO_FORMAT_UNSPECIFIED;
-};
-
-} /* namespace oboe */
-
-#endif //BINDING_OBOE_STREAM_CONFIGURATION_H
diff --git a/media/liboboe/src/client/AudioStreamInternal.cpp b/media/liboboe/src/client/AudioStreamInternal.cpp
deleted file mode 100644
index 0d169e1..0000000
--- a/media/liboboe/src/client/AudioStreamInternal.cpp
+++ /dev/null
@@ -1,528 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "OboeAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <assert.h>
-
-#include <binder/IServiceManager.h>
-
-#include <oboe/OboeAudio.h>
-
-#include "AudioClock.h"
-#include "AudioEndpointParcelable.h"
-#include "binding/OboeStreamRequest.h"
-#include "binding/OboeStreamConfiguration.h"
-#include "binding/IOboeAudioService.h"
-#include "binding/OboeServiceMessage.h"
-
-#include "AudioStreamInternal.h"
-
-#define LOG_TIMESTAMPS 0
-
-using android::String16;
-using android::IServiceManager;
-using android::defaultServiceManager;
-using android::interface_cast;
-
-using namespace oboe;
-
-// Helper function to get access to the "OboeAudioService" service.
-static sp<IOboeAudioService> getOboeAudioService() {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("OboeAudioService"));
- // TODO: If the "OboeHack" service is not running, getService times out and binder == 0.
- sp<IOboeAudioService> service = interface_cast<IOboeAudioService>(binder);
- return service;
-}
-
-AudioStreamInternal::AudioStreamInternal()
- : AudioStream()
- , mClockModel()
- , mAudioEndpoint()
- , mServiceStreamHandle(OBOE_HANDLE_INVALID)
- , mFramesPerBurst(16)
-{
- // TODO protect against mService being NULL;
- // TODO Model access to the service on frameworks/av/media/libaudioclient/AudioSystem.cpp
- mService = getOboeAudioService();
-}
-
-AudioStreamInternal::~AudioStreamInternal() {
-}
-
-oboe_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
-
- oboe_result_t result = OBOE_OK;
- OboeStreamRequest request;
- OboeStreamConfiguration configuration;
-
- result = AudioStream::open(builder);
- if (result < 0) {
- return result;
- }
-
- // Build the request.
- request.setUserId(getuid());
- request.setProcessId(getpid());
- request.getConfiguration().setDeviceId(getDeviceId());
- request.getConfiguration().setSampleRate(getSampleRate());
- request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
- request.getConfiguration().setAudioFormat(getFormat());
- request.dump();
-
- mServiceStreamHandle = mService->openStream(request, configuration);
- ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
- (unsigned int)mServiceStreamHandle);
- if (mServiceStreamHandle < 0) {
- result = mServiceStreamHandle;
- ALOGE("AudioStreamInternal.open(): acquireRealtimeStream oboe_result_t = 0x%08X", result);
- } else {
- result = configuration.validate();
- if (result != OBOE_OK) {
- close();
- return result;
- }
- // Save results of the open.
- setSampleRate(configuration.getSampleRate());
- setSamplesPerFrame(configuration.getSamplesPerFrame());
- setFormat(configuration.getAudioFormat());
-
- oboe::AudioEndpointParcelable parcelable;
- result = mService->getStreamDescription(mServiceStreamHandle, parcelable);
- if (result != OBOE_OK) {
- ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
- mService->closeStream(mServiceStreamHandle);
- return result;
- }
- // resolve parcelable into a descriptor
- parcelable.resolve(&mEndpointDescriptor);
-
- // Configure endpoint based on descriptor.
- mAudioEndpoint.configure(&mEndpointDescriptor);
-
-
- mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
- assert(mFramesPerBurst >= 16);
- assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);
-
- mClockModel.setSampleRate(getSampleRate());
- mClockModel.setFramesPerBurst(mFramesPerBurst);
-
- setState(OBOE_STREAM_STATE_OPEN);
- }
- return result;
-}
-
-oboe_result_t AudioStreamInternal::close() {
- ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
- if (mServiceStreamHandle != OBOE_HANDLE_INVALID) {
- mService->closeStream(mServiceStreamHandle);
- mServiceStreamHandle = OBOE_HANDLE_INVALID;
- return OBOE_OK;
- } else {
- return OBOE_ERROR_INVALID_STATE;
- }
-}
-
-oboe_result_t AudioStreamInternal::requestStart()
-{
- oboe_nanoseconds_t startTime;
- ALOGD("AudioStreamInternal(): start()");
- if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
- return OBOE_ERROR_INVALID_STATE;
- }
- startTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
- mClockModel.start(startTime);
- processTimestamp(0, startTime);
- setState(OBOE_STREAM_STATE_STARTING);
- return mService->startStream(mServiceStreamHandle);
-}
-
-oboe_result_t AudioStreamInternal::requestPause()
-{
- ALOGD("AudioStreamInternal(): pause()");
- if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
- return OBOE_ERROR_INVALID_STATE;
- }
- mClockModel.stop(Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC));
- setState(OBOE_STREAM_STATE_PAUSING);
- return mService->pauseStream(mServiceStreamHandle);
-}
-
-oboe_result_t AudioStreamInternal::requestFlush() {
- ALOGD("AudioStreamInternal(): flush()");
- if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
- return OBOE_ERROR_INVALID_STATE;
- }
- setState(OBOE_STREAM_STATE_FLUSHING);
- return mService->flushStream(mServiceStreamHandle);
-}
-
-void AudioStreamInternal::onFlushFromServer() {
- ALOGD("AudioStreamInternal(): onFlushFromServer()");
- oboe_position_frames_t readCounter = mAudioEndpoint.getDownDataReadCounter();
- oboe_position_frames_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
- // Bump offset so caller does not see the retrograde motion in getFramesRead().
- oboe_position_frames_t framesFlushed = writeCounter - readCounter;
- mFramesOffsetFromService += framesFlushed;
- // Flush written frames by forcing writeCounter to readCounter.
- // This is because we cannot move the read counter in the hardware.
- mAudioEndpoint.setDownDataWriteCounter(readCounter);
-}
-
-oboe_result_t AudioStreamInternal::requestStop()
-{
- // TODO better implementation of requestStop()
- oboe_result_t result = requestPause();
- if (result == OBOE_OK) {
- oboe_stream_state_t state;
- result = waitForStateChange(OBOE_STREAM_STATE_PAUSING,
- &state,
- 500 * OBOE_NANOS_PER_MILLISECOND);// TODO temporary code
- if (result == OBOE_OK) {
- result = requestFlush();
- }
- }
- return result;
-}
-
-oboe_result_t AudioStreamInternal::registerThread() {
- ALOGD("AudioStreamInternal(): registerThread()");
- if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
- return OBOE_ERROR_INVALID_STATE;
- }
- return mService->registerAudioThread(mServiceStreamHandle,
- gettid(),
- getPeriodNanoseconds());
-}
-
-oboe_result_t AudioStreamInternal::unregisterThread() {
- ALOGD("AudioStreamInternal(): unregisterThread()");
- if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
- return OBOE_ERROR_INVALID_STATE;
- }
- return mService->unregisterAudioThread(mServiceStreamHandle, gettid());
-}
-
-// TODO use oboe_clockid_t all the way down to AudioClock
-oboe_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
- oboe_position_frames_t *framePosition,
- oboe_nanoseconds_t *timeNanoseconds) {
-// TODO implement using real HAL
- oboe_nanoseconds_t time = AudioClock::getNanoseconds();
- *framePosition = mClockModel.convertTimeToPosition(time);
- *timeNanoseconds = time + (10 * OBOE_NANOS_PER_MILLISECOND); // Fake hardware delay
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamInternal::updateState() {
- return processCommands();
-}
-
-#if LOG_TIMESTAMPS
-static void AudioStreamInternal_LogTimestamp(OboeServiceMessage &command) {
- static int64_t oldPosition = 0;
- static oboe_nanoseconds_t oldTime = 0;
- int64_t framePosition = command.timestamp.position;
- oboe_nanoseconds_t nanoTime = command.timestamp.timestamp;
- ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
- (long long) framePosition,
- (long long) nanoTime);
- int64_t nanosDelta = nanoTime - oldTime;
- if (nanosDelta > 0 && oldTime > 0) {
- int64_t framesDelta = framePosition - oldPosition;
- int64_t rate = (framesDelta * OBOE_NANOS_PER_SECOND) / nanosDelta;
- ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
- ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
- ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
- }
- oldPosition = framePosition;
- oldTime = nanoTime;
-}
-#endif
-
-oboe_result_t AudioStreamInternal::onTimestampFromServer(OboeServiceMessage *message) {
- oboe_position_frames_t framePosition = 0;
-#if LOG_TIMESTAMPS
- AudioStreamInternal_LogTimestamp(command);
-#endif
- framePosition = message->timestamp.position;
- processTimestamp(framePosition, message->timestamp.timestamp);
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamInternal::onEventFromServer(OboeServiceMessage *message) {
- oboe_result_t result = OBOE_OK;
- ALOGD("processCommands() got event %d", message->event.event);
- switch (message->event.event) {
- case OBOE_SERVICE_EVENT_STARTED:
- ALOGD("processCommands() got OBOE_SERVICE_EVENT_STARTED");
- setState(OBOE_STREAM_STATE_STARTED);
- break;
- case OBOE_SERVICE_EVENT_PAUSED:
- ALOGD("processCommands() got OBOE_SERVICE_EVENT_PAUSED");
- setState(OBOE_STREAM_STATE_PAUSED);
- break;
- case OBOE_SERVICE_EVENT_FLUSHED:
- ALOGD("processCommands() got OBOE_SERVICE_EVENT_FLUSHED");
- setState(OBOE_STREAM_STATE_FLUSHED);
- onFlushFromServer();
- break;
- case OBOE_SERVICE_EVENT_CLOSED:
- ALOGD("processCommands() got OBOE_SERVICE_EVENT_CLOSED");
- setState(OBOE_STREAM_STATE_CLOSED);
- break;
- case OBOE_SERVICE_EVENT_DISCONNECTED:
- result = OBOE_ERROR_DISCONNECTED;
- ALOGW("WARNING - processCommands() OBOE_SERVICE_EVENT_DISCONNECTED");
- break;
- default:
- ALOGW("WARNING - processCommands() Unrecognized event = %d",
- (int) message->event.event);
- break;
- }
- return result;
-}
-
-// Process all the commands coming from the server.
-oboe_result_t AudioStreamInternal::processCommands() {
- oboe_result_t result = OBOE_OK;
-
- // Let the service run in case it is a fake service simulator.
- mService->tickle(); // TODO use real service thread
-
- while (result == OBOE_OK) {
- OboeServiceMessage message;
- if (mAudioEndpoint.readUpCommand(&message) != 1) {
- break; // no command this time, no problem
- }
- switch (message.what) {
- case OboeServiceMessage::code::TIMESTAMP:
- result = onTimestampFromServer(&message);
- break;
-
- case OboeServiceMessage::code::EVENT:
- result = onEventFromServer(&message);
- break;
-
- default:
- ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
- (int) message.what);
- result = OBOE_ERROR_UNEXPECTED_VALUE;
- break;
- }
- }
- return result;
-}
-
-// Write the data, block if needed and timeoutMillis > 0
-oboe_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds)
-{
- oboe_result_t result = OBOE_OK;
- uint8_t* source = (uint8_t*)buffer;
- oboe_nanoseconds_t currentTimeNanos = AudioClock::getNanoseconds();
- oboe_nanoseconds_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
- int32_t framesLeft = numFrames;
-// ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
-// buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
-
- // Write until all the data has been written or until a timeout occurs.
- while (framesLeft > 0) {
- // The call to writeNow() will not block. It will just write as much as it can.
- oboe_nanoseconds_t wakeTimeNanos = 0;
- oboe_result_t framesWritten = writeNow(source, framesLeft,
- currentTimeNanos, &wakeTimeNanos);
-// ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
- if (framesWritten < 0) {
- result = framesWritten;
- break;
- }
- framesLeft -= (int32_t) framesWritten;
- source += framesWritten * getBytesPerFrame();
-
- // Should we block?
- if (timeoutNanoseconds == 0) {
- break; // don't block
- } else if (framesLeft > 0) {
- //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
- // clip the wake time to something reasonable
- if (wakeTimeNanos < currentTimeNanos) {
- wakeTimeNanos = currentTimeNanos;
- }
- if (wakeTimeNanos > deadlineNanos) {
- // If we time out, just return the framesWritten so far.
- ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
- break;
- }
-
- //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
- // (long long) (wakeTimeNanos - currentTimeNanos));
- AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
- currentTimeNanos = AudioClock::getNanoseconds();
- }
- }
-
- // return error or framesWritten
- return (result < 0) ? result : numFrames - framesLeft;
-}
-
-// Write as much data as we can without blocking.
-oboe_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
- oboe_nanoseconds_t currentNanoTime, oboe_nanoseconds_t *wakeTimePtr) {
- {
- oboe_result_t result = processCommands();
- if (result != OBOE_OK) {
- return result;
- }
- }
-
- if (mAudioEndpoint.isOutputFreeRunning()) {
- // Update data queue based on the timing model.
- int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
- mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
- // If the read index passed the write index then consider it an underrun.
- if (mAudioEndpoint.getFullFramesAvailable() < 0) {
- mXRunCount++;
- }
- }
- // TODO else query from endpoint cuz set by actual reader, maybe
-
- // Write some data to the buffer.
- int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
- if (framesWritten > 0) {
- incrementFramesWritten(framesWritten);
- }
- //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
- // numFrames, framesWritten);
-
- // Calculate an ideal time to wake up.
- if (wakeTimePtr != nullptr && framesWritten >= 0) {
- // By default wake up a few milliseconds from now. // TODO review
- oboe_nanoseconds_t wakeTime = currentNanoTime + (2 * OBOE_NANOS_PER_MILLISECOND);
- switch (getState()) {
- case OBOE_STREAM_STATE_OPEN:
- case OBOE_STREAM_STATE_STARTING:
- if (framesWritten != 0) {
- // Don't wait to write more data. Just prime the buffer.
- wakeTime = currentNanoTime;
- }
- break;
- case OBOE_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
- {
- uint32_t burstSize = mFramesPerBurst;
- if (burstSize < 32) {
- burstSize = 32; // TODO review
- }
-
- uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
- wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
- }
- break;
- default:
- break;
- }
- *wakeTimePtr = wakeTime;
-
- }
-// ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
-// (unsigned long long)currentNanoTime,
-// (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
-// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
- return framesWritten;
-}
-
-oboe_result_t AudioStreamInternal::waitForStateChange(oboe_stream_state_t currentState,
- oboe_stream_state_t *nextState,
- oboe_nanoseconds_t timeoutNanoseconds)
-
-{
- oboe_result_t result = processCommands();
-// ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
- if (result != OBOE_OK) {
- return result;
- }
- // TODO replace this polling with a timed sleep on a futex on the message queue
- int32_t durationNanos = 5 * OBOE_NANOS_PER_MILLISECOND;
- oboe_stream_state_t state = getState();
-// ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
- while (state == currentState && timeoutNanoseconds > 0) {
- // TODO use futex from service message queue
- if (durationNanos > timeoutNanoseconds) {
- durationNanos = timeoutNanoseconds;
- }
- AudioClock::sleepForNanos(durationNanos);
- timeoutNanoseconds -= durationNanos;
-
- result = processCommands();
- if (result != OBOE_OK) {
- return result;
- }
-
- state = getState();
-// ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
- }
- if (nextState != nullptr) {
- *nextState = state;
- }
- return (state == currentState) ? OBOE_ERROR_TIMEOUT : OBOE_OK;
-}
-
-
-void AudioStreamInternal::processTimestamp(uint64_t position, oboe_nanoseconds_t time) {
- mClockModel.processTimestamp( position, time);
-}
-
-oboe_result_t AudioStreamInternal::setBufferSize(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames) {
- return mAudioEndpoint.setBufferSizeInFrames(requestedFrames, actualFrames);
-}
-
-oboe_size_frames_t AudioStreamInternal::getBufferSize() const
-{
- return mAudioEndpoint.getBufferSizeInFrames();
-}
-
-oboe_size_frames_t AudioStreamInternal::getBufferCapacity() const
-{
- return mAudioEndpoint.getBufferCapacityInFrames();
-}
-
-oboe_size_frames_t AudioStreamInternal::getFramesPerBurst() const
-{
- return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
-}
-
-oboe_position_frames_t AudioStreamInternal::getFramesRead()
-{
- oboe_position_frames_t framesRead =
- mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
- + mFramesOffsetFromService;
- // Prevent retrograde motion.
- if (framesRead < mLastFramesRead) {
- framesRead = mLastFramesRead;
- } else {
- mLastFramesRead = framesRead;
- }
- ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
- return framesRead;
-}
-
-// TODO implement getTimestamp
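For reference, the control flow of the deleted AudioStreamInternal::write() reduces to: call a non-blocking writeNow(), and if frames remain, sleep until the wake time it recommends (clipped to the caller's deadline), then try again. A self-contained sketch of that loop with stand-in types -- FakeEndpoint, writeSome() and the simulated clock are placeholders, not real liboboe calls:

    // Sketch of the timed-write loop used by AudioStreamInternal::write() above.
    #include <cstdint>

    struct FakeEndpoint {
        // Pretends to write up to 64 frames without blocking and suggests a wake-up time.
        int32_t writeSome(const uint8_t* /*src*/, int32_t numFrames, int64_t nowNanos,
                          int64_t* wakeTimeNanos) {
            *wakeTimeNanos = nowNanos + 2'000'000;       // "come back in ~2 ms"
            return numFrames < 64 ? numFrames : 64;
        }
    };

    // Returns the number of frames written before completing or hitting the deadline.
    int32_t timedWrite(FakeEndpoint& endpoint, const uint8_t* source, int32_t numFrames,
                       int64_t nowNanos, int64_t timeoutNanos) {
        const int64_t deadlineNanos = nowNanos + timeoutNanos;
        int32_t framesLeft = numFrames;
        while (framesLeft > 0) {
            int64_t wakeTimeNanos = nowNanos;
            int32_t written = endpoint.writeSome(source, framesLeft, nowNanos, &wakeTimeNanos);
            framesLeft -= written;
            source += written;                           // one byte per frame in this toy model
            if (timeoutNanos == 0) {
                break;                                   // non-blocking call: return what we got
            }
            if (framesLeft > 0) {
                if (wakeTimeNanos < nowNanos) wakeTimeNanos = nowNanos;   // never wake in the past
                if (wakeTimeNanos > deadlineNanos) break;                 // would exceed the timeout
                nowNanos = wakeTimeNanos;                // real code sleeps here, then re-reads the clock
            }
        }
        return numFrames - framesLeft;
    }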
diff --git a/media/liboboe/src/client/AudioStreamInternal.h b/media/liboboe/src/client/AudioStreamInternal.h
deleted file mode 100644
index 6f37761..0000000
--- a/media/liboboe/src/client/AudioStreamInternal.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_AUDIOSTREAMINTERNAL_H
-#define OBOE_AUDIOSTREAMINTERNAL_H
-
-#include <stdint.h>
-#include <oboe/OboeAudio.h>
-
-#include "binding/IOboeAudioService.h"
-#include "binding/AudioEndpointParcelable.h"
-#include "client/IsochronousClockModel.h"
-#include "client/AudioEndpoint.h"
-#include "core/AudioStream.h"
-
-using android::sp;
-using android::IOboeAudioService;
-
-namespace oboe {
-
-// A stream that talks to the OboeService or directly to a HAL.
-class AudioStreamInternal : public AudioStream {
-
-public:
- AudioStreamInternal();
- virtual ~AudioStreamInternal();
-
- // =========== Begin ABSTRACT methods ===========================
- virtual oboe_result_t requestStart() override;
-
- virtual oboe_result_t requestPause() override;
-
- virtual oboe_result_t requestFlush() override;
-
- virtual oboe_result_t requestStop() override;
-
- // TODO use oboe_clockid_t all the way down to AudioClock
- virtual oboe_result_t getTimestamp(clockid_t clockId,
- oboe_position_frames_t *framePosition,
- oboe_nanoseconds_t *timeNanoseconds) override;
-
-
- virtual oboe_result_t updateState() override;
- // =========== End ABSTRACT methods ===========================
-
- virtual oboe_result_t open(const AudioStreamBuilder &builder) override;
-
- virtual oboe_result_t close() override;
-
- virtual oboe_result_t write(const void *buffer,
- int32_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds) override;
-
- virtual oboe_result_t waitForStateChange(oboe_stream_state_t currentState,
- oboe_stream_state_t *nextState,
- oboe_nanoseconds_t timeoutNanoseconds) override;
-
- virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames) override;
-
- virtual oboe_size_frames_t getBufferSize() const override;
-
- virtual oboe_size_frames_t getBufferCapacity() const override;
-
- virtual oboe_size_frames_t getFramesPerBurst() const override;
-
- virtual oboe_position_frames_t getFramesRead() override;
-
- virtual int32_t getXRunCount() const override {
- return mXRunCount;
- }
-
- virtual oboe_result_t registerThread() override;
-
- virtual oboe_result_t unregisterThread() override;
-
-protected:
-
- oboe_result_t processCommands();
-
-/**
- * Low level write that will not block. It will just write as much as it can.
- *
- * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
- *
- * @return the number of frames written or a negative error code.
- */
- virtual oboe_result_t writeNow(const void *buffer,
- int32_t numFrames,
- oboe_nanoseconds_t currentTimeNanos,
- oboe_nanoseconds_t *wakeTimePtr);
-
- void onFlushFromServer();
-
- oboe_result_t onEventFromServer(OboeServiceMessage *message);
-
- oboe_result_t onTimestampFromServer(OboeServiceMessage *message);
-
-private:
- IsochronousClockModel mClockModel;
- AudioEndpoint mAudioEndpoint;
- oboe_handle_t mServiceStreamHandle;
- EndpointDescriptor mEndpointDescriptor;
- sp<IOboeAudioService> mService;
- // Offset from underlying frame position.
- oboe_position_frames_t mFramesOffsetFromService = 0;
- oboe_position_frames_t mLastFramesRead = 0;
- oboe_size_frames_t mFramesPerBurst;
- int32_t mXRunCount = 0;
-
- void processTimestamp(uint64_t position, oboe_nanoseconds_t time);
-};
-
-} /* namespace oboe */
-
-#endif //OBOE_AUDIOSTREAMINTERNAL_H
diff --git a/media/liboboe/src/core/AudioStream.h b/media/liboboe/src/core/AudioStream.h
deleted file mode 100644
index c13ae9f..0000000
--- a/media/liboboe/src/core/AudioStream.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_AUDIOSTREAM_H
-#define OBOE_AUDIOSTREAM_H
-
-#include <atomic>
-#include <stdint.h>
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-
-#include "OboeUtilities.h"
-#include "MonotonicCounter.h"
-
-namespace oboe {
-
-class AudioStreamBuilder;
-
-/**
- * Oboe audio stream.
- */
-class AudioStream {
-public:
-
- AudioStream();
-
- virtual ~AudioStream();
-
-
- // =========== Begin ABSTRACT methods ===========================
-
- /* Asynchronous requests.
- * Use waitForStateChange() to wait for completion.
- */
- virtual oboe_result_t requestStart() = 0;
- virtual oboe_result_t requestPause() = 0;
- virtual oboe_result_t requestFlush() = 0;
- virtual oboe_result_t requestStop() = 0;
-
- // TODO use oboe_clockid_t all the way down to AudioClock
- virtual oboe_result_t getTimestamp(clockid_t clockId,
- oboe_position_frames_t *framePosition,
- oboe_nanoseconds_t *timeNanoseconds) = 0;
-
-
- virtual oboe_result_t updateState() = 0;
-
-
- // =========== End ABSTRACT methods ===========================
-
- virtual oboe_result_t waitForStateChange(oboe_stream_state_t currentState,
- oboe_stream_state_t *nextState,
- oboe_nanoseconds_t timeoutNanoseconds);
-
- /**
- * Open the stream using the parameters in the builder.
- * Allocate the necessary resources.
- */
- virtual oboe_result_t open(const AudioStreamBuilder& builder);
-
- /**
- * Close the stream and deallocate any resources from the open() call.
- * It is safe to call close() multiple times.
- */
- virtual oboe_result_t close() {
- return OBOE_OK;
- }
-
- virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames) {
- return OBOE_ERROR_UNIMPLEMENTED;
- }
-
- virtual oboe_result_t createThread(oboe_nanoseconds_t periodNanoseconds,
- oboe_audio_thread_proc_t *threadProc,
- void *threadArg);
-
- virtual oboe_result_t joinThread(void **returnArg, oboe_nanoseconds_t timeoutNanoseconds);
-
- virtual oboe_result_t registerThread() {
- return OBOE_OK;
- }
-
- virtual oboe_result_t unregisterThread() {
- return OBOE_OK;
- }
-
- /**
- * Internal function used to call the audio thread passed by the user.
- * It is unfortunately public because it needs to be called by a static 'C' function.
- */
- void* wrapUserThread();
-
- // ============== Queries ===========================
-
- virtual oboe_stream_state_t getState() const {
- return mState;
- }
-
- virtual oboe_size_frames_t getBufferSize() const {
- return OBOE_ERROR_UNIMPLEMENTED;
- }
-
- virtual oboe_size_frames_t getBufferCapacity() const {
- return OBOE_ERROR_UNIMPLEMENTED;
- }
-
- virtual oboe_size_frames_t getFramesPerBurst() const {
- return OBOE_ERROR_UNIMPLEMENTED;
- }
-
- virtual int32_t getXRunCount() const {
- return OBOE_ERROR_UNIMPLEMENTED;
- }
-
- bool isPlaying() const {
- return mState == OBOE_STREAM_STATE_STARTING || mState == OBOE_STREAM_STATE_STARTED;
- }
-
- oboe_result_t getSampleRate() const {
- return mSampleRate;
- }
-
- oboe_audio_format_t getFormat() const {
- return mFormat;
- }
-
- oboe_result_t getSamplesPerFrame() const {
- return mSamplesPerFrame;
- }
-
- oboe_device_id_t getDeviceId() const {
- return mDeviceId;
- }
-
- oboe_sharing_mode_t getSharingMode() const {
- return mSharingMode;
- }
-
- oboe_direction_t getDirection() const {
- return mDirection;
- }
-
- oboe_size_bytes_t getBytesPerFrame() const {
- return mSamplesPerFrame * getBytesPerSample();
- }
-
- oboe_size_bytes_t getBytesPerSample() const {
- return OboeConvert_formatToSizeInBytes(mFormat);
- }
-
- virtual oboe_position_frames_t getFramesWritten() {
- return mFramesWritten.get();
- }
-
- virtual oboe_position_frames_t getFramesRead() {
- return mFramesRead.get();
- }
-
-
- // ============== I/O ===========================
- // A Stream will only implement read() or write() depending on its direction.
- virtual oboe_result_t write(const void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds) {
- return OBOE_ERROR_UNIMPLEMENTED;
- }
-
- virtual oboe_result_t read(void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds) {
- return OBOE_ERROR_UNIMPLEMENTED;
- }
-
-protected:
-
- virtual oboe_position_frames_t incrementFramesWritten(oboe_size_frames_t frames) {
- return static_cast<oboe_position_frames_t>(mFramesWritten.increment(frames));
- }
-
- virtual oboe_position_frames_t incrementFramesRead(oboe_size_frames_t frames) {
- return static_cast<oboe_position_frames_t>(mFramesRead.increment(frames));
- }
-
- /**
- * Wait for a transition from one state to another.
- * @return OBOE_OK if the endingState was observed, or OBOE_ERROR_UNEXPECTED_STATE
- * if any state that was not the startingState or endingState was observed
- * or OBOE_ERROR_TIMEOUT
- */
- virtual oboe_result_t waitForStateTransition(oboe_stream_state_t startingState,
- oboe_stream_state_t endingState,
- oboe_nanoseconds_t timeoutNanoseconds);
-
- /**
- * This should not be called after the open() call.
- */
- void setSampleRate(oboe_sample_rate_t sampleRate) {
- mSampleRate = sampleRate;
- }
-
- /**
- * This should not be called after the open() call.
- */
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
- /**
- * This should not be called after the open() call.
- */
- void setSharingMode(oboe_sharing_mode_t sharingMode) {
- mSharingMode = sharingMode;
- }
-
- /**
- * This should not be called after the open() call.
- */
- void setFormat(oboe_audio_format_t format) {
- mFormat = format;
- }
-
- void setState(oboe_stream_state_t state) {
- mState = state;
- }
-
-
-
-protected:
- MonotonicCounter mFramesWritten;
- MonotonicCounter mFramesRead;
-
- void setPeriodNanoseconds(oboe_nanoseconds_t periodNanoseconds) {
- mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
- }
-
- oboe_nanoseconds_t getPeriodNanoseconds() {
- return mPeriodNanoseconds.load(std::memory_order_acquire);
- }
-
-private:
- // These do not change after open().
- int32_t mSamplesPerFrame = OBOE_UNSPECIFIED;
- oboe_sample_rate_t mSampleRate = OBOE_UNSPECIFIED;
- oboe_stream_state_t mState = OBOE_STREAM_STATE_UNINITIALIZED;
- oboe_device_id_t mDeviceId = OBOE_UNSPECIFIED;
- oboe_sharing_mode_t mSharingMode = OBOE_SHARING_MODE_LEGACY;
- oboe_audio_format_t mFormat = OBOE_AUDIO_FORMAT_UNSPECIFIED;
- oboe_direction_t mDirection = OBOE_DIRECTION_OUTPUT;
-
- // background thread ----------------------------------
- bool mHasThread = false;
- pthread_t mThread; // initialized in constructor
-
- // These are set by the application thread and then read by the audio pthread.
- std::atomic<oboe_nanoseconds_t> mPeriodNanoseconds; // for tuning SCHED_FIFO threads
- // TODO make atomic?
- oboe_audio_thread_proc_t* mThreadProc = nullptr;
- void* mThreadArg = nullptr;
- oboe_result_t mThreadRegistrationResult = OBOE_OK;
-
-
-};
-
-} /* namespace oboe */
-
-#endif /* OBOE_AUDIOSTREAM_H */
diff --git a/media/liboboe/src/core/AudioStreamBuilder.h b/media/liboboe/src/core/AudioStreamBuilder.h
deleted file mode 100644
index ec17eb6..0000000
--- a/media/liboboe/src/core/AudioStreamBuilder.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_AUDIOSTREAMBUILDER_H
-#define OBOE_AUDIOSTREAMBUILDER_H
-
-#include <stdint.h>
-
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-
-#include "AudioStream.h"
-
-namespace oboe {
-
-/**
- * Factory class for an AudioStream.
- */
-class AudioStreamBuilder {
-public:
- AudioStreamBuilder();
-
- ~AudioStreamBuilder();
-
- int getSamplesPerFrame() const {
- return mSamplesPerFrame;
- }
-
- /**
- * This is also known as channelCount.
- */
- AudioStreamBuilder* setSamplesPerFrame(int samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- return this;
- }
-
- oboe_direction_t getDirection() const {
- return mDirection;
- }
-
- AudioStreamBuilder* setDirection(oboe_direction_t direction) {
- mDirection = direction;
- return this;
- }
-
- oboe_sample_rate_t getSampleRate() const {
- return mSampleRate;
- }
-
- AudioStreamBuilder* setSampleRate(oboe_sample_rate_t sampleRate) {
- mSampleRate = sampleRate;
- return this;
- }
-
- oboe_audio_format_t getFormat() const {
- return mFormat;
- }
-
- AudioStreamBuilder *setFormat(oboe_audio_format_t format) {
- mFormat = format;
- return this;
- }
-
- oboe_sharing_mode_t getSharingMode() const {
- return mSharingMode;
- }
-
- AudioStreamBuilder* setSharingMode(oboe_sharing_mode_t sharingMode) {
- mSharingMode = sharingMode;
- return this;
- }
-
- oboe_device_id_t getDeviceId() const {
- return mDeviceId;
- }
-
- AudioStreamBuilder* setDeviceId(oboe_device_id_t deviceId) {
- mDeviceId = deviceId;
- return this;
- }
-
- oboe_result_t build(AudioStream **streamPtr);
-
-private:
- int32_t mSamplesPerFrame = OBOE_UNSPECIFIED;
- oboe_sample_rate_t mSampleRate = OBOE_UNSPECIFIED;
- oboe_device_id_t mDeviceId = OBOE_DEVICE_UNSPECIFIED;
- oboe_sharing_mode_t mSharingMode = OBOE_SHARING_MODE_LEGACY;
- oboe_audio_format_t mFormat = OBOE_AUDIO_FORMAT_UNSPECIFIED;
- oboe_direction_t mDirection = OBOE_DIRECTION_OUTPUT;
-};
-
-} /* namespace oboe */
-
-#endif /* OBOE_AUDIOSTREAMBUILDER_H */
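Because every setter in the deleted builder above returns the builder pointer, configuration can be chained before calling build(). A hypothetical internal-API usage sketch (values are arbitrary and the helper function is invented; the public entry point is the C API in OboeAudio.h, not this class directly):

    // Hypothetical usage of the internal AudioStreamBuilder; mirrors the header above.
    #include <oboe/OboeDefinitions.h>
    #include "AudioStreamBuilder.h"

    oboe_result_t openStereoFloatOutput(oboe::AudioStream **streamOut) {
        oboe::AudioStreamBuilder builder;
        builder.setDirection(OBOE_DIRECTION_OUTPUT)
               ->setSampleRate(48000)
               ->setSamplesPerFrame(2)                      // stereo
               ->setFormat(OBOE_AUDIO_FORMAT_PCM_FLOAT)
               ->setSharingMode(OBOE_SHARING_MODE_LEGACY);  // AudioTrack-backed path
        return builder.build(streamOut);                    // allocates *streamOut on success
    }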
diff --git a/media/liboboe/src/core/OboeAudio.cpp b/media/liboboe/src/core/OboeAudio.cpp
deleted file mode 100644
index d98ca36..0000000
--- a/media/liboboe/src/core/OboeAudio.cpp
+++ /dev/null
@@ -1,562 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "OboeAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <time.h>
-#include <pthread.h>
-
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-
-#include "AudioStreamBuilder.h"
-#include "AudioStream.h"
-#include "AudioClock.h"
-#include "client/AudioStreamInternal.h"
-#include "HandleTracker.h"
-
-using namespace oboe;
-
-// This is not the theoretical maximum number of handles that the HandleTracker
-// class could support; instead it is the maximum number of handles that we are configuring
-// for our HandleTracker instance (sHandleTracker).
-#define OBOE_MAX_HANDLES 64
-
-// Macros for common code that includes a return.
-// TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
-#define CONVERT_BUILDER_HANDLE_OR_RETURN() \
- convertOboeBuilderToStreamBuilder(builder); \
- if (streamBuilder == nullptr) { \
- return OBOE_ERROR_INVALID_HANDLE; \
- }
-
-#define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
- CONVERT_BUILDER_HANDLE_OR_RETURN() \
- if ((resultPtr) == nullptr) { \
- return OBOE_ERROR_NULL; \
- }
-
-#define CONVERT_STREAM_HANDLE_OR_RETURN() \
- convertOboeStreamToAudioStream(stream); \
- if (audioStream == nullptr) { \
- return OBOE_ERROR_INVALID_HANDLE; \
- }
-
-#define COMMON_GET_FROM_STREAM_OR_RETURN(resultPtr) \
- CONVERT_STREAM_HANDLE_OR_RETURN(); \
- if ((resultPtr) == nullptr) { \
- return OBOE_ERROR_NULL; \
- }
-
-// Static data.
-// TODO static constructors are discouraged, alternatives?
-static HandleTracker sHandleTracker(OBOE_MAX_HANDLES);
-
-typedef enum
-{
- OBOE_HANDLE_TYPE_STREAM,
- OBOE_HANDLE_TYPE_STREAM_BUILDER,
- OBOE_HANDLE_TYPE_COUNT
-} oboe_handle_type_t;
-static_assert(OBOE_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
-
-
-#define OBOE_CASE_ENUM(name) case name: return #name
-
-OBOE_API const char * Oboe_convertResultToText(oboe_result_t returnCode) {
- switch (returnCode) {
- OBOE_CASE_ENUM(OBOE_OK);
- OBOE_CASE_ENUM(OBOE_ERROR_ILLEGAL_ARGUMENT);
- OBOE_CASE_ENUM(OBOE_ERROR_INCOMPATIBLE);
- OBOE_CASE_ENUM(OBOE_ERROR_INTERNAL);
- OBOE_CASE_ENUM(OBOE_ERROR_INVALID_STATE);
- OBOE_CASE_ENUM(OBOE_ERROR_INVALID_HANDLE);
- OBOE_CASE_ENUM(OBOE_ERROR_INVALID_QUERY);
- OBOE_CASE_ENUM(OBOE_ERROR_UNIMPLEMENTED);
- OBOE_CASE_ENUM(OBOE_ERROR_UNAVAILABLE);
- OBOE_CASE_ENUM(OBOE_ERROR_NO_FREE_HANDLES);
- OBOE_CASE_ENUM(OBOE_ERROR_NO_MEMORY);
- OBOE_CASE_ENUM(OBOE_ERROR_NULL);
- OBOE_CASE_ENUM(OBOE_ERROR_TIMEOUT);
- OBOE_CASE_ENUM(OBOE_ERROR_WOULD_BLOCK);
- OBOE_CASE_ENUM(OBOE_ERROR_INVALID_ORDER);
- OBOE_CASE_ENUM(OBOE_ERROR_OUT_OF_RANGE);
- }
- return "Unrecognized Oboe error.";
-}
-
-OBOE_API const char * Oboe_convertStreamStateToText(oboe_stream_state_t state) {
- switch (state) {
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_UNINITIALIZED);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_OPEN);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_STARTING);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_STARTED);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_PAUSING);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_PAUSED);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_FLUSHING);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_FLUSHED);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_STOPPING);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_STOPPED);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_CLOSING);
- OBOE_CASE_ENUM(OBOE_STREAM_STATE_CLOSED);
- }
- return "Unrecognized Oboe state.";
-}
-
-#undef OBOE_CASE_ENUM
-
-static AudioStream *convertOboeStreamToAudioStream(OboeStream stream)
-{
- return (AudioStream *) sHandleTracker.get(OBOE_HANDLE_TYPE_STREAM,
- (oboe_handle_t) stream);
-}
-
-static AudioStreamBuilder *convertOboeBuilderToStreamBuilder(OboeStreamBuilder builder)
-{
- return (AudioStreamBuilder *) sHandleTracker.get(OBOE_HANDLE_TYPE_STREAM_BUILDER,
- (oboe_handle_t) builder);
-}
-
-OBOE_API oboe_result_t Oboe_createStreamBuilder(OboeStreamBuilder *builder)
-{
- ALOGD("Oboe_createStreamBuilder(): check sHandleTracker.isInitialized ()");
- if (!sHandleTracker.isInitialized()) {
- return OBOE_ERROR_NO_MEMORY;
- }
- AudioStreamBuilder *audioStreamBuilder = new AudioStreamBuilder();
- if (audioStreamBuilder == nullptr) {
- return OBOE_ERROR_NO_MEMORY;
- }
- ALOGD("Oboe_createStreamBuilder(): created AudioStreamBuilder = %p", audioStreamBuilder);
- // TODO protect the put() with a Mutex
- OboeStreamBuilder handle = sHandleTracker.put(OBOE_HANDLE_TYPE_STREAM_BUILDER,
- audioStreamBuilder);
- if (handle < 0) {
- delete audioStreamBuilder;
- return static_cast<oboe_result_t>(handle);
- } else {
- *builder = handle;
- }
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
- oboe_device_id_t deviceId)
-{
- AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
- streamBuilder->setDeviceId(deviceId);
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getDeviceId(OboeStreamBuilder builder,
- oboe_device_id_t *deviceId)
-{
- AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(deviceId);
- *deviceId = streamBuilder->getDeviceId();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
- oboe_sample_rate_t sampleRate)
-{
- AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
- streamBuilder->setSampleRate(sampleRate);
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
- oboe_sample_rate_t *sampleRate)
-{
- AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sampleRate);
- *sampleRate = streamBuilder->getSampleRate();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setSamplesPerFrame(OboeStreamBuilder builder,
- int32_t samplesPerFrame)
-{
- AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
- streamBuilder->setSamplesPerFrame(samplesPerFrame);
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getSamplesPerFrame(OboeStreamBuilder builder,
- int32_t *samplesPerFrame)
-{
- AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(samplesPerFrame);
- *samplesPerFrame = streamBuilder->getSamplesPerFrame();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setDirection(OboeStreamBuilder builder,
- oboe_direction_t direction)
-{
- AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
- streamBuilder->setDirection(direction);
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getDirection(OboeStreamBuilder builder,
- oboe_direction_t *direction)
-{
- AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(direction);
- *direction = streamBuilder->getDirection();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setFormat(OboeStreamBuilder builder,
- oboe_audio_format_t format)
-{
- AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
- streamBuilder->setFormat(format);
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getFormat(OboeStreamBuilder builder,
- oboe_audio_format_t *format)
-{
- AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(format);
- *format = streamBuilder->getFormat();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setSharingMode(OboeStreamBuilder builder,
- oboe_sharing_mode_t sharingMode)
-{
- AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
- if ((sharingMode < 0) || (sharingMode >= OBOE_SHARING_MODE_COUNT)) {
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
- } else {
- streamBuilder->setSharingMode(sharingMode);
- return OBOE_OK;
- }
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getSharingMode(OboeStreamBuilder builder,
- oboe_sharing_mode_t *sharingMode)
-{
- AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sharingMode);
- *sharingMode = streamBuilder->getSharingMode();
- return OBOE_OK;
-}
-
-static oboe_result_t OboeInternal_openStream(AudioStreamBuilder *streamBuilder,
- OboeStream *streamPtr)
-{
- AudioStream *audioStream = nullptr;
- oboe_result_t result = streamBuilder->build(&audioStream);
- if (result != OBOE_OK) {
- return result;
- } else {
- // Create a handle for referencing the object.
- // TODO protect the put() with a Mutex
- OboeStream handle = sHandleTracker.put(OBOE_HANDLE_TYPE_STREAM, audioStream);
- if (handle < 0) {
- delete audioStream;
- return static_cast<oboe_result_t>(handle);
- }
- *streamPtr = handle;
- return OBOE_OK;
- }
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_openStream(OboeStreamBuilder builder,
- OboeStream *streamPtr)
-{
- ALOGD("OboeStreamBuilder_openStream(): builder = 0x%08X", builder);
- AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
- return OboeInternal_openStream(streamBuilder, streamPtr);
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_delete(OboeStreamBuilder builder)
-{
- // TODO protect the remove() with a Mutex
- AudioStreamBuilder *streamBuilder = (AudioStreamBuilder *)
- sHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM_BUILDER, builder);
- if (streamBuilder != nullptr) {
- delete streamBuilder;
- return OBOE_OK;
- }
- return OBOE_ERROR_INVALID_HANDLE;
-}
-
-OBOE_API oboe_result_t OboeStream_close(OboeStream stream)
-{
- // TODO protect the remove() with a Mutex
- AudioStream *audioStream = (AudioStream *)
- sHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM, (oboe_handle_t)stream);
- if (audioStream != nullptr) {
- audioStream->close();
- delete audioStream;
- return OBOE_OK;
- }
- return OBOE_ERROR_INVALID_HANDLE;
-}
-
-OBOE_API oboe_result_t OboeStream_requestStart(OboeStream stream)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- ALOGD("OboeStream_requestStart(0x%08X), audioStream = %p", stream, audioStream);
- return audioStream->requestStart();
-}
-
-OBOE_API oboe_result_t OboeStream_requestPause(OboeStream stream)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- ALOGD("OboeStream_requestPause(0x%08X), audioStream = %p", stream, audioStream);
- return audioStream->requestPause();
-}
-
-OBOE_API oboe_result_t OboeStream_requestFlush(OboeStream stream)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- ALOGD("OboeStream_requestFlush(0x%08X), audioStream = %p", stream, audioStream);
- return audioStream->requestFlush();
-}
-
-OBOE_API oboe_result_t OboeStream_requestStop(OboeStream stream)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- ALOGD("OboeStream_requestStop(0x%08X), audioStream = %p", stream, audioStream);
- return audioStream->requestStop();
-}
-
-OBOE_API oboe_result_t OboeStream_waitForStateChange(OboeStream stream,
- oboe_stream_state_t inputState,
- oboe_stream_state_t *nextState,
- oboe_nanoseconds_t timeoutNanoseconds)
-{
-
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
-}
-
-// ============================================================
-// Stream - non-blocking I/O
-// ============================================================
-
-OBOE_API oboe_result_t OboeStream_read(OboeStream stream,
- void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- if (buffer == nullptr) {
- return OBOE_ERROR_NULL;
- }
- if (numFrames < 0) {
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
- } else if (numFrames == 0) {
- return 0;
- }
-
- oboe_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);
- // ALOGD("OboeStream_read(): read returns %d", result);
-
- return result;
-}
-
-OBOE_API oboe_result_t OboeStream_write(OboeStream stream,
- const void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- if (buffer == nullptr) {
- return OBOE_ERROR_NULL;
- }
- if (numFrames < 0) {
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
- } else if (numFrames == 0) {
- return 0;
- }
-
- oboe_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);
- // ALOGD("OboeStream_write(): write returns %d", result);
-
- return result;
-}
-
-// ============================================================
-// Miscellaneous
-// ============================================================
-
-OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
- oboe_nanoseconds_t periodNanoseconds,
- oboe_audio_thread_proc_t *threadProc, void *arg)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- return audioStream->createThread(periodNanoseconds, threadProc, arg);
-}
-
-OBOE_API oboe_result_t OboeStream_joinThread(OboeStream stream,
- void **returnArg,
- oboe_nanoseconds_t timeoutNanoseconds)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- return audioStream->joinThread(returnArg, timeoutNanoseconds);
-}
-
-// ============================================================
-// Stream - queries
-// ============================================================
-
-// TODO Use oboe_clockid_t all the way down through the C++ streams.
-static clockid_t OboeConvert_fromOboeClockId(oboe_clockid_t clockid)
-{
- clockid_t hostClockId;
- switch (clockid) {
- case OBOE_CLOCK_MONOTONIC:
- hostClockId = CLOCK_MONOTONIC;
- break;
- case OBOE_CLOCK_BOOTTIME:
- hostClockId = CLOCK_BOOTTIME;
- break;
- default:
- hostClockId = 0; // TODO review
- }
- return hostClockId;
-}
-
-oboe_nanoseconds_t Oboe_getNanoseconds(oboe_clockid_t clockid)
-{
- clockid_t hostClockId = OboeConvert_fromOboeClockId(clockid);
- return AudioClock::getNanoseconds(hostClockId);
-}
-
-OBOE_API oboe_result_t OboeStream_getSampleRate(OboeStream stream, oboe_sample_rate_t *sampleRate)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sampleRate);
- *sampleRate = audioStream->getSampleRate();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getSamplesPerFrame(OboeStream stream, int32_t *samplesPerFrame)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(samplesPerFrame);
- *samplesPerFrame = audioStream->getSamplesPerFrame();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getState(OboeStream stream, oboe_stream_state_t *state)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(state);
- *state = audioStream->getState();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFormat(OboeStream stream, oboe_audio_format_t *format)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(format);
- *format = audioStream->getFormat();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_setBufferSize(OboeStream stream,
- oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- return audioStream->setBufferSize(requestedFrames, actualFrames);
-}
-
-OBOE_API oboe_result_t OboeStream_getBufferSize(OboeStream stream, oboe_size_frames_t *frames)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
- *frames = audioStream->getBufferSize();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getDirection(OboeStream stream, int32_t *direction)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(direction);
- *direction = audioStream->getDirection();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFramesPerBurst(OboeStream stream,
- oboe_size_frames_t *framesPerBurst)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(framesPerBurst);
- *framesPerBurst = audioStream->getFramesPerBurst();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getBufferCapacity(OboeStream stream,
- oboe_size_frames_t *capacity)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(capacity);
- *capacity = audioStream->getBufferCapacity();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getXRunCount(OboeStream stream, int32_t *xRunCount)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(xRunCount);
- *xRunCount = audioStream->getXRunCount();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getDeviceId(OboeStream stream,
- oboe_device_id_t *deviceId)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(deviceId);
- *deviceId = audioStream->getDeviceId();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getSharingMode(OboeStream stream,
- oboe_sharing_mode_t *sharingMode)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sharingMode);
- *sharingMode = audioStream->getSharingMode();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFramesWritten(OboeStream stream,
- oboe_position_frames_t *frames)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
- *frames = audioStream->getFramesWritten();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFramesRead(OboeStream stream, oboe_position_frames_t *frames)
-{
- AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
- *frames = audioStream->getFramesRead();
- return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getTimestamp(OboeStream stream,
- oboe_clockid_t clockid,
- oboe_position_frames_t *framePosition,
- oboe_nanoseconds_t *timeNanoseconds)
-{
- AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
- if (framePosition == nullptr) {
- return OBOE_ERROR_NULL;
- } else if (timeNanoseconds == nullptr) {
- return OBOE_ERROR_NULL;
- } else if (clockid != OBOE_CLOCK_MONOTONIC && clockid != OBOE_CLOCK_BOOTTIME) {
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
- }
-
- clockid_t hostClockId = OboeConvert_fromOboeClockId(clockid);
- return audioStream->getTimestamp(hostClockId, framePosition, timeNanoseconds);
-}
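OboeAudio.cpp above was the public 'C' surface of the library: opaque OboeStream and OboeStreamBuilder handles were resolved through sHandleTracker and the calls forwarded to the underlying C++ objects. For reference, a hedged sketch of how a client drove this API before its removal (the helper name playOneBurst and the 1024-frame buffer bound are assumptions; error handling is trimmed):

    // Sketch: open a default output stream, write one silent burst, then close.
    #include <stdint.h>
    #include <string.h>
    #include <oboe/OboeAudio.h>

    oboe_result_t playOneBurst() {
        OboeStreamBuilder builder;
        OboeStream stream;
        oboe_result_t result = Oboe_createStreamBuilder(&builder);
        if (result != OBOE_OK) return result;

        OboeStreamBuilder_setSampleRate(builder, 48000);
        OboeStreamBuilder_setSamplesPerFrame(builder, 2);
        OboeStreamBuilder_setFormat(builder, OBOE_AUDIO_FORMAT_PCM16);

        result = OboeStreamBuilder_openStream(builder, &stream);
        OboeStreamBuilder_delete(builder);                   // the builder is no longer needed
        if (result != OBOE_OK) return result;

        oboe_size_frames_t framesPerBurst = 0;
        OboeStream_getFramesPerBurst(stream, &framesPerBurst);
        if (framesPerBurst > 1024) framesPerBurst = 1024;    // stay inside the local buffer

        int16_t buffer[2 * 1024];                            // 2 samples per frame, PCM16
        memset(buffer, 0, sizeof(buffer));
        OboeStream_requestStart(stream);
        OboeStream_write(stream, buffer, framesPerBurst, 100 * OBOE_NANOS_PER_MILLISECOND);
        OboeStream_requestStop(stream);
        return OboeStream_close(stream);
    }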
diff --git a/media/liboboe/src/core/README.md b/media/liboboe/src/core/README.md
deleted file mode 100644
index dd99286..0000000
--- a/media/liboboe/src/core/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-The core folder contains the essential Oboe files common to all implementations.
-The OboeAudio.cpp contains the 'C' API.
diff --git a/media/liboboe/src/legacy/AudioStreamRecord.cpp b/media/liboboe/src/legacy/AudioStreamRecord.cpp
deleted file mode 100644
index 5854974..0000000
--- a/media/liboboe/src/legacy/AudioStreamRecord.cpp
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioStreamRecord"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <utils/String16.h>
-#include <media/AudioRecord.h>
-#include <oboe/OboeAudio.h>
-
-#include "AudioClock.h"
-#include "AudioStreamRecord.h"
-
-using namespace android;
-using namespace oboe;
-
-AudioStreamRecord::AudioStreamRecord()
- : AudioStream()
-{
-}
-
-AudioStreamRecord::~AudioStreamRecord()
-{
- const oboe_stream_state_t state = getState();
- bool bad = !(state == OBOE_STREAM_STATE_UNINITIALIZED || state == OBOE_STREAM_STATE_CLOSED);
- ALOGE_IF(bad, "stream not closed, in state %d", state);
-}
-
-oboe_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
-{
- oboe_result_t result = OBOE_OK;
-
- result = AudioStream::open(builder);
- if (result != OBOE_OK) {
- return result;
- }
-
- // Try to create an AudioRecord
-
- // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == OBOE_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
-
- AudioRecord::callback_t callback = nullptr;
- audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
-
- // TODO implement an unspecified Android format then use that.
- audio_format_t format = (getFormat() == OBOE_UNSPECIFIED)
- ? AUDIO_FORMAT_PCM_FLOAT
- : OboeConvert_oboeToAndroidDataFormat(getFormat());
-
- mAudioRecord = new AudioRecord(
- AUDIO_SOURCE_DEFAULT,
- getSampleRate(),
- format,
- channelMask,
-
- mOpPackageName, // const String16& opPackageName TODO does not compile
-
- 0, // size_t frameCount = 0,
- callback,
- nullptr, // void* user = nullptr,
- 0, // uint32_t notificationFrames = 0,
- AUDIO_SESSION_ALLOCATE,
- AudioRecord::TRANSFER_DEFAULT,
- flags
- // int uid = -1,
- // pid_t pid = -1,
- // const audio_attributes_t* pAttributes = nullptr
- );
-
- // Did we get a valid track?
- status_t status = mAudioRecord->initCheck();
- if (status != OK) {
- close();
- ALOGE("AudioStreamRecord::open(), initCheck() returned %d", status);
- return OboeConvert_androidToOboeError(status);
- }
-
- // Get the actual rate.
- setSampleRate(mAudioRecord->getSampleRate());
- setSamplesPerFrame(mAudioRecord->channelCount());
- setFormat(OboeConvert_androidToOboeDataFormat(mAudioRecord->format()));
-
- setState(OBOE_STREAM_STATE_OPEN);
-
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamRecord::close()
-{
- // TODO add close() or release() to AudioRecord API then call it from here
- if (getState() != OBOE_STREAM_STATE_CLOSED) {
- mAudioRecord.clear();
- setState(OBOE_STREAM_STATE_CLOSED);
- }
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamRecord::requestStart()
-{
- if (mAudioRecord.get() == nullptr) {
- return OBOE_ERROR_INVALID_STATE;
- }
- // Get current position so we can detect when the track is playing.
- status_t err = mAudioRecord->getPosition(&mPositionWhenStarting);
- if (err != OK) {
- return OboeConvert_androidToOboeError(err);
- }
- err = mAudioRecord->start();
- if (err != OK) {
- return OboeConvert_androidToOboeError(err);
- } else {
- setState(OBOE_STREAM_STATE_STARTING);
- }
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamRecord::requestPause()
-{
- return OBOE_ERROR_UNIMPLEMENTED;
-}
-
-oboe_result_t AudioStreamRecord::requestFlush() {
- return OBOE_ERROR_UNIMPLEMENTED;
-}
-
-oboe_result_t AudioStreamRecord::requestStop() {
- if (mAudioRecord.get() == nullptr) {
- return OBOE_ERROR_INVALID_STATE;
- }
- setState(OBOE_STREAM_STATE_STOPPING);
- mAudioRecord->stop();
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamRecord::updateState()
-{
- oboe_result_t result = OBOE_OK;
- oboe_wrapping_frames_t position;
- status_t err;
- switch (getState()) {
- // TODO add better state visibility to AudioRecord
- case OBOE_STREAM_STATE_STARTING:
- err = mAudioRecord->getPosition(&position);
- if (err != OK) {
- result = OboeConvert_androidToOboeError(err);
- } else if (position != mPositionWhenStarting) {
- setState(OBOE_STREAM_STATE_STARTED);
- }
- break;
- case OBOE_STREAM_STATE_STOPPING:
- if (mAudioRecord->stopped()) {
- setState(OBOE_STREAM_STATE_STOPPED);
- }
- break;
- default:
- break;
- }
- return result;
-}
-
-oboe_result_t AudioStreamRecord::read(void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds)
-{
- oboe_size_frames_t bytesPerFrame = getBytesPerFrame();
- oboe_size_bytes_t numBytes;
- oboe_result_t result = OboeConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
- if (result != OBOE_OK) {
- return result;
- }
-
- // TODO add timeout to AudioRecord
- bool blocking = (timeoutNanoseconds > 0);
- ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
- if (bytesRead == WOULD_BLOCK) {
- return 0;
- } else if (bytesRead < 0) {
- return OboeConvert_androidToOboeError(bytesRead);
- }
- oboe_size_frames_t framesRead = (oboe_size_frames_t)(bytesRead / bytesPerFrame);
- return (oboe_result_t) framesRead;
-}
-
-oboe_result_t AudioStreamRecord::setBufferSize(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames)
-{
- *actualFrames = getBufferCapacity();
- return OBOE_OK;
-}
-
-oboe_size_frames_t AudioStreamRecord::getBufferSize() const
-{
- return getBufferCapacity(); // TODO implement in AudioRecord?
-}
-
-oboe_size_frames_t AudioStreamRecord::getBufferCapacity() const
-{
- return static_cast<oboe_size_frames_t>(mAudioRecord->frameCount());
-}
-
-int32_t AudioStreamRecord::getXRunCount() const
-{
- return OBOE_ERROR_UNIMPLEMENTED; // TODO implement when AudioRecord supports it
-}
-
-oboe_size_frames_t AudioStreamRecord::getFramesPerBurst() const
-{
- return 192; // TODO add query to AudioRecord.cpp
-}
-
-// TODO implement getTimestamp
-
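AudioStreamRecord::read() above converts the requested frame count to bytes, issues a blocking or non-blocking AudioRecord::read() depending on whether timeoutNanoseconds is positive, and reports its result in frames, returning 0 when a non-blocking read would block. A minimal sketch of the capture loop those semantics supported, via the public C API (the helper name drainInput is an assumption, and the stream is assumed to have been opened for input with OboeStreamBuilder_setDirection() and already started):

    // Sketch: drain whatever the input stream currently has buffered, without blocking.
    #include <stdint.h>
    #include <oboe/OboeAudio.h>

    void drainInput(OboeStream stream, int16_t *buffer, oboe_size_frames_t burstFrames) {
        oboe_result_t framesRead;
        do {
            // timeoutNanoseconds == 0 selects the non-blocking path in AudioStreamRecord::read().
            framesRead = OboeStream_read(stream, buffer, burstFrames, 0 /* timeoutNanoseconds */);
            // > 0 : frames delivered; 0 : nothing available right now; < 0 : an OBOE_ERROR_* code.
        } while (framesRead > 0);
    }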
diff --git a/media/liboboe/src/legacy/AudioStreamRecord.h b/media/liboboe/src/legacy/AudioStreamRecord.h
deleted file mode 100644
index 02ff220..0000000
--- a/media/liboboe/src/legacy/AudioStreamRecord.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LEGACY_AUDIOSTREAMRECORD_H
-#define LEGACY_AUDIOSTREAMRECORD_H
-
-#include <media/AudioRecord.h>
-#include <oboe/OboeAudio.h>
-
-#include "AudioStreamBuilder.h"
-#include "AudioStream.h"
-#include "OboeLegacy.h"
-
-namespace oboe {
-
-/**
- * Internal stream that uses the legacy AudioRecord path.
- */
-class AudioStreamRecord : public AudioStream {
-public:
- AudioStreamRecord();
-
- virtual ~AudioStreamRecord();
-
- virtual oboe_result_t open(const AudioStreamBuilder & builder) override;
- virtual oboe_result_t close() override;
-
- virtual oboe_result_t requestStart() override;
- virtual oboe_result_t requestPause() override;
- virtual oboe_result_t requestFlush() override;
- virtual oboe_result_t requestStop() override;
-
- virtual oboe_result_t getTimestamp(clockid_t clockId,
- oboe_position_frames_t *framePosition,
- oboe_nanoseconds_t *timeNanoseconds) override {
- return OBOE_ERROR_UNIMPLEMENTED; // TODO
- }
-
- virtual oboe_result_t read(void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds) override;
-
- virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames) override;
-
- virtual oboe_size_frames_t getBufferSize() const override;
-
- virtual oboe_size_frames_t getBufferCapacity() const override;
-
- virtual int32_t getXRunCount() const override;
-
- virtual oboe_size_frames_t getFramesPerBurst() const override;
-
- virtual oboe_result_t updateState() override;
-
-private:
- android::sp<android::AudioRecord> mAudioRecord;
- // TODO add 64-bit position reporting to AudioRecord and use it.
- oboe_wrapping_frames_t mPositionWhenStarting = 0;
- android::String16 mOpPackageName;
-};
-
-} /* namespace oboe */
-
-#endif /* LEGACY_AUDIOSTREAMRECORD_H */
diff --git a/media/liboboe/src/legacy/AudioStreamTrack.cpp b/media/liboboe/src/legacy/AudioStreamTrack.cpp
deleted file mode 100644
index b2c4ee1..0000000
--- a/media/liboboe/src/legacy/AudioStreamTrack.cpp
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioStreamTrack"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <media/AudioTrack.h>
-
-#include <oboe/OboeAudio.h>
-#include "AudioClock.h"
-#include "AudioStreamTrack.h"
-
-
-using namespace android;
-using namespace oboe;
-
-/*
- * Create a stream that uses the AudioTrack.
- */
-AudioStreamTrack::AudioStreamTrack()
- : AudioStream()
-{
-}
-
-AudioStreamTrack::~AudioStreamTrack()
-{
- const oboe_stream_state_t state = getState();
- bool bad = !(state == OBOE_STREAM_STATE_UNINITIALIZED || state == OBOE_STREAM_STATE_CLOSED);
- ALOGE_IF(bad, "stream not closed, in state %d", state);
-}
-
-oboe_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
-{
- oboe_result_t result = OBOE_OK;
-
- result = AudioStream::open(builder);
- if (result != OBOE_OK) {
- return result;
- }
-
- // Try to create an AudioTrack
- // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == OBOE_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(samplesPerFrame);
- ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
- samplesPerFrame, channelMask);
-
- AudioTrack::callback_t callback = nullptr;
- // TODO add more performance options
- audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
- size_t frameCount = 0;
- // TODO implement an unspecified AudioTrack format then use that.
- audio_format_t format = (getFormat() == OBOE_UNSPECIFIED)
- ? AUDIO_FORMAT_PCM_FLOAT
- : OboeConvert_oboeToAndroidDataFormat(getFormat());
-
- mAudioTrack = new AudioTrack(
- (audio_stream_type_t) AUDIO_STREAM_MUSIC,
- getSampleRate(),
- format,
- channelMask,
- frameCount,
- flags,
- callback,
- nullptr, // user callback data
- 0, // notificationFrames
- AUDIO_SESSION_ALLOCATE,
- AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
- );
-
- // Did we get a valid track?
- status_t status = mAudioTrack->initCheck();
- ALOGD("AudioStreamTrack::open(), initCheck() returned %d", status);
- // FIXME - this should work - if (status != NO_ERROR) {
- // But initCheck() is returning 1 !
- if (status < 0) {
- close();
- ALOGE("AudioStreamTrack::open(), initCheck() returned %d", status);
- return OboeConvert_androidToOboeError(status);
- }
-
- // Get the actual values from the AudioTrack.
- setSamplesPerFrame(mAudioTrack->channelCount());
- setSampleRate(mAudioTrack->getSampleRate());
- setFormat(OboeConvert_androidToOboeDataFormat(mAudioTrack->format()));
-
- setState(OBOE_STREAM_STATE_OPEN);
-
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamTrack::close()
-{
- // TODO maybe add close() or release() to AudioTrack API then call it from here
- if (getState() != OBOE_STREAM_STATE_CLOSED) {
- mAudioTrack.clear(); // TODO is this right?
- setState(OBOE_STREAM_STATE_CLOSED);
- }
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamTrack::requestStart()
-{
- if (mAudioTrack.get() == nullptr) {
- return OBOE_ERROR_INVALID_STATE;
- }
- // Get current position so we can detect when the track is playing.
- status_t err = mAudioTrack->getPosition(&mPositionWhenStarting);
- if (err != OK) {
- return OboeConvert_androidToOboeError(err);
- }
- err = mAudioTrack->start();
- if (err != OK) {
- return OboeConvert_androidToOboeError(err);
- } else {
- setState(OBOE_STREAM_STATE_STARTING);
- }
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamTrack::requestPause()
-{
- if (mAudioTrack.get() == nullptr) {
- return OBOE_ERROR_INVALID_STATE;
- } else if (getState() != OBOE_STREAM_STATE_STARTING
- && getState() != OBOE_STREAM_STATE_STARTED) {
- ALOGE("requestPause(), called when state is %s", Oboe_convertStreamStateToText(getState()));
- return OBOE_ERROR_INVALID_STATE;
- }
- setState(OBOE_STREAM_STATE_PAUSING);
- mAudioTrack->pause();
- status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
- if (err != OK) {
- return OboeConvert_androidToOboeError(err);
- }
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamTrack::requestFlush() {
- if (mAudioTrack.get() == nullptr) {
- return OBOE_ERROR_INVALID_STATE;
- } else if (getState() != OBOE_STREAM_STATE_PAUSED) {
- return OBOE_ERROR_INVALID_STATE;
- }
- setState(OBOE_STREAM_STATE_FLUSHING);
- incrementFramesRead(getFramesWritten() - getFramesRead());
- mAudioTrack->flush();
- mFramesWritten.reset32();
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamTrack::requestStop() {
- if (mAudioTrack.get() == nullptr) {
- return OBOE_ERROR_INVALID_STATE;
- }
- setState(OBOE_STREAM_STATE_STOPPING);
- incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
- mAudioTrack->stop();
- mFramesWritten.reset32();
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamTrack::updateState()
-{
- status_t err;
- oboe_wrapping_frames_t position;
- switch (getState()) {
- // TODO add better state visibility to AudioTrack
- case OBOE_STREAM_STATE_STARTING:
- if (mAudioTrack->hasStarted()) {
- setState(OBOE_STREAM_STATE_STARTED);
- }
- break;
- case OBOE_STREAM_STATE_PAUSING:
- if (mAudioTrack->stopped()) {
- err = mAudioTrack->getPosition(&position);
- if (err != OK) {
- return OboeConvert_androidToOboeError(err);
- } else if (position == mPositionWhenPausing) {
- // Has stream really stopped advancing?
- setState(OBOE_STREAM_STATE_PAUSED);
- }
- mPositionWhenPausing = position;
- }
- break;
- case OBOE_STREAM_STATE_FLUSHING:
- {
- err = mAudioTrack->getPosition(&position);
- if (err != OK) {
- return OboeConvert_androidToOboeError(err);
- } else if (position == 0) {
- // Advance frames read to match written.
- setState(OBOE_STREAM_STATE_FLUSHED);
- }
- }
- break;
- case OBOE_STREAM_STATE_STOPPING:
- if (mAudioTrack->stopped()) {
- setState(OBOE_STREAM_STATE_STOPPED);
- }
- break;
- default:
- break;
- }
- return OBOE_OK;
-}
-
-oboe_result_t AudioStreamTrack::write(const void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds)
-{
- oboe_size_frames_t bytesPerFrame = getBytesPerFrame();
- oboe_size_bytes_t numBytes;
- oboe_result_t result = OboeConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
- if (result != OBOE_OK) {
- return result;
- }
-
- // TODO add timeout to AudioTrack
- bool blocking = timeoutNanoseconds > 0;
- ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking);
- if (bytesWritten == WOULD_BLOCK) {
- return 0;
- } else if (bytesWritten < 0) {
- ALOGE("invalid write, returned %d", (int)bytesWritten);
- return OboeConvert_androidToOboeError(bytesWritten);
- }
- oboe_size_frames_t framesWritten = (oboe_size_frames_t)(bytesWritten / bytesPerFrame);
- incrementFramesWritten(framesWritten);
- return framesWritten;
-}
-
-oboe_result_t AudioStreamTrack::setBufferSize(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames)
-{
- ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
- if (result != OK) {
- return OboeConvert_androidToOboeError(result);
- } else {
- *actualFrames = result;
- return OBOE_OK;
- }
-}
-
-oboe_size_frames_t AudioStreamTrack::getBufferSize() const
-{
- return static_cast<oboe_size_frames_t>(mAudioTrack->getBufferSizeInFrames());
-}
-
-oboe_size_frames_t AudioStreamTrack::getBufferCapacity() const
-{
- return static_cast<oboe_size_frames_t>(mAudioTrack->frameCount());
-}
-
-int32_t AudioStreamTrack::getXRunCount() const
-{
- return static_cast<int32_t>(mAudioTrack->getUnderrunCount());
-}
-
-oboe_size_frames_t AudioStreamTrack::getFramesPerBurst() const
-{
- return 192; // TODO add query to AudioTrack.cpp
-}
-
-oboe_position_frames_t AudioStreamTrack::getFramesRead() {
- oboe_wrapping_frames_t position;
- status_t result;
- switch (getState()) {
- case OBOE_STREAM_STATE_STARTING:
- case OBOE_STREAM_STATE_STARTED:
- case OBOE_STREAM_STATE_STOPPING:
- result = mAudioTrack->getPosition(&position);
- if (result == OK) {
- mFramesRead.update32(position);
- }
- break;
- default:
- break;
- }
- return AudioStream::getFramesRead();
-}
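A detail of the legacy wrapper removed above: requestStart(), requestPause(), requestFlush(), and requestStop() only move the stream into the corresponding *ING state; updateState() later confirms the terminal state by inspecting the underlying AudioTrack. Callers therefore paired each request with OboeStream_waitForStateChange(), as in this sketch (the helper name pauseAndConfirm is an assumption; the 500 ms timeout mirrors the deleted test further down):

    // Sketch: asynchronous pause, then wait until updateState() reports PAUSED.
    #include <oboe/OboeAudio.h>

    oboe_result_t pauseAndConfirm(OboeStream stream) {
        oboe_result_t result = OboeStream_requestPause(stream);   // -> OBOE_STREAM_STATE_PAUSING
        if (result != OBOE_OK) return result;

        oboe_stream_state_t state = OBOE_STREAM_STATE_UNINITIALIZED;
        result = OboeStream_waitForStateChange(stream,
                                               OBOE_STREAM_STATE_PAUSING,
                                               &state,
                                               500 * OBOE_NANOS_PER_MILLISECOND);
        // On success, state is expected to be OBOE_STREAM_STATE_PAUSED.
        return result;
    }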
diff --git a/media/liboboe/src/legacy/AudioStreamTrack.h b/media/liboboe/src/legacy/AudioStreamTrack.h
deleted file mode 100644
index 8c40884..0000000
--- a/media/liboboe/src/legacy/AudioStreamTrack.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LEGACY_AUDIOSTREAMTRACK_H
-#define LEGACY_AUDIOSTREAMTRACK_H
-
-#include <media/AudioTrack.h>
-#include <oboe/OboeAudio.h>
-
-#include "AudioStreamBuilder.h"
-#include "AudioStream.h"
-#include "OboeLegacy.h"
-
-namespace oboe {
-
-
-/**
- * Internal stream that uses the legacy AudioTrack path.
- */
-class AudioStreamTrack : public AudioStream {
-public:
- AudioStreamTrack();
-
- virtual ~AudioStreamTrack();
-
-
- virtual oboe_result_t open(const AudioStreamBuilder & builder) override;
- virtual oboe_result_t close() override;
-
- virtual oboe_result_t requestStart() override;
- virtual oboe_result_t requestPause() override;
- virtual oboe_result_t requestFlush() override;
- virtual oboe_result_t requestStop() override;
-
- virtual oboe_result_t getTimestamp(clockid_t clockId,
- oboe_position_frames_t *framePosition,
- oboe_nanoseconds_t *timeNanoseconds) override {
- return OBOE_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
- }
-
- virtual oboe_result_t write(const void *buffer,
- oboe_size_frames_t numFrames,
- oboe_nanoseconds_t timeoutNanoseconds) override;
-
- virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
- oboe_size_frames_t *actualFrames) override;
- virtual oboe_size_frames_t getBufferSize() const override;
- virtual oboe_size_frames_t getBufferCapacity() const override;
- virtual oboe_size_frames_t getFramesPerBurst() const override;
- virtual int32_t getXRunCount() const override;
-
- virtual oboe_position_frames_t getFramesRead() override;
-
- virtual oboe_result_t updateState() override;
-
-private:
- android::sp<android::AudioTrack> mAudioTrack;
- // TODO add 64-bit position reporting to AudioTrack and use it.
- oboe_wrapping_frames_t mPositionWhenStarting = 0;
- oboe_wrapping_frames_t mPositionWhenPausing = 0;
-};
-
-} /* namespace oboe */
-
-#endif /* LEGACY_AUDIOSTREAMTRACK_H */
diff --git a/media/liboboe/src/legacy/README.md b/media/liboboe/src/legacy/README.md
deleted file mode 100644
index b51c44b..0000000
--- a/media/liboboe/src/legacy/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-The legacy folder contains the classes that implement Oboe AudioStream on top of
-Android AudioTrack and AudioRecord.
diff --git a/media/liboboe/src/utility/OboeUtilities.cpp b/media/liboboe/src/utility/OboeUtilities.cpp
deleted file mode 100644
index d9d2e88..0000000
--- a/media/liboboe/src/utility/OboeUtilities.cpp
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "OboeAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <utils/Errors.h>
-
-#include "oboe/OboeDefinitions.h"
-#include "OboeUtilities.h"
-
-using namespace android;
-
-oboe_size_bytes_t OboeConvert_formatToSizeInBytes(oboe_audio_format_t format) {
- oboe_size_bytes_t size = OBOE_ERROR_ILLEGAL_ARGUMENT;
- switch (format) {
- case OBOE_AUDIO_FORMAT_PCM16:
- size = sizeof(int16_t);
- break;
- case OBOE_AUDIO_FORMAT_PCM32:
- case OBOE_AUDIO_FORMAT_PCM824:
- size = sizeof(int32_t);
- break;
- case OBOE_AUDIO_FORMAT_PCM_FLOAT:
- size = sizeof(float);
- break;
- default:
- break;
- }
- return size;
-}
-
-// TODO This is similar to a function in audio_utils. Consider using that instead.
-void OboeConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination) {
- for (int i = 0; i < numSamples; i++) {
- float fval = source[i];
- fval += 1.0; // to avoid discontinuity at 0.0 caused by truncation
- fval *= 32768.0f;
- int32_t sample = (int32_t) fval;
- // clip to 16-bit range
- if (sample < 0) sample = 0;
- else if (sample > 0x0FFFF) sample = 0x0FFFF;
- sample -= 32768; // center at zero
- destination[i] = (int16_t) sample;
- }
-}
-
-void OboeConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
- for (int i = 0; i < numSamples; i++) {
- destination[i] = source[i] * (1.0f / 32768.0f);
- }
-}
-
-oboe_result_t OboeConvert_androidToOboeError(status_t error) {
- if (error >= 0) {
- return error;
- }
- oboe_result_t result;
- switch (error) {
- case OK:
- result = OBOE_OK;
- break;
- case INVALID_OPERATION:
- result = OBOE_ERROR_INVALID_STATE;
- break;
- case BAD_VALUE:
- result = OBOE_ERROR_UNEXPECTED_VALUE;
- break;
- case WOULD_BLOCK:
- result = OBOE_ERROR_WOULD_BLOCK;
- break;
- // TODO add more error codes
- default:
- result = OBOE_ERROR_INTERNAL;
- break;
- }
- return result;
-}
-
-audio_format_t OboeConvert_oboeToAndroidDataFormat(oboe_audio_format_t oboeFormat) {
- audio_format_t androidFormat;
- switch (oboeFormat) {
- case OBOE_AUDIO_FORMAT_PCM16:
- androidFormat = AUDIO_FORMAT_PCM_16_BIT;
- break;
- case OBOE_AUDIO_FORMAT_PCM_FLOAT:
- androidFormat = AUDIO_FORMAT_PCM_FLOAT;
- break;
- case OBOE_AUDIO_FORMAT_PCM824:
- androidFormat = AUDIO_FORMAT_PCM_8_24_BIT;
- break;
- case OBOE_AUDIO_FORMAT_PCM32:
- androidFormat = AUDIO_FORMAT_PCM_32_BIT;
- break;
- default:
- androidFormat = AUDIO_FORMAT_DEFAULT;
- ALOGE("OboeConvert_oboeToAndroidDataFormat 0x%08X unrecognized", oboeFormat);
- break;
- }
- return androidFormat;
-}
-
-oboe_audio_format_t OboeConvert_androidToOboeDataFormat(audio_format_t androidFormat) {
- oboe_audio_format_t oboeFormat = OBOE_AUDIO_FORMAT_INVALID;
- switch (androidFormat) {
- case AUDIO_FORMAT_PCM_16_BIT:
- oboeFormat = OBOE_AUDIO_FORMAT_PCM16;
- break;
- case AUDIO_FORMAT_PCM_FLOAT:
- oboeFormat = OBOE_AUDIO_FORMAT_PCM_FLOAT;
- break;
- case AUDIO_FORMAT_PCM_32_BIT:
- oboeFormat = OBOE_AUDIO_FORMAT_PCM32;
- break;
- case AUDIO_FORMAT_PCM_8_24_BIT:
- oboeFormat = OBOE_AUDIO_FORMAT_PCM824;
- break;
- default:
- oboeFormat = OBOE_AUDIO_FORMAT_INVALID;
- ALOGE("OboeConvert_androidToOboeDataFormat 0x%08X unrecognized", androidFormat);
- break;
- }
- return oboeFormat;
-}
-
-oboe_size_bytes_t OboeConvert_framesToBytes(oboe_size_frames_t numFrames,
- oboe_size_bytes_t bytesPerFrame,
- oboe_size_bytes_t *sizeInBytes) {
- // TODO implement more elegantly
- const int32_t maxChannels = 256; // ridiculously large
- const oboe_size_frames_t maxBytesPerFrame = maxChannels * sizeof(float);
- // Prevent overflow by limiting multiplicands.
- if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
- ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
- return OBOE_ERROR_OUT_OF_RANGE;
- }
- *sizeInBytes = numFrames * bytesPerFrame;
- return OBOE_OK;
-}
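OboeConvert_framesToBytes() above guards the frames-times-bytes multiply so an oversized request fails with OBOE_ERROR_OUT_OF_RANGE instead of silently overflowing, and the float/PCM16 helpers convert interleaved sample buffers. A short sketch of how these utilities were combined (the helper name prepareBurst and its parameter list are illustrative, not from the original sources):

    // Sketch: size-check a burst, then convert float samples to PCM16 for writing.
    #include <stdint.h>
    #include "OboeUtilities.h"

    oboe_result_t prepareBurst(const float *input, oboe_size_frames_t numFrames,
                               int32_t samplesPerFrame, int16_t *output,
                               oboe_size_bytes_t *byteCountOut) {
        const oboe_size_bytes_t bytesPerFrame =
                samplesPerFrame * OboeConvert_formatToSizeInBytes(OBOE_AUDIO_FORMAT_PCM16);
        // Guarded multiply: reports OBOE_ERROR_OUT_OF_RANGE instead of overflowing.
        oboe_size_bytes_t check = OboeConvert_framesToBytes(numFrames, bytesPerFrame, byteCountOut);
        if (check != OBOE_OK) {
            return (oboe_result_t) check;
        }
        // Interleaved data: numSamples = frames * samplesPerFrame.
        OboeConvert_floatToPcm16(input, numFrames * samplesPerFrame, output);
        return OBOE_OK;
    }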
diff --git a/media/liboboe/src/utility/OboeUtilities.h b/media/liboboe/src/utility/OboeUtilities.h
deleted file mode 100644
index 974ccf6..0000000
--- a/media/liboboe/src/utility/OboeUtilities.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef UTILITY_OBOEUTILITIES_H
-#define UTILITY_OBOEUTILITIES_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/Errors.h>
-#include <hardware/audio.h>
-
-#include "oboe/OboeDefinitions.h"
-
-oboe_result_t OboeConvert_androidToOboeError(android::status_t error);
-
-void OboeConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination);
-
-void OboeConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination);
-
-/**
- * Calculate the number of bytes and prevent numeric overflow.
- * @param numFrames frame count
- * @param bytesPerFrame size of a frame in bytes
- * @param sizeInBytes total size in bytes
- * @return OBOE_OK or a negative error, e.g. OBOE_ERROR_OUT_OF_RANGE
- */
-oboe_size_bytes_t OboeConvert_framesToBytes(oboe_size_frames_t numFrames,
- oboe_size_bytes_t bytesPerFrame,
- oboe_size_bytes_t *sizeInBytes);
-
-audio_format_t OboeConvert_oboeToAndroidDataFormat(oboe_audio_format_t oboe_format);
-
-oboe_audio_format_t OboeConvert_androidToOboeDataFormat(audio_format_t format);
-
-/**
- * @return the size of a sample of the given format in bytes or OBOE_ERROR_ILLEGAL_ARGUMENT
- */
-oboe_size_bytes_t OboeConvert_formatToSizeInBytes(oboe_audio_format_t format);
-
-#endif //UTILITY_OBOEUTILITIES_H
diff --git a/media/liboboe/src/utility/README.md b/media/liboboe/src/utility/README.md
deleted file mode 100644
index 9db926a..0000000
--- a/media/liboboe/src/utility/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-The utility folder contains things that may be shared between the Oboe client and server.
-They might also be handy outside Oboe.
-They generally do not depend on Oboe functionality.
diff --git a/media/liboboe/tests/Android.mk b/media/liboboe/tests/Android.mk
deleted file mode 100644
index 165669b..0000000
--- a/media/liboboe/tests/Android.mk
+++ /dev/null
@@ -1,42 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/liboboe/include \
- frameworks/av/media/liboboe/src/core \
- frameworks/av/media/liboboe/src/utility
-LOCAL_SRC_FILES := test_oboe_api.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
-LOCAL_STATIC_LIBRARIES := liboboe
-LOCAL_MODULE := test_oboe_api
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/liboboe/include \
- frameworks/av/media/liboboe/src/core \
- frameworks/av/media/liboboe/src/utility
-LOCAL_SRC_FILES:= test_handle_tracker.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
-LOCAL_STATIC_LIBRARIES := liboboe
-LOCAL_MODULE := test_handle_tracker
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/liboboe/include \
- frameworks/av/media/liboboe/src \
- frameworks/av/media/liboboe/src/core \
- frameworks/av/media/liboboe/src/fifo \
- frameworks/av/media/liboboe/src/utility
-LOCAL_SRC_FILES:= test_marshalling.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
-LOCAL_STATIC_LIBRARIES := liboboe
-LOCAL_MODULE := test_marshalling
-include $(BUILD_NATIVE_TEST)
diff --git a/media/liboboe/tests/test_oboe_api.cpp b/media/liboboe/tests/test_oboe_api.cpp
deleted file mode 100644
index 0bc469f..0000000
--- a/media/liboboe/tests/test_oboe_api.cpp
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Unit tests for Oboe 'C' API.
-
-#include <stdlib.h>
-#include <math.h>
-
-#include <gtest/gtest.h>
-
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-#include "OboeUtilities.h"
-
-#define DEFAULT_STATE_TIMEOUT (500 * OBOE_NANOS_PER_MILLISECOND)
-
-// Test OboeStreamBuilder
-TEST(test_oboe_api, oboe_stream_builder) {
- const oboe_sample_rate_t requestedSampleRate1 = 48000;
- const oboe_sample_rate_t requestedSampleRate2 = 44100;
- const int32_t requestedSamplesPerFrame = 2;
- const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
-
- oboe_sample_rate_t sampleRate = 0;
- int32_t samplesPerFrame = 0;
- oboe_audio_format_t actualDataFormat;
- OboeStreamBuilder oboeBuilder1;
- OboeStreamBuilder oboeBuilder2;
-
- oboe_result_t result = OBOE_OK;
-
- // Use an OboeStreamBuilder to define the stream.
- result = Oboe_createStreamBuilder(&oboeBuilder1);
- ASSERT_EQ(OBOE_OK, result);
-
- // Request stream properties.
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder1, requestedSampleRate1));
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSamplesPerFrame(oboeBuilder1, requestedSamplesPerFrame));
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setFormat(oboeBuilder1, requestedDataFormat));
-
- // Check to make sure builder saved the properties.
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate));
- EXPECT_EQ(requestedSampleRate1, sampleRate);
-
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSamplesPerFrame(oboeBuilder1, &samplesPerFrame));
- EXPECT_EQ(requestedSamplesPerFrame, samplesPerFrame);
-
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getFormat(oboeBuilder1, &actualDataFormat));
- EXPECT_EQ(requestedDataFormat, actualDataFormat);
-
- result = OboeStreamBuilder_getSampleRate(0x0BADCAFE, &sampleRate); // ridiculous token
- EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, result);
-
- // Create a second builder and make sure they do not collide.
- ASSERT_EQ(OBOE_OK, Oboe_createStreamBuilder(&oboeBuilder2));
- ASSERT_NE(oboeBuilder1, oboeBuilder2);
-
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder2, requestedSampleRate2));
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate));
- EXPECT_EQ(requestedSampleRate1, sampleRate);
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
- EXPECT_EQ(requestedSampleRate2, sampleRate);
-
- // Delete the builder.
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder1));
-
- // Now it should no longer be valid.
- // Note that this test assumes we are using the HandleTracker. If we used plain pointers
- // instead, it would be difficult to detect this kind of error.
- result = OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate); // stale token
- EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, result);
-
- // Second builder should still be valid.
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
- EXPECT_EQ(requestedSampleRate2, sampleRate);
-
- // Delete the second builder.
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder2));
-
- // Now it should no longer be valid. Assumes HandleTracker is used.
- EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
-}
-
-// Test creating a default stream with everything unspecified.
-TEST(test_oboe_api, oboe_stream_unspecified) {
- OboeStreamBuilder oboeBuilder;
- OboeStream oboeStream;
- oboe_result_t result = OBOE_OK;
-
- // Use an OboeStreamBuilder to define the stream.
- result = Oboe_createStreamBuilder(&oboeBuilder);
- ASSERT_EQ(OBOE_OK, result);
-
- // Create an OboeStream using the Builder.
- ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
-
- // Cleanup
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
- EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
-}
-
-// Test Writing to an OboeStream
-void runtest_oboe_stream(oboe_sharing_mode_t requestedSharingMode) {
- const oboe_sample_rate_t requestedSampleRate = 48000;
- const int32_t requestedSamplesPerFrame = 2;
- const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
-
- oboe_sample_rate_t actualSampleRate = -1;
- int32_t actualSamplesPerFrame = -1;
- oboe_audio_format_t actualDataFormat = OBOE_AUDIO_FORMAT_INVALID;
- oboe_sharing_mode_t actualSharingMode;
- oboe_size_frames_t framesPerBurst = -1;
- int writeLoops = 0;
-
- oboe_size_frames_t framesWritten = 0;
- oboe_size_frames_t framesPrimed = 0;
- oboe_position_frames_t framesTotal = 0;
- oboe_position_frames_t oboeFramesRead = 0;
- oboe_position_frames_t oboeFramesRead1 = 0;
- oboe_position_frames_t oboeFramesRead2 = 0;
- oboe_position_frames_t oboeFramesWritten = 0;
-
- oboe_nanoseconds_t timeoutNanos;
-
- oboe_stream_state_t state = OBOE_STREAM_STATE_UNINITIALIZED;
- OboeStreamBuilder oboeBuilder;
- OboeStream oboeStream;
-
- oboe_result_t result = OBOE_OK;
-
- // Use an OboeStreamBuilder to define the stream.
- result = Oboe_createStreamBuilder(&oboeBuilder);
- ASSERT_EQ(OBOE_OK, result);
-
- // Request stream properties.
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder, requestedSampleRate));
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSamplesPerFrame(oboeBuilder, requestedSamplesPerFrame));
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setFormat(oboeBuilder, requestedDataFormat));
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSharingMode(oboeBuilder, requestedSharingMode));
-
- // Create an OboeStream using the Builder.
- ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
-
- EXPECT_EQ(OBOE_OK, OboeStream_getState(oboeStream, &state));
- EXPECT_EQ(OBOE_STREAM_STATE_OPEN, state);
-
- // Check to see what kind of stream we actually got.
- EXPECT_EQ(OBOE_OK, OboeStream_getSampleRate(oboeStream, &actualSampleRate));
- ASSERT_TRUE(actualSampleRate >= 44100 && actualSampleRate <= 96000); // TODO what is range?
-
- EXPECT_EQ(OBOE_OK, OboeStream_getSamplesPerFrame(oboeStream, &actualSamplesPerFrame));
- ASSERT_TRUE(actualSamplesPerFrame >= 1 && actualSamplesPerFrame <= 16); // TODO what is max?
-
- EXPECT_EQ(OBOE_OK, OboeStream_getSharingMode(oboeStream, &actualSharingMode));
- ASSERT_TRUE(actualSharingMode == OBOE_SHARING_MODE_EXCLUSIVE
- || actualSharingMode == OBOE_SHARING_MODE_LEGACY);
-
- EXPECT_EQ(OBOE_OK, OboeStream_getFormat(oboeStream, &actualDataFormat));
- EXPECT_NE(OBOE_AUDIO_FORMAT_INVALID, actualDataFormat);
-
- EXPECT_EQ(OBOE_OK, OboeStream_getFramesPerBurst(oboeStream, &framesPerBurst));
- ASSERT_TRUE(framesPerBurst >= 16 && framesPerBurst <= 1024); // TODO what is min/max?
-
- // Allocate a buffer for the audio data.
- // TODO handle possibility of other data formats
- ASSERT_TRUE(actualDataFormat == OBOE_AUDIO_FORMAT_PCM16);
- size_t dataSizeSamples = framesPerBurst * actualSamplesPerFrame;
- int16_t *data = new int16_t[dataSizeSamples];
- ASSERT_TRUE(nullptr != data);
- memset(data, 0, sizeof(int16_t) * dataSizeSamples);
-
- // Prime the buffer.
- timeoutNanos = 0;
- do {
- framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
- // There should be some room for priming the buffer.
- framesTotal += framesWritten;
- ASSERT_GE(framesWritten, 0);
- ASSERT_LE(framesWritten, framesPerBurst);
- } while (framesWritten > 0);
- ASSERT_TRUE(framesTotal > 0);
-
- // Start/write/pause more than once to see if it fails after the first time.
- // Write some data and measure the rate to see if the timing is OK.
- for (int numLoops = 0; numLoops < 2; numLoops++) {
- // Start and wait for server to respond.
- ASSERT_EQ(OBOE_OK, OboeStream_requestStart(oboeStream));
- ASSERT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
- OBOE_STREAM_STATE_STARTING,
- &state,
- DEFAULT_STATE_TIMEOUT));
- EXPECT_EQ(OBOE_STREAM_STATE_STARTED, state);
-
- // Write some data while we are running. Read counter should be advancing.
- writeLoops = 1 * actualSampleRate / framesPerBurst; // 1 second
- ASSERT_LT(2, writeLoops); // detect absurdly high framesPerBurst
- timeoutNanos = 10 * OBOE_NANOS_PER_SECOND * framesPerBurst / actualSampleRate; // bursts
- framesWritten = 1;
- ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
- oboeFramesRead1 = oboeFramesRead;
- oboe_nanoseconds_t beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
- do {
- framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
- ASSERT_GE(framesWritten, 0);
- ASSERT_LE(framesWritten, framesPerBurst);
-
- framesTotal += framesWritten;
- EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
- EXPECT_EQ(framesTotal, oboeFramesWritten);
-
- // Try to get a more accurate measure of the sample rate.
- if (beginTime == 0) {
- EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
- if (oboeFramesRead > oboeFramesRead1) { // is read pointer advancing
- beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
- oboeFramesRead1 = oboeFramesRead;
- }
- }
- } while (framesWritten > 0 && writeLoops-- > 0);
-
- EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
- oboe_nanoseconds_t endTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
- ASSERT_GT(oboeFramesRead2, 0);
- ASSERT_GT(oboeFramesRead2, oboeFramesRead1);
- ASSERT_LE(oboeFramesRead2, oboeFramesWritten);
-
- // TODO why is legacy so inaccurate?
- const double rateTolerance = 200.0; // arbitrary tolerance for sample rate
- if (requestedSharingMode != OBOE_SHARING_MODE_LEGACY) {
- // Calculate approximate sample rate and compare with stream rate.
- double seconds = (endTime - beginTime) / (double) OBOE_NANOS_PER_SECOND;
- double measuredRate = (oboeFramesRead2 - oboeFramesRead1) / seconds;
- ASSERT_NEAR(actualSampleRate, measuredRate, rateTolerance);
- }
-
- // Request async pause and wait for server to say that it has completed the pause.
- ASSERT_EQ(OBOE_OK, OboeStream_requestPause(oboeStream));
- EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
- OBOE_STREAM_STATE_PAUSING,
- &state,
- DEFAULT_STATE_TIMEOUT));
- EXPECT_EQ(OBOE_STREAM_STATE_PAUSED, state);
- }
-
- // Make sure the read counter is not advancing when we are paused.
- ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
- ASSERT_GE(oboeFramesRead, oboeFramesRead2); // monotonic increase
-
- // Use this to sleep by waiting for something that won't happen.
- OboeStream_waitForStateChange(oboeStream, OBOE_STREAM_STATE_PAUSED, &state, timeoutNanos);
- ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
- EXPECT_EQ(oboeFramesRead, oboeFramesRead2);
-
- // ------------------- TEST FLUSH -----------------
- // Prime the buffer.
- timeoutNanos = 0;
- writeLoops = 100;
- do {
- framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
- framesTotal += framesWritten;
- } while (framesWritten > 0 && writeLoops-- > 0);
- EXPECT_EQ(0, framesWritten);
-
- // Flush and wait for server to respond.
- ASSERT_EQ(OBOE_OK, OboeStream_requestFlush(oboeStream));
- EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
- OBOE_STREAM_STATE_FLUSHING,
- &state,
- DEFAULT_STATE_TIMEOUT));
- EXPECT_EQ(OBOE_STREAM_STATE_FLUSHED, state);
-
- // After a flush, the read counter should be caught up with the write counter.
- EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
- EXPECT_EQ(framesTotal, oboeFramesWritten);
- EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
- EXPECT_EQ(oboeFramesRead, oboeFramesWritten);
-
- // The buffer should be empty after a flush so we should be able to write.
- framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
- // There should be some room for priming the buffer.
- ASSERT_TRUE(framesWritten > 0 && framesWritten <= framesPerBurst);
-
- EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
-}
-
-// Test Writing to an OboeStream using LEGACY sharing mode.
-TEST(test_oboe_api, oboe_stream_legacy) {
- runtest_oboe_stream(OBOE_SHARING_MODE_LEGACY);
-}
-
-// Test Writing to an OboeStream using EXCLUSIVE sharing mode.
-TEST(test_oboe_api, oboe_stream_exclusive) {
- runtest_oboe_stream(OBOE_SHARING_MODE_EXCLUSIVE);
-}
-
-#define OBOE_THREAD_ANSWER 1826375
-#define OBOE_THREAD_DURATION_MSEC 500
-
-static void *TestOboeStreamThreadProc(void *arg) {
- OboeStream oboeStream = (OboeStream) reinterpret_cast<size_t>(arg);
- oboe_stream_state_t state;
-
- // Use this to sleep by waiting for something that won't happen.
- EXPECT_EQ(OBOE_OK, OboeStream_getState(oboeStream, &state));
- OboeStream_waitForStateChange(oboeStream, OBOE_STREAM_STATE_PAUSED, &state,
- OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND);
- return reinterpret_cast<void *>(OBOE_THREAD_ANSWER);
-}
-
-// Test creating a stream related thread.
-TEST(test_oboe_api, oboe_stream_thread_basic) {
- OboeStreamBuilder oboeBuilder;
- OboeStream oboeStream;
- oboe_result_t result = OBOE_OK;
- void *threadResult;
-
- // Use an OboeStreamBuilder to define the stream.
- result = Oboe_createStreamBuilder(&oboeBuilder);
- ASSERT_EQ(OBOE_OK, result);
-
- // Create an OboeStream using the Builder.
- ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
-
- // Start a thread.
- ASSERT_EQ(OBOE_OK, OboeStream_createThread(oboeStream,
- 10 * OBOE_NANOS_PER_MILLISECOND,
- TestOboeStreamThreadProc,
- reinterpret_cast<void *>(oboeStream)));
- // Thread already started.
- ASSERT_NE(OBOE_OK, OboeStream_createThread(oboeStream, // should fail!
- 10 * OBOE_NANOS_PER_MILLISECOND,
- TestOboeStreamThreadProc,
- reinterpret_cast<void *>(oboeStream)));
-
- // Wait for the thread to finish.
- ASSERT_EQ(OBOE_OK, OboeStream_joinThread(oboeStream,
- &threadResult, 2 * OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND));
- // The thread returns a special answer.
- ASSERT_EQ(OBOE_THREAD_ANSWER, (int)reinterpret_cast<size_t>(threadResult));
-
- // Thread should already be joined.
- ASSERT_NE(OBOE_OK, OboeStream_joinThread(oboeStream, // should fail!
- &threadResult, 2 * OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND));
-
- // Cleanup
- EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
- EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
-}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index c63ab47..3235e81 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -46,6 +46,8 @@
#include <media/hardware/HardwareAPI.h>
#include <media/OMXBuffer.h>
+#include <hidlmemory/mapping.h>
+
#include <OMX_AudioExt.h>
#include <OMX_VideoExt.h>
#include <OMX_Component.h>
@@ -59,6 +61,10 @@
#include "include/SharedMemoryBuffer.h"
#include "omx/OMXUtils.h"
+#include <android/hidl/memory/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include "omx/hal/1.0/utils/WOmxNode.h"
+
namespace android {
using binder::Status;
@@ -282,7 +288,9 @@
////////////////////////////////////////////////////////////////////////////////
-struct ACodec::DeathNotifier : public IBinder::DeathRecipient {
+struct ACodec::DeathNotifier :
+ public IBinder::DeathRecipient,
+ public ::android::hardware::hidl_death_recipient {
explicit DeathNotifier(const sp<AMessage> ¬ify)
: mNotify(notify) {
}
@@ -291,6 +299,12 @@
mNotify->post();
}
+ virtual void serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */) {
+ mNotify->post();
+ }
+
protected:
virtual ~DeathNotifier() {}
@@ -560,6 +574,8 @@
memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
changeState(mUninitializedState);
+
+ updateTrebleFlag();
}
ACodec::~ACodec() {
@@ -811,7 +827,11 @@
status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
- CHECK(mDealer[portIndex] == NULL);
+ if (getTrebleFlag()) {
+ CHECK(mAllocator[portIndex] == NULL);
+ } else {
+ CHECK(mDealer[portIndex] == NULL);
+ }
CHECK(mBuffers[portIndex].isEmpty());
status_t err;
@@ -874,14 +894,26 @@
return NO_MEMORY;
}
- size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
if (mode != IOMX::kPortModePresetSecureBuffer) {
- mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+ if (getTrebleFlag()) {
+ mAllocator[portIndex] = TAllocator::getService("ashmem");
+ if (mAllocator[portIndex] == nullptr) {
+ ALOGE("hidl allocator on port %d is null",
+ (int)portIndex);
+ return NO_MEMORY;
+ }
+ } else {
+ size_t totalSize = def.nBufferCountActual *
+ (alignedSize + alignedConvSize);
+ mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+ }
}
const sp<AMessage> &format =
portIndex == kPortIndexInput ? mInputFormat : mOutputFormat;
for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
+ hidl_memory hidlMemToken;
+ sp<TMemory> hidlMem;
sp<IMemory> mem;
BufferInfo info;
@@ -903,30 +935,86 @@
: new SecureBuffer(format, native_handle, bufSize);
info.mCodecData = info.mData;
} else {
- mem = mDealer[portIndex]->allocate(bufSize);
- if (mem == NULL || mem->pointer() == NULL) {
- return NO_MEMORY;
- }
+ if (getTrebleFlag()) {
+ bool success;
+ auto transStatus = mAllocator[portIndex]->allocate(
+ bufSize,
+ [&success, &hidlMemToken](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ hidlMemToken = m;
+ });
- err = mOMXNode->useBuffer(portIndex, mem, &info.mBufferID);
+ if (!transStatus.isOk()) {
+ ALOGE("hidl's AshmemAllocator failed at the "
+ "transport: %s",
+ transStatus.description().c_str());
+ return NO_MEMORY;
+ }
+ if (!success) {
+ return NO_MEMORY;
+ }
+ hidlMem = mapMemory(hidlMemToken);
+
+ err = mOMXNode->useBuffer(
+ portIndex, hidlMemToken, &info.mBufferID);
+ } else {
+ mem = mDealer[portIndex]->allocate(bufSize);
+ if (mem == NULL || mem->pointer() == NULL) {
+ return NO_MEMORY;
+ }
+
+ err = mOMXNode->useBuffer(
+ portIndex, mem, &info.mBufferID);
+ }
if (mode == IOMX::kPortModeDynamicANWBuffer) {
- ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
+ VideoNativeMetadata* metaData = (VideoNativeMetadata*)(
+ getTrebleFlag() ?
+ (void*)hidlMem->getPointer() : mem->pointer());
+ metaData->nFenceFd = -1;
}
- info.mCodecData = new SharedMemoryBuffer(format, mem);
- info.mCodecRef = mem;
+ if (getTrebleFlag()) {
+ info.mCodecData = new SharedMemoryBuffer(
+ format, hidlMem);
+ info.mCodecRef = hidlMem;
+ } else {
+ info.mCodecData = new SharedMemoryBuffer(
+ format, mem);
+ info.mCodecRef = mem;
+ }
// if we require conversion, allocate conversion buffer for client use;
// otherwise, reuse codec buffer
if (mConverter[portIndex] != NULL) {
CHECK_GT(conversionBufferSize, (size_t)0);
- mem = mDealer[portIndex]->allocate(conversionBufferSize);
- if (mem == NULL|| mem->pointer() == NULL) {
- return NO_MEMORY;
+ if (getTrebleFlag()) {
+ bool success;
+ mAllocator[portIndex]->allocate(
+ conversionBufferSize,
+ [&success, &hidlMemToken](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ hidlMemToken = m;
+ });
+ if (!success) {
+ return NO_MEMORY;
+ }
+ hidlMem = mapMemory(hidlMemToken);
+ info.mData = new SharedMemoryBuffer(format, hidlMem);
+ info.mMemRef = hidlMem;
+ } else {
+ mem = mDealer[portIndex]->allocate(
+ conversionBufferSize);
+ if (mem == NULL || mem->pointer() == NULL) {
+ return NO_MEMORY;
+ }
+ info.mData = new SharedMemoryBuffer(format, mem);
+ info.mMemRef = mem;
}
- info.mData = new SharedMemoryBuffer(format, mem);
- info.mMemRef = mem;
} else {
info.mData = info.mCodecData;
info.mMemRef = info.mCodecRef;
@@ -1458,8 +1546,11 @@
}
}
- // clear mDealer even on an error
- mDealer[portIndex].clear();
+ if (getTrebleFlag()) {
+ mAllocator[portIndex].clear();
+ } else {
+ mDealer[portIndex].clear();
+ }
return err;
}
@@ -6041,8 +6132,16 @@
if (mDeathNotifier != NULL) {
if (mCodec->mOMXNode != NULL) {
- sp<IBinder> binder = IInterface::asBinder(mCodec->mOMXNode);
- binder->unlinkToDeath(mDeathNotifier);
+ if (mCodec->getTrebleFlag()) {
+ auto tOmxNode =
+ (static_cast<
+ ::android::hardware::media::omx::V1_0::utils::
+ LWOmxNode*>(mCodec->mOMXNode.get()))->mBase;
+ tOmxNode->unlinkToDeath(mDeathNotifier);
+ } else {
+ sp<IBinder> binder = IInterface::asBinder(mCodec->mOMXNode);
+ binder->unlinkToDeath(mDeathNotifier);
+ }
}
mDeathNotifier.clear();
}
@@ -6130,7 +6229,8 @@
CHECK(mCodec->mOMXNode == NULL);
OMXClient client;
- if (client.connect() != OK) {
+ if ((mCodec->updateTrebleFlag() ?
+ client.connectTreble() : client.connect()) != OK) {
mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
return false;
}
@@ -6202,10 +6302,18 @@
}
mDeathNotifier = new DeathNotifier(notify);
- if (IInterface::asBinder(omxNode)->linkToDeath(mDeathNotifier) != OK) {
- // This was a local binder, if it dies so do we, we won't care
- // about any notifications in the afterlife.
- mDeathNotifier.clear();
+ if (mCodec->getTrebleFlag()) {
+ auto tOmxNode = (static_cast<::android::hardware::media::omx::V1_0::
+ utils::LWOmxNode*>(omxNode.get()))->mBase;
+ if (!tOmxNode->linkToDeath(mDeathNotifier, 0)) {
+ mDeathNotifier.clear();
+ }
+ } else {
+ if (IInterface::asBinder(omxNode)->linkToDeath(mDeathNotifier) != OK) {
+ // This was a local binder, if it dies so do we, we won't care
+ // about any notifications in the afterlife.
+ mDeathNotifier.clear();
+ }
}
notify = new AMessage(kWhatOMXMessageList, mCodec);
@@ -7181,7 +7289,11 @@
mCodec->mBuffers[kPortIndexOutput].size());
err = FAILED_TRANSACTION;
} else {
- mCodec->mDealer[kPortIndexOutput].clear();
+ if (mCodec->getTrebleFlag()) {
+ mCodec->mAllocator[kPortIndexOutput].clear();
+ } else {
+ mCodec->mDealer[kPortIndexOutput].clear();
+ }
}
if (err == OK) {
@@ -7564,7 +7676,8 @@
}
OMXClient client;
- status_t err = client.connect();
+ status_t err = getTrebleFlag() ?
+ client.connectTreble() : client.connect();
if (err != OK) {
return err;
}
@@ -7780,4 +7893,15 @@
return OK;
}
+bool ACodec::updateTrebleFlag() {
+ mTrebleFlag = bool(property_get_bool("debug.treble_omx", 0));
+ ALOGV("updateTrebleFlag() returns %s",
+ mTrebleFlag ? "true" : "false");
+ return mTrebleFlag;
+}
+
+bool ACodec::getTrebleFlag() const {
+ return mTrebleFlag;
+}
+
} // namespace android
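
The Treble branch added above replaces MemoryDealer with the android.hidl.memory@1.0 ashmem allocator. A minimal sketch of that allocate-and-map sequence follows; it assumes the TAllocator/TMemory typedefs resolve to the IAllocator/IMemory types named by the new includes, and it is not part of the patch itself.

    // Allocate a shared-memory buffer through the HIDL ashmem service and map it,
    // mirroring the getTrebleFlag() branch of ACodec::allocateBuffersOnPort above.
    #include <android/hidl/memory/1.0/IAllocator.h>
    #include <android/hidl/memory/1.0/IMemory.h>
    #include <hidlmemory/mapping.h>

    using TAllocator = ::android::hidl::memory::V1_0::IAllocator;
    using TMemory    = ::android::hidl::memory::V1_0::IMemory;
    using ::android::hardware::hidl_memory;
    using ::android::sp;

    static sp<TMemory> allocateHidlBuffer(size_t size) {
        sp<TAllocator> allocator = TAllocator::getService("ashmem");
        if (allocator == nullptr) {
            return nullptr;                        // allocator service unavailable
        }
        bool success = false;
        hidl_memory token;
        auto transStatus = allocator->allocate(size,
                [&](bool s, const hidl_memory& m) {
                    success = s;
                    token = m;                     // keep the token past the callback
                });
        if (!transStatus.isOk() || !success) {
            return nullptr;                        // transport or allocation failure
        }
        return mapMemory(token);                   // getPointer()/getSize() on the result
    }
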
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index c2407f7..410dbc9 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -113,7 +113,11 @@
libstagefright_foundation \
libdl \
libRScpp \
- libhidlbase \
+ libhidlbase \
+ libhidlmemory \
+ android.hidl.memory@1.0 \
+ android.hardware.media.omx@1.0 \
+ android.hardware.media.omx@1.0-utils \
LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libmedia
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index 40eb942..fee3739 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -34,6 +34,11 @@
mMemory(mem) {
}
+SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<TMemory> &mem)
+ : MediaCodecBuffer(format, new ABuffer(mem->getPointer(), mem->getSize())),
+ mTMemory(mem) {
+}
+
SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
: MediaCodecBuffer(format, new ABuffer(nullptr, size)),
mPointer(ptr) {
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 4a965ba..ded79f3 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -130,7 +130,7 @@
}
String8 cacheConfig;
- bool disconnectAtHighwatermark;
+ bool disconnectAtHighwatermark = false;
KeyedVector<String8, String8> nonCacheSpecificHeaders;
if (headers != NULL) {
nonCacheSpecificHeaders = *headers;
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 87d7d3c..0dd3e88 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -4515,8 +4515,8 @@
// fall through
}
- off64_t offset;
- size_t size;
+ off64_t offset = 0;
+ size_t size = 0;
uint32_t cts, stts;
bool isSyncSample;
bool newBuffer = false;
@@ -5066,6 +5066,10 @@
return NULL;
}
+void MPEG4Extractor::populateMetrics() {
+ ALOGV("MPEG4Extractor::populateMetrics");
+}
+
static bool LegacySniffMPEG4(
const sp<DataSource> &source, String8 *mimeType, float *confidence) {
uint8_t header[8];
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 5dc9ffa..a332cce 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -59,10 +59,13 @@
namespace android {
// key for media statistics
-static const char *CodecKeyName = "codec";
+static const char *kCodecKeyName = "codec";
// attrs for media statistics
-static const char *CodecMime = "mime";
-static const char *CodecCodec = "codec";
+static const char *kCodecCodec = "codec"; /* e.g. OMX.google.aac.decoder */
+static const char *kCodecMime = "mime"; /* e.g. audio/mpeg */
+static const char *kCodecMode = "mode"; /* audio, video */
+static const char *kCodecSecure = "secure"; /* 0, 1 */
+
static int64_t getId(const sp<IResourceManagerClient> &client) {
@@ -485,7 +488,7 @@
mUid = uid;
}
// set up our new record, get a sessionID, put it into the in-progress list
- mAnalyticsItem = new MediaAnalyticsItem(CodecKeyName);
+ mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
if (mAnalyticsItem != NULL) {
(void) mAnalyticsItem->generateSessionID();
// don't record it yet; only at the end, when we have decided that we have
@@ -627,11 +630,11 @@
if (mAnalyticsItem != NULL) {
if (nameIsType) {
// name is the mime type
- mAnalyticsItem->setCString(CodecMime, name.c_str());
+ mAnalyticsItem->setCString(kCodecMime, name.c_str());
} else {
- mAnalyticsItem->setCString(CodecCodec, name.c_str());
+ mAnalyticsItem->setCString(kCodecCodec, name.c_str());
}
- mAnalyticsItem->setCString("mode", mIsVideo ? "video" : "audio");
+ mAnalyticsItem->setCString(kCodecMode, mIsVideo ? "video" : "audio");
//mAnalyticsItem->setInt32("type", nameIsType);
if (nameIsType)
mAnalyticsItem->setInt32("encoder", encoder);
@@ -1465,6 +1468,10 @@
CHECK(msg->findString("componentName", &mComponentName));
+ if (mComponentName.c_str()) {
+ mAnalyticsItem->setCString(kCodecCodec, mComponentName.c_str());
+ }
+
if (mComponentName.startsWith("OMX.google.")) {
mFlags |= kFlagUsesSoftwareRenderer;
} else {
@@ -1475,9 +1482,11 @@
if (mComponentName.endsWith(".secure")) {
mFlags |= kFlagIsSecure;
resourceType = MediaResource::kSecureCodec;
+ mAnalyticsItem->setInt32(kCodecSecure, 1);
} else {
mFlags &= ~kFlagIsSecure;
resourceType = MediaResource::kNonSecureCodec;
+ mAnalyticsItem->setInt32(kCodecSecure, 0);
}
if (mIsVideo) {
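
With the keys renamed and the component name and secure bit now recorded, a typical analytics record for a codec instance is built up as below. This is a sketch with illustrative values and an assumed header path, not part of the patch.

    // Record the codec attributes that MediaCodec now writes with the k-prefixed keys.
    #include <media/MediaAnalyticsItem.h>

    static void recordCodecExample() {
        android::MediaAnalyticsItem *item =
                new android::MediaAnalyticsItem("codec");         // kCodecKeyName
        (void) item->generateSessionID();
        item->setCString("codec", "OMX.google.aac.decoder");      // kCodecCodec, once known
        item->setCString("mime", "audio/mp4a-latm");              // kCodecMime, when created by type
        item->setCString("mode", "audio");                        // kCodecMode: "audio" or "video"
        item->setInt32("secure", 0);                              // kCodecSecure: 1 for *.secure
        delete item;
    }
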
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 677d43e..62c0d8a 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -55,7 +55,7 @@
namespace android {
// key for media statistics
-static const char *KeyName_Extractor = "extractor";
+static const char *kKeyExtractor = "extractor";
// attrs for media statistics
MediaExtractor::MediaExtractor() {
@@ -67,7 +67,7 @@
mAnalyticsItem = NULL;
if (MEDIA_LOG) {
- mAnalyticsItem = new MediaAnalyticsItem(KeyName_Extractor);
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyExtractor);
(void) mAnalyticsItem->generateSessionID();
}
}
@@ -93,6 +93,23 @@
return new MetaData;
}
+status_t MediaExtractor::getMetrics(Parcel *reply) {
+
+ if (mAnalyticsItem == NULL || reply == NULL) {
+ return UNKNOWN_ERROR;
+ }
+
+ populateMetrics();
+ mAnalyticsItem->writeToParcel(reply);
+
+ return OK;
+}
+
+void MediaExtractor::populateMetrics() {
+ ALOGV("MediaExtractor::populateMetrics");
+ // normally overridden in subclasses
+}
+
uint32_t MediaExtractor::flags() const {
return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_PAUSE | CAN_SEEK;
}
@@ -247,24 +264,23 @@
// track the container format (mpeg, aac, wvm, etc)
if (MEDIA_LOG) {
if (ret->mAnalyticsItem != NULL) {
+ size_t ntracks = ret->countTracks();
ret->mAnalyticsItem->setCString("fmt", ret->name());
// tracks (size_t)
- ret->mAnalyticsItem->setInt32("ntrk", ret->countTracks());
+ ret->mAnalyticsItem->setInt32("ntrk", ntracks);
// metadata
sp<MetaData> pMetaData = ret->getMetaData();
if (pMetaData != NULL) {
String8 xx = pMetaData->toString();
- ALOGD("metadata says: %s", xx.string());
- // can grab various fields like:
// 'titl' -- but this verges into PII
// 'mime'
const char *mime = NULL;
if (pMetaData->findCString(kKeyMIMEType, &mime)) {
ret->mAnalyticsItem->setCString("mime", mime);
}
- // what else is interesting here?
+ // what else is interesting and not already available?
}
- }
+ }
}
}
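
Subclasses hook the new metrics path by overriding populateMetrics() and writing into mAnalyticsItem; getMetrics() checks the item for NULL, calls the override, then parcels it. A hypothetical override is sketched below; MyExtractor and the "container-flags" key are invented for illustration only.

    // Sketch of a subclass override for the new MediaExtractor metrics hook.
    void MyExtractor::populateMetrics() {
        // mAnalyticsItem is non-NULL here; getMetrics() already checked it.
        mAnalyticsItem->setCString("fmt", name());            // same key the base record uses
        mAnalyticsItem->setInt32("container-flags", flags()); // invented key, example only
    }
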
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index b13877d..c7b8888 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -41,7 +41,7 @@
MediaMuxer::MediaMuxer(int fd, OutputFormat format)
: mFormat(format),
mState(UNINITIALIZED) {
- if (format == OUTPUT_FORMAT_MPEG_4) {
+ if (format == OUTPUT_FORMAT_MPEG_4 || format == OUTPUT_FORMAT_THREE_GPP) {
mWriter = new MPEG4Writer(fd);
} else if (format == OUTPUT_FORMAT_WEBM) {
mWriter = new WebmWriter(fd);
@@ -108,8 +108,8 @@
ALOGE("setLocation() must be called before start().");
return INVALID_OPERATION;
}
- if (mFormat != OUTPUT_FORMAT_MPEG_4) {
- ALOGE("setLocation() is only supported for .mp4 output.");
+ if (mFormat != OUTPUT_FORMAT_MPEG_4 && mFormat != OUTPUT_FORMAT_THREE_GPP) {
+ ALOGE("setLocation() is only supported for .mp4 pr .3gp output.");
return INVALID_OPERATION;
}
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index d25ce6c..1c1acb0 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -569,6 +569,11 @@
return OK;
}
+status_t NuMediaExtractor::getMetrics(Parcel *reply) {
+ status_t status = mImpl->getMetrics(reply);
+ return status;
+}
+
bool NuMediaExtractor::getTotalBitrate(int64_t *bitrate) const {
if (mTotalBitrate >= 0) {
*bitrate = mTotalBitrate;
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index a29aff0..b4e694c 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -29,6 +29,8 @@
#include "include/OMX.h"
+#include "omx/hal/1.0/utils/WOmx.h"
+
namespace android {
OMXClient::OMXClient() {
@@ -53,6 +55,21 @@
return OK;
}
+status_t OMXClient::connectTreble() {
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmx> tOmx = IOmx::getService("default");
+ if (tOmx.get() == nullptr) {
+ ALOGE("Cannot obtain Treble IOmx.");
+ return NO_INIT;
+ }
+ if (!tOmx->isRemote()) {
+ ALOGE("Treble IOmx is in passthrough mode.");
+ return NO_INIT;
+ }
+ mOMX = new utils::LWOmx(tOmx);
+ return OK;
+}
+
void OMXClient::disconnect() {
mOMX.clear();
}
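
ACodec chooses between the two entry points with the debug.treble_omx property (see updateTrebleFlag() above). The same selection written in one place, as a sketch with assumed header paths:

    // Property-gated choice between the Treble IOmx service and the legacy binder OMX.
    #include <cutils/properties.h>
    #include <media/stagefright/OMXClient.h>

    static android::status_t connectOmx(android::OMXClient *client) {
        bool treble = property_get_bool("debug.treble_omx", 0);   // same switch ACodec reads
        return treble ? client->connectTreble() : client->connect();
    }
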
diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp
index 54c9fa3..4134698 100644
--- a/media/libstagefright/SampleIterator.cpp
+++ b/media/libstagefright/SampleIterator.cpp
@@ -305,8 +305,16 @@
return ERROR_OUT_OF_RANGE;
}
- while (sampleIndex >= mTTSSampleIndex + mTTSCount) {
- if (mTimeToSampleIndex == mTable->mTimeToSampleCount) {
+ while (true) {
+ if (mTTSSampleIndex > UINT32_MAX - mTTSCount) {
+ return ERROR_OUT_OF_RANGE;
+ }
+ if (sampleIndex < mTTSSampleIndex + mTTSCount) {
+ break;
+ }
+ if (mTimeToSampleIndex == mTable->mTimeToSampleCount ||
+ (mTTSDuration != 0 && mTTSCount > UINT32_MAX / mTTSDuration) ||
+ mTTSSampleTime > UINT32_MAX - (mTTSCount * mTTSDuration)) {
return ERROR_OUT_OF_RANGE;
}
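
The reworked loop checks every addition and multiplication against UINT32_MAX before performing it. The general guards, independent of SampleIterator, look like this sketch:

    // Unsigned-overflow guards of the form used above: test before computing.
    #include <stdint.h>

    static bool addWouldOverflow(uint32_t a, uint32_t b) {
        return a > UINT32_MAX - b;            // a + b would wrap
    }

    static bool mulWouldOverflow(uint32_t a, uint32_t b) {
        return b != 0 && a > UINT32_MAX / b;  // a * b would wrap
    }
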
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index a1c4979..6e7ef35 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -457,7 +457,10 @@
const uint8_t *nalStart;
size_t nalSize;
while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
- CHECK_GT(nalSize, 0u);
+ if (nalSize == 0u) {
+ ALOGW("skipping empty nal unit from potentially malformed bitstream");
+ continue;
+ }
unsigned nalType = nalStart[0] & 0x1f;
diff --git a/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h b/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h
index 337bff0..adb0dd4 100644
--- a/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h
+++ b/media/libstagefright/codecs/mp3dec/src/pv_mp3dec_fxd_op_c_equivalent.h
@@ -50,6 +50,7 @@
+ __attribute__((no_sanitize("integer")))
__inline int32 pv_abs(int32 a)
{
int32 b = (a < 0) ? -a : a;
@@ -59,49 +60,58 @@
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mul32_Q30(const Int32 a, const Int32 b)
{
return (Int32)(((int64)(a) * b) >> 30);
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mac32_Q30(const Int32 a, const Int32 b, Int32 L_add)
{
return (L_add + (Int32)(((int64)(a) * b) >> 30));
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mul32_Q32(const Int32 a, const Int32 b)
{
return (Int32)(((int64)(a) * b) >> 32);
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mul32_Q28(const Int32 a, const Int32 b)
{
return (Int32)(((int64)(a) * b) >> 28);
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mul32_Q27(const Int32 a, const Int32 b)
{
return (Int32)(((int64)(a) * b) >> 27);
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mul32_Q26(const Int32 a, const Int32 b)
{
return (Int32)(((int64)(a) * b) >> 26);
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mac32_Q32(Int32 L_add, const Int32 a, const Int32 b)
{
return (L_add + (Int32)(((int64)(a) * b) >> 32));
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_msb32_Q32(Int32 L_sub, const Int32 a, const Int32 b)
{
return (L_sub - ((Int32)(((int64)(a) * b) >> 32)));
}
+ __attribute__((no_sanitize("integer")))
__inline Int32 fxp_mul32_Q29(const Int32 a, const Int32 b)
{
return (Int32)(((int64)(a) * b) >> 29);
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_16.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_16.cpp
index 9b9ae4b..cc99d5c 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_16.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_16.cpp
@@ -149,6 +149,7 @@
; FUNCTION CODE
----------------------------------------------------------------------------*/
+__attribute__((no_sanitize("integer")))
void pvmp3_dct_16(int32 vec[], int32 flag)
{
int32 tmp0;
@@ -308,6 +309,7 @@
/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/
+__attribute__((no_sanitize("integer")))
void pvmp3_merge_in_place_N32(int32 vec[])
{
@@ -366,6 +368,7 @@
+__attribute__((no_sanitize("integer")))
void pvmp3_split(int32 *vect)
{
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp
index d30ce4a..bbb247d 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_9.cpp
@@ -117,6 +117,7 @@
; FUNCTION CODE
----------------------------------------------------------------------------*/
+__attribute__((no_sanitize("integer")))
void pvmp3_dct_9(int32 vec[])
{
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_18.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_18.cpp
index 09a735b..324290e 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_18.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_18.cpp
@@ -129,6 +129,7 @@
+__attribute__((no_sanitize("integer")))
void pvmp3_mdct_18(int32 vec[], int32 *history, const int32 *window)
{
int32 i;
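
These decoders rely on intentional wrap-around in 32x32-to-64-bit fixed-point arithmetic, so the hot functions are excluded from clang's integer sanitizer one by one. The attribute is applied per function, as in this sketch:

    // Disable clang's integer sanitizer for a fixed-point multiply whose
    // intermediate wrap-around is intentional, matching the hunks above.
    __attribute__((no_sanitize("integer")))
    static inline int32_t fxp_mul32_q30(int32_t a, int32_t b) {
        return (int32_t)(((int64_t)a * b) >> 30);
    }
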
diff --git a/media/libstagefright/filters/Android.mk b/media/libstagefright/filters/Android.mk
index f8e8352..830d2aa 100644
--- a/media/libstagefright/filters/Android.mk
+++ b/media/libstagefright/filters/Android.mk
@@ -22,7 +22,9 @@
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_SHARED_LIBRARIES := libmedia
+LOCAL_SHARED_LIBRARIES := \
+ libmedia \
+ libhidlmemory \
LOCAL_MODULE:= libstagefright_mediafilter
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index fa05886..f847119 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -66,6 +66,8 @@
protected:
virtual ~MPEG4Extractor();
+ virtual void populateMetrics();
+
private:
struct PsshInfo {
diff --git a/media/libstagefright/include/SharedMemoryBuffer.h b/media/libstagefright/include/SharedMemoryBuffer.h
index 1d7f7a6..92df68a 100644
--- a/media/libstagefright/include/SharedMemoryBuffer.h
+++ b/media/libstagefright/include/SharedMemoryBuffer.h
@@ -19,6 +19,7 @@
#define SHARED_MEMORY_BUFFER_H_
#include <media/MediaCodecBuffer.h>
+#include <android/hidl/memory/1.0/IMemory.h>
namespace android {
@@ -30,7 +31,9 @@
*/
class SharedMemoryBuffer : public MediaCodecBuffer {
public:
+ typedef ::android::hidl::memory::V1_0::IMemory TMemory;
SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem);
+ SharedMemoryBuffer(const sp<AMessage> &format, const sp<TMemory> &mem);
virtual ~SharedMemoryBuffer() = default;
@@ -38,6 +41,7 @@
SharedMemoryBuffer() = delete;
const sp<IMemory> mMemory;
+ const sp<TMemory> mTMemory;
};
} // namespace android
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 4909100..a0ddc28 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -43,6 +43,35 @@
static const OMX_U32 kPortIndexInput = 0;
+class GraphicBufferSource::OmxBufferSource : public BnOMXBufferSource {
+public:
+ GraphicBufferSource* mSource;
+
+ OmxBufferSource(GraphicBufferSource* source): mSource(source) {
+ }
+
+ Status onOmxExecuting() override {
+ return mSource->onOmxExecuting();
+ }
+
+ Status onOmxIdle() override {
+ return mSource->onOmxIdle();
+ }
+
+ Status onOmxLoaded() override {
+ return mSource->onOmxLoaded();
+ }
+
+ Status onInputBufferAdded(int bufferId) override {
+ return mSource->onInputBufferAdded(bufferId);
+ }
+
+ Status onInputBufferEmptied(
+ int bufferId, const OMXFenceParcelable& fenceParcel) override {
+ return mSource->onInputBufferEmptied(bufferId, fenceParcel);
+ }
+};
+
GraphicBufferSource::GraphicBufferSource() :
mInitCheck(UNKNOWN_ERROR),
mExecuting(false),
@@ -66,7 +95,8 @@
mTimePerFrameUs(-1ll),
mPrevCaptureUs(-1ll),
mPrevFrameUs(-1ll),
- mInputBufferTimeOffsetUs(0ll) {
+ mInputBufferTimeOffsetUs(0ll),
+ mOmxBufferSource(new OmxBufferSource(this)) {
ALOGV("GraphicBufferSource");
String8 name("GraphicBufferSource");
@@ -766,7 +796,7 @@
// Do setInputSurface() first, the node will try to enable metadata
// mode on input, and does necessary error checking. If this fails,
// we can't use this input surface on the node.
- status_t err = omxNode->setInputSurface(this);
+ status_t err = omxNode->setInputSurface(mOmxBufferSource);
if (err != NO_ERROR) {
ALOGE("Unable to set input surface: %d", err);
return Status::fromServiceSpecificError(err);
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 80fe078..153a035 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -55,7 +55,6 @@
* things up until we're ready to go.
*/
class GraphicBufferSource : public BnGraphicBufferSource,
- public BnOMXBufferSource,
public BufferQueue::ConsumerListener {
public:
GraphicBufferSource();
@@ -77,26 +76,26 @@
// This is called when OMX transitions to OMX_StateExecuting, which means
// we can start handing it buffers. If we already have buffers of data
// sitting in the BufferQueue, this will send them to the codec.
- Status onOmxExecuting() override;
+ Status onOmxExecuting();
// This is called when OMX transitions to OMX_StateIdle, indicating that
// the codec is meant to return all buffers back to the client for them
// to be freed. Do NOT submit any more buffers to the component.
- Status onOmxIdle() override;
+ Status onOmxIdle();
// This is called when OMX transitions to OMX_StateLoaded, indicating that
// we are shutting down.
- Status onOmxLoaded() override;
+ Status onOmxLoaded();
// A "codec buffer", i.e. a buffer that can be used to pass data into
// the encoder, has been allocated. (This call does not call back into
// OMXNodeInstance.)
- Status onInputBufferAdded(int32_t bufferID) override;
+ Status onInputBufferAdded(int32_t bufferID);
// Called from OnEmptyBufferDone. If we have a BQ buffer available,
// fill it with a new frame of data; otherwise, just mark it as available.
Status onInputBufferEmptied(
- int32_t bufferID, const OMXFenceParcelable& fenceParcel) override;
+ int32_t bufferID, const OMXFenceParcelable& fenceParcel);
// Configure the buffer source to be used with an OMX node with the default
// data space.
@@ -301,6 +300,9 @@
ColorAspects mColorAspects;
+ class OmxBufferSource;
+ sp<OmxBufferSource> mOmxBufferSource;
+
void onMessageReceived(const sp<AMessage> &msg);
DISALLOW_EVIL_CONSTRUCTORS(GraphicBufferSource);
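
GraphicBufferSource no longer multiply-inherits BnOMXBufferSource; a private inner object implements the buffer-source interface and forwards each callback to the outer class. The shape of that delegation, with generic types standing in for the OMX ones, is sketched below.

    // Forwarder pattern used above: the owner exposes a small callback object
    // instead of inheriting the callback interface itself.
    struct BufferCallback {
        virtual ~BufferCallback() = default;
        virtual void onInputBufferAdded(int bufferId) = 0;
    };

    class Source {
    public:
        Source() : mCallback(new Forwarder(this)) {}
        ~Source() { delete mCallback; }
        BufferCallback *callback() const { return mCallback; }  // registered with the node

    private:
        class Forwarder : public BufferCallback {
        public:
            explicit Forwarder(Source *source) : mSource(source) {}
            void onInputBufferAdded(int bufferId) override {
                mSource->onInputBufferAdded(bufferId);           // hand off to the owner
            }
        private:
            Source *mSource;
        };

        void onInputBufferAdded(int bufferId) { (void)bufferId; /* real handling here */ }

        BufferCallback *mCallback;
    };
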
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 03aaa71..39ed759 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -1133,7 +1133,7 @@
buffer_meta = new BufferMeta(
params, hParams, portIndex, false /* copy */, data);
} else {
- data = static_cast<OMX_U8 *>(params->pointer());
+ data = static_cast<OMX_U8 *>(paramsPointer);
buffer_meta = new BufferMeta(
params, hParams, portIndex, false /* copy */, NULL);
diff --git a/media/libstagefright/omx/hal/1.0/impl/Conversion.h b/media/libstagefright/omx/hal/1.0/impl/Conversion.h
index 9cfb4f2..68a3ed2 100644
--- a/media/libstagefright/omx/hal/1.0/impl/Conversion.h
+++ b/media/libstagefright/omx/hal/1.0/impl/Conversion.h
@@ -294,10 +294,11 @@
if (!*nh) {
return false;
}
- t->fence = inHidlHandle(*nh);
+ t->fence = *nh;
switch (l.type) {
case omx_message::EVENT:
t->type = Message::Type::EVENT;
+ t->data.eventData.event = uint32_t(l.u.event_data.event);
t->data.eventData.data1 = l.u.event_data.data1;
t->data.eventData.data2 = l.u.event_data.data2;
t->data.eventData.data3 = l.u.event_data.data3;
@@ -343,6 +344,7 @@
switch (t.type) {
case Message::Type::EVENT:
l->type = omx_message::EVENT;
+ l->u.event_data.event = OMX_EVENTTYPE(t.data.eventData.event);
l->u.event_data.data1 = t.data.eventData.data1;
l->u.event_data.data2 = t.data.eventData.data2;
l->u.event_data.data3 = t.data.eventData.data3;
@@ -590,6 +592,16 @@
}
case OMXBuffer::kBufferTypeANWBuffer: {
t->type = CodecBuffer::Type::ANW_BUFFER;
+ if (l.mGraphicBuffer == nullptr) {
+ t->attr.anwBuffer.width = 0;
+ t->attr.anwBuffer.height = 0;
+ t->attr.anwBuffer.stride = 0;
+ t->attr.anwBuffer.format = static_cast<PixelFormat>(1);
+ t->attr.anwBuffer.layerCount = 0;
+ t->attr.anwBuffer.usage = 0;
+ t->nativeHandle = hidl_handle();
+ return true;
+ }
t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
t->attr.anwBuffer.height = l.mGraphicBuffer->getHeight();
t->attr.anwBuffer.stride = l.mGraphicBuffer->getStride();
@@ -634,6 +646,10 @@
return true;
}
case CodecBuffer::Type::ANW_BUFFER: {
+ if (t.nativeHandle.getNativeHandle() == nullptr) {
+ *l = OMXBuffer(sp<GraphicBuffer>(nullptr));
+ return true;
+ }
*l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
t.attr.anwBuffer.width,
t.attr.anwBuffer.height,
@@ -2080,6 +2096,7 @@
t->transformHint = l.transformHint;
t->numPendingBuffers = l.numPendingBuffers;
t->nextFrameNumber = l.nextFrameNumber;
+ t->bufferReplaced = l.bufferReplaced;
return true;
}
@@ -2105,6 +2122,7 @@
l->transformHint = t.transformHint;
l->numPendingBuffers = t.numPendingBuffers;
l->nextFrameNumber = t.nextFrameNumber;
+ l->bufferReplaced = t.bufferReplaced;
return true;
}
diff --git a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
index 9de8e3e..8ba2924 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
@@ -78,10 +78,6 @@
return toBinderStatus(mBase->signalEndOfInputStream());
}
-::android::IBinder* LWGraphicBufferSource::onAsBinder() {
- return nullptr;
-}
-
// TWGraphicBufferSource
TWGraphicBufferSource::TWGraphicBufferSource(
sp<LGraphicBufferSource> const& base) : mBase(base) {
@@ -89,47 +85,51 @@
Return<void> TWGraphicBufferSource::configure(
const sp<IOmxNode>& omxNode, Dataspace dataspace) {
- return toHardwareStatus(mBase->configure(
- new LWOmxNode(omxNode),
- toRawDataspace(dataspace)));
+ mBase->configure(new LWOmxNode(omxNode), toRawDataspace(dataspace));
+ return Void();
}
Return<void> TWGraphicBufferSource::setSuspend(bool suspend) {
- return toHardwareStatus(mBase->setSuspend(suspend));
+ mBase->setSuspend(suspend);
+ return Void();
}
Return<void> TWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
int64_t repeatAfterUs) {
- return toHardwareStatus(mBase->setRepeatPreviousFrameDelayUs(
- repeatAfterUs));
+ mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::setMaxFps(float maxFps) {
- return toHardwareStatus(mBase->setMaxFps(maxFps));
+ mBase->setMaxFps(maxFps);
+ return Void();
}
Return<void> TWGraphicBufferSource::setTimeLapseConfig(
int64_t timePerFrameUs, int64_t timePerCaptureUs) {
- return toHardwareStatus(mBase->setTimeLapseConfig(
- timePerFrameUs, timePerCaptureUs));
+ mBase->setTimeLapseConfig(timePerFrameUs, timePerCaptureUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::setStartTimeUs(int64_t startTimeUs) {
- return toHardwareStatus(mBase->setStartTimeUs(startTimeUs));
+ mBase->setStartTimeUs(startTimeUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::setColorAspects(
const ColorAspects& aspects) {
- return toHardwareStatus(mBase->setColorAspects(toCompactColorAspects(
- aspects)));
+ mBase->setColorAspects(toCompactColorAspects(aspects));
+ return Void();
}
Return<void> TWGraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
- return toHardwareStatus(mBase->setTimeOffsetUs(timeOffsetUs));
+ mBase->setTimeOffsetUs(timeOffsetUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::signalEndOfInputStream() {
- return toHardwareStatus(mBase->signalEndOfInputStream());
+ mBase->signalEndOfInputStream();
+ return Void();
}
} // namespace implementation
diff --git a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
index 0b9f2ed..69efdde 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
@@ -23,7 +23,7 @@
#include <frameworks/native/include/binder/Binder.h>
#include <IOMX.h>
-#include <android/IGraphicBufferSource.h>
+#include <android/BnGraphicBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <android/hardware/graphics/common/1.0/types.h>
@@ -60,10 +60,11 @@
*/
typedef ::android::IGraphicBufferSource LGraphicBufferSource;
+typedef ::android::BnGraphicBufferSource BnGraphicBufferSource;
typedef ::android::hardware::media::omx::V1_0::IGraphicBufferSource
TGraphicBufferSource;
-struct LWGraphicBufferSource : public LGraphicBufferSource {
+struct LWGraphicBufferSource : public BnGraphicBufferSource {
sp<TGraphicBufferSource> mBase;
LWGraphicBufferSource(sp<TGraphicBufferSource> const& base);
::android::binder::Status configure(
@@ -78,8 +79,6 @@
::android::binder::Status setColorAspects(int32_t aspects) override;
::android::binder::Status setTimeOffsetUs(int64_t timeOffsetsUs) override;
::android::binder::Status signalEndOfInputStream() override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWGraphicBufferSource : public TGraphicBufferSource {
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmx.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmx.cpp
index 0fa8c4c..da1c23d 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmx.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmx.cpp
@@ -79,10 +79,6 @@
return transStatus == NO_ERROR ? fnStatus : transStatus;
}
-::android::IBinder* LWOmx::onAsBinder() {
- return nullptr;
-}
-
// TWOmx
TWOmx::TWOmx(sp<IOMX> const& base) : mBase(base) {
}
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmx.h b/media/libstagefright/omx/hal/1.0/impl/WOmx.h
index 5618d27..ab11c6a 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmx.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmx.h
@@ -45,6 +45,7 @@
using ::android::List;
using ::android::IOMX;
+using ::android::BnOMX;
/**
* Wrapper classes for conversion
@@ -55,7 +56,7 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmx : public IOMX {
+struct LWOmx : public BnOMX {
sp<IOmx> mBase;
LWOmx(sp<IOmx> const& base);
status_t listNodes(List<IOMX::ComponentInfo>* list) override;
@@ -66,8 +67,6 @@
status_t createInputSurface(
sp<::android::IGraphicBufferProducer>* bufferProducer,
sp<::android::IGraphicBufferSource>* bufferSource) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmx : public IOmx {
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp
index a459c9f..3bd6c6e 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp
@@ -203,7 +203,8 @@
Return<void> TWOmxBufferProducer::connect(
const sp<IOmxProducerListener>& listener,
int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
- sp<IProducerListener> lListener = new LWOmxProducerListener(listener);
+ sp<IProducerListener> lListener = listener == nullptr ?
+ nullptr : new LWOmxProducerListener(listener);
IGraphicBufferProducer::QueueBufferOutput lOutput;
status_t status = mBase->connect(lListener,
static_cast<int>(api),
@@ -479,7 +480,8 @@
status_t LWOmxBufferProducer::connect(
const sp<IProducerListener>& listener, int api,
bool producerControlledByApp, QueueBufferOutput* output) {
- sp<IOmxProducerListener> tListener = new TWOmxProducerListener(listener);
+ sp<IOmxProducerListener> tListener = listener == nullptr ?
+ nullptr : new TWOmxProducerListener(listener);
status_t fnStatus;
status_t transStatus = toStatusT(mBase->connect(
tListener, static_cast<int32_t>(api), producerControlledByApp,
@@ -582,10 +584,6 @@
return transStatus == NO_ERROR ? fnStatus : transStatus;
}
-::android::IBinder* LWOmxBufferProducer::onAsBinder() {
- return nullptr;
-}
-
} // namespace implementation
} // namespace V1_0
} // namespace omx
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h
index a991f49..65b093c 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h
@@ -47,6 +47,7 @@
using ::android::sp;
using ::android::IGraphicBufferProducer;
+using ::android::BnGraphicBufferProducer;
using ::android::IProducerListener;
struct TWOmxBufferProducer : public IOmxBufferProducer {
@@ -91,7 +92,7 @@
Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
};
-struct LWOmxBufferProducer : public IGraphicBufferProducer {
+struct LWOmxBufferProducer : public BnGraphicBufferProducer {
sp<IOmxBufferProducer> mBase;
LWOmxBufferProducer(sp<IOmxBufferProducer> const& base);
@@ -128,8 +129,6 @@
sp<Fence>* outFence, float outTransformMatrix[16]) override;
void getFrameTimestamps(FrameEventHistoryDelta* outDelta) override;
status_t getUniqueId(uint64_t* outId) const override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
} // namespace implementation
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp
index 2e00894..97bcee0 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp
@@ -61,38 +61,34 @@
::android::binder::Status status = toBinderStatus(
mBase->onInputBufferEmptied(
static_cast<uint32_t>(bufferId), fence));
- if (native_handle_delete(fenceNh) != 0) {
- return ::android::binder::Status::fromExceptionCode(
- ::android::binder::Status::EX_NULL_POINTER,
- "Cannot delete native handle");
- }
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
return status;
}
-::android::IBinder* LWOmxBufferSource::onAsBinder() {
- return nullptr;
-}
-
// TWOmxBufferSource
TWOmxBufferSource::TWOmxBufferSource(sp<IOMXBufferSource> const& base) :
mBase(base) {
}
Return<void> TWOmxBufferSource::onOmxExecuting() {
- return toHardwareStatus(mBase->onOmxExecuting());
+ mBase->onOmxExecuting();
+ return Void();
}
Return<void> TWOmxBufferSource::onOmxIdle() {
- return toHardwareStatus(mBase->onOmxIdle());
+ mBase->onOmxIdle();
+ return Void();
}
Return<void> TWOmxBufferSource::onOmxLoaded() {
- return toHardwareStatus(mBase->onOmxLoaded());
+ mBase->onOmxLoaded();
+ return Void();
}
Return<void> TWOmxBufferSource::onInputBufferAdded(uint32_t buffer) {
- return toHardwareStatus(mBase->onInputBufferAdded(
- static_cast<int32_t>(buffer)));
+ mBase->onInputBufferAdded(int32_t(buffer));
+ return Void();
}
Return<void> TWOmxBufferSource::onInputBufferEmptied(
@@ -102,8 +98,8 @@
return ::android::hardware::Status::fromExceptionCode(
::android::hardware::Status::EX_BAD_PARCELABLE);
}
- return toHardwareStatus(mBase->onInputBufferEmptied(
- static_cast<int32_t>(buffer), fenceParcelable));
+ mBase->onInputBufferEmptied(int32_t(buffer), fenceParcelable);
+ return Void();
}
} // namespace implementation
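
Several hunks in this change pair native_handle_close() with native_handle_delete() where only the delete was done before: close releases the wrapped file descriptors, delete frees the handle structure. The expected lifecycle when the handle owns its fds, as a sketch:

    // Create a handle that owns one fd, use it, then close the fds and free the struct.
    #include <cutils/native_handle.h>
    #include <fcntl.h>

    static void nativeHandleLifecycle() {
        native_handle_t *nh = native_handle_create(1 /* numFds */, 0 /* numInts */);
        if (nh == nullptr) {
            return;
        }
        nh->data[0] = open("/dev/null", O_RDONLY);  // the handle now owns this fd

        // ... pass nh through a transaction ...

        native_handle_close(nh);    // closes nh->data[0]
        native_handle_delete(nh);   // frees the native_handle_t itself
    }
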
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h
index a2e940f..83bf46f 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h
@@ -22,7 +22,7 @@
#include <hidl/Status.h>
#include <frameworks/native/include/binder/Binder.h>
-#include <android/IOMXBufferSource.h>
+#include <android/BnOMXBufferSource.h>
#include <OMXFenceParcelable.h>
namespace android {
@@ -45,6 +45,7 @@
using ::android::OMXFenceParcelable;
using ::android::IOMXBufferSource;
+using ::android::BnOMXBufferSource;
/**
* Wrapper classes for conversion
@@ -55,7 +56,7 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmxBufferSource : public IOMXBufferSource {
+struct LWOmxBufferSource : public BnOMXBufferSource {
sp<IOmxBufferSource> mBase;
LWOmxBufferSource(sp<IOmxBufferSource> const& base);
::android::binder::Status onOmxExecuting() override;
@@ -64,8 +65,6 @@
::android::binder::Status onInputBufferAdded(int32_t bufferID) override;
::android::binder::Status onInputBufferEmptied(
int32_t bufferID, OMXFenceParcelable const& fenceParcel) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmxBufferSource : public IOmxBufferSource {
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp
index df3ef78..26617aa 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp
@@ -196,6 +196,7 @@
}
status_t status = toStatusT(mBase->fillBuffer(
buffer, codecBuffer, fenceNh));
+ native_handle_close(fenceNh);
native_handle_delete(fenceNh);
return status;
}
@@ -217,6 +218,7 @@
flags,
toRawTicks(timestamp),
fenceNh));
+ native_handle_close(fenceNh);
native_handle_delete(fenceNh);
return status;
}
@@ -240,6 +242,7 @@
return NO_MEMORY;
}
status_t status = toStatusT(mBase->dispatchMessage(tMsg));
+ native_handle_close(nh);
native_handle_delete(nh);
return status;
}
@@ -249,10 +252,6 @@
return toStatusT(mBase->setQuirks(static_cast<uint32_t>(quirks)));;
}
-::android::IBinder* LWOmxNode::onAsBinder() {
- return nullptr;
-}
-
// TWOmxNode
TWOmxNode::TWOmxNode(sp<IOMXNode> const& base) : mBase(base) {
}
@@ -278,7 +277,8 @@
}
Return<Status> TWOmxNode::setParameter(
- uint32_t index, hidl_vec<uint8_t> const& params) {
+ uint32_t index, hidl_vec<uint8_t> const& inParams) {
+ hidl_vec<uint8_t> params(inParams);
return toStatus(mBase->setParameter(
toEnumIndexType(index),
static_cast<void const*>(params.data()),
@@ -298,7 +298,8 @@
}
Return<Status> TWOmxNode::setConfig(
- uint32_t index, const hidl_vec<uint8_t>& config) {
+ uint32_t index, const hidl_vec<uint8_t>& inConfig) {
+ hidl_vec<uint8_t> config(inConfig);
return toStatus(mBase->setConfig(
toEnumIndexType(index),
static_cast<void const*>(config.data()),
@@ -392,7 +393,7 @@
return toStatus(mBase->fillBuffer(
buffer,
omxBuffer,
- native_handle_read_fd(fence)));
+ dup(native_handle_read_fd(fence))));
}
Return<Status> TWOmxNode::emptyBuffer(
@@ -407,7 +408,7 @@
omxBuffer,
flags,
toOMXTicks(timestampUs),
- native_handle_read_fd(fence)));
+ dup(native_handle_read_fd(fence))));
}
Return<void> TWOmxNode::getExtensionIndex(
@@ -422,7 +423,7 @@
Return<Status> TWOmxNode::dispatchMessage(const Message& tMsg) {
omx_message lMsg;
- if (!wrapAs(&lMsg, tMsg)) {
+ if (!convertTo(&lMsg, tMsg)) {
return Status::BAD_VALUE;
}
return toStatus(mBase->dispatchMessage(lMsg));
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
index d606f3a..12c2f04 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
@@ -56,7 +56,7 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmxNode : public IOMXNode {
+struct LWOmxNode : public BnOMXNode {
sp<IOmxNode> mBase;
LWOmxNode(sp<IOmxNode> const& base);
status_t freeNode() override;
@@ -103,8 +103,6 @@
// TODO: this is temporary, will be removed when quirks move to OMX side.
status_t setQuirks(OMX_U32 quirks) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmxNode : public IOmxNode {
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
index 87e7961..ecd1db5 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
@@ -45,14 +45,11 @@
}
mBase->onMessages(tMessages);
for (auto& handle : handles) {
+ native_handle_close(handle);
native_handle_delete(handle);
}
}
-::android::IBinder* LWOmxObserver::onAsBinder() {
- return nullptr;
-}
-
// TWOmxObserver
TWOmxObserver::TWOmxObserver(sp<IOMXObserver> const& base) : mBase(base) {
}
@@ -61,7 +58,7 @@
std::list<omx_message> lMessages;
for (size_t i = 0; i < tMessages.size(); ++i) {
lMessages.push_back(omx_message{});
- wrapAs(&lMessages.back(), tMessages[i]);
+ convertTo(&lMessages.back(), tMessages[i]);
}
mBase->onMessages(lMessages);
return Return<void>();
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h
index 85593c3..cfe4281 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h
@@ -43,6 +43,7 @@
using ::android::sp;
using ::android::IOMXObserver;
+using ::android::BnOMXObserver;
using ::android::omx_message;
/**
@@ -54,12 +55,10 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmxObserver : public IOMXObserver {
+struct LWOmxObserver : public BnOMXObserver {
sp<IOmxObserver> mBase;
LWOmxObserver(sp<IOmxObserver> const& base);
void onMessages(std::list<omx_message> const& lMessages) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmxObserver : public IOmxObserver {
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp
index fa6e9aa..a5eed35 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp
@@ -52,11 +52,6 @@
return static_cast<bool>(mBase->needsReleaseNotify());
}
-::android::IBinder* LWOmxProducerListener::onAsBinder() {
- return nullptr;
-}
-
-
} // namespace implementation
} // namespace V1_0
} // namespace omx
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h
index b93a555..86656ca 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h
@@ -42,6 +42,7 @@
using ::android::sp;
using ::android::IProducerListener;
+using ::android::BnProducerListener;
struct TWOmxProducerListener : public IOmxProducerListener {
sp<IProducerListener> mBase;
@@ -50,14 +51,12 @@
Return<bool> needsReleaseNotify() override;
};
-class LWOmxProducerListener : public IProducerListener {
+class LWOmxProducerListener : public BnProducerListener {
public:
sp<IOmxProducerListener> mBase;
LWOmxProducerListener(sp<IOmxProducerListener> const& base);
void onBufferReleased() override;
bool needsReleaseNotify() override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
} // namespace implementation
diff --git a/media/libstagefright/omx/hal/1.0/utils/Conversion.h b/media/libstagefright/omx/hal/1.0/utils/Conversion.h
index 6a99d8c..05f0c78 100644
--- a/media/libstagefright/omx/hal/1.0/utils/Conversion.h
+++ b/media/libstagefright/omx/hal/1.0/utils/Conversion.h
@@ -294,10 +294,11 @@
if (!*nh) {
return false;
}
- t->fence = inHidlHandle(*nh);
+ t->fence = *nh;
switch (l.type) {
case omx_message::EVENT:
t->type = Message::Type::EVENT;
+ t->data.eventData.event = uint32_t(l.u.event_data.event);
t->data.eventData.data1 = l.u.event_data.data1;
t->data.eventData.data2 = l.u.event_data.data2;
t->data.eventData.data3 = l.u.event_data.data3;
@@ -343,6 +344,7 @@
switch (t.type) {
case Message::Type::EVENT:
l->type = omx_message::EVENT;
+ l->u.event_data.event = OMX_EVENTTYPE(t.data.eventData.event);
l->u.event_data.data1 = t.data.eventData.data1;
l->u.event_data.data2 = t.data.eventData.data2;
l->u.event_data.data3 = t.data.eventData.data3;
@@ -590,6 +592,16 @@
}
case OMXBuffer::kBufferTypeANWBuffer: {
t->type = CodecBuffer::Type::ANW_BUFFER;
+ if (l.mGraphicBuffer == nullptr) {
+ t->attr.anwBuffer.width = 0;
+ t->attr.anwBuffer.height = 0;
+ t->attr.anwBuffer.stride = 0;
+ t->attr.anwBuffer.format = static_cast<PixelFormat>(1);
+ t->attr.anwBuffer.layerCount = 0;
+ t->attr.anwBuffer.usage = 0;
+ t->nativeHandle = hidl_handle();
+ return true;
+ }
t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
t->attr.anwBuffer.height = l.mGraphicBuffer->getHeight();
t->attr.anwBuffer.stride = l.mGraphicBuffer->getStride();
@@ -634,6 +646,10 @@
return true;
}
case CodecBuffer::Type::ANW_BUFFER: {
+ if (t.nativeHandle.getNativeHandle() == nullptr) {
+ *l = OMXBuffer(sp<GraphicBuffer>(nullptr));
+ return true;
+ }
*l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
t.attr.anwBuffer.width,
t.attr.anwBuffer.height,
@@ -2080,6 +2096,7 @@
t->transformHint = l.transformHint;
t->numPendingBuffers = l.numPendingBuffers;
t->nextFrameNumber = l.nextFrameNumber;
+ t->bufferReplaced = l.bufferReplaced;
return true;
}
@@ -2105,6 +2122,7 @@
l->transformHint = t.transformHint;
l->numPendingBuffers = t.numPendingBuffers;
l->nextFrameNumber = t.nextFrameNumber;
+ l->bufferReplaced = t.bufferReplaced;
return true;
}
diff --git a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
index 037e9b2..a23b48a 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
@@ -78,10 +78,6 @@
return toBinderStatus(mBase->signalEndOfInputStream());
}
-::android::IBinder* LWGraphicBufferSource::onAsBinder() {
- return nullptr;
-}
-
// TWGraphicBufferSource
TWGraphicBufferSource::TWGraphicBufferSource(
sp<LGraphicBufferSource> const& base) : mBase(base) {
@@ -89,47 +85,51 @@
Return<void> TWGraphicBufferSource::configure(
const sp<IOmxNode>& omxNode, Dataspace dataspace) {
- return toHardwareStatus(mBase->configure(
- new LWOmxNode(omxNode),
- toRawDataspace(dataspace)));
+ mBase->configure(new LWOmxNode(omxNode), toRawDataspace(dataspace));
+ return Void();
}
Return<void> TWGraphicBufferSource::setSuspend(bool suspend) {
- return toHardwareStatus(mBase->setSuspend(suspend));
+ mBase->setSuspend(suspend);
+ return Void();
}
Return<void> TWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
int64_t repeatAfterUs) {
- return toHardwareStatus(mBase->setRepeatPreviousFrameDelayUs(
- repeatAfterUs));
+ mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::setMaxFps(float maxFps) {
- return toHardwareStatus(mBase->setMaxFps(maxFps));
+ mBase->setMaxFps(maxFps);
+ return Void();
}
Return<void> TWGraphicBufferSource::setTimeLapseConfig(
int64_t timePerFrameUs, int64_t timePerCaptureUs) {
- return toHardwareStatus(mBase->setTimeLapseConfig(
- timePerFrameUs, timePerCaptureUs));
+ mBase->setTimeLapseConfig(timePerFrameUs, timePerCaptureUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::setStartTimeUs(int64_t startTimeUs) {
- return toHardwareStatus(mBase->setStartTimeUs(startTimeUs));
+ mBase->setStartTimeUs(startTimeUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::setColorAspects(
const ColorAspects& aspects) {
- return toHardwareStatus(mBase->setColorAspects(toCompactColorAspects(
- aspects)));
+ mBase->setColorAspects(toCompactColorAspects(aspects));
+ return Void();
}
Return<void> TWGraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
- return toHardwareStatus(mBase->setTimeOffsetUs(timeOffsetUs));
+ mBase->setTimeOffsetUs(timeOffsetUs);
+ return Void();
}
Return<void> TWGraphicBufferSource::signalEndOfInputStream() {
- return toHardwareStatus(mBase->signalEndOfInputStream());
+ mBase->signalEndOfInputStream();
+ return Void();
}
} // namespace utils
diff --git a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
index 17a4486..d21de42 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
@@ -23,7 +23,7 @@
#include <frameworks/native/include/binder/Binder.h>
#include <IOMX.h>
-#include <android/IGraphicBufferSource.h>
+#include <android/BnGraphicBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <android/hardware/graphics/common/1.0/types.h>
@@ -60,10 +60,11 @@
*/
typedef ::android::IGraphicBufferSource LGraphicBufferSource;
+typedef ::android::BnGraphicBufferSource BnGraphicBufferSource;
typedef ::android::hardware::media::omx::V1_0::IGraphicBufferSource
TGraphicBufferSource;
-struct LWGraphicBufferSource : public LGraphicBufferSource {
+struct LWGraphicBufferSource : public BnGraphicBufferSource {
sp<TGraphicBufferSource> mBase;
LWGraphicBufferSource(sp<TGraphicBufferSource> const& base);
::android::binder::Status configure(
@@ -78,8 +79,6 @@
::android::binder::Status setColorAspects(int32_t aspects) override;
::android::binder::Status setTimeOffsetUs(int64_t timeOffsetsUs) override;
::android::binder::Status signalEndOfInputStream() override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWGraphicBufferSource : public TGraphicBufferSource {
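Several LW (legacy wrapper) classes in this change switch their base from the bare interface (IGraphicBufferSource, IOMX, IOMXBufferSource, ...) to the corresponding Bn* stub, which is why the hand-written onAsBinder() overrides returning nullptr disappear: the stub already is a binder object and supplies that answer. A rough standalone sketch of the idea follows; IBinderLike, InterfaceLike, and StubLike are simplified stand-ins, not the real libbinder classes.

```cpp
// Simplified stand-ins for the libbinder hierarchy; not the real classes.
#include <iostream>

struct IBinderLike {};                       // plays the role of ::android::IBinder

// Pure interface: it is not itself a binder object, so an implementer deriving
// from it directly has to answer onAsBinder() somehow (the removed overrides
// simply returned nullptr).
struct InterfaceLike {
    virtual ~InterfaceLike() = default;
    virtual void doWork() = 0;
protected:
    virtual IBinderLike* onAsBinder() = 0;
};

// "Bn" style stub: it *is* a binder object, so it answers onAsBinder() once
// for every concrete implementation that derives from it.
struct StubLike : public InterfaceLike, public IBinderLike {
protected:
    IBinderLike* onAsBinder() override { return this; }
};

// Wrapper comparable to LWGraphicBufferSource after the change: deriving from
// the stub means no boilerplate onAsBinder() override is needed here.
struct LegacyWrapper : public StubLike {
    void doWork() override { std::cout << "forwarded to the Treble side\n"; }
};

int main() {
    LegacyWrapper w;
    w.doWork();
    return 0;
}
```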
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmx.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmx.cpp
index 07c9255..00f40cd 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmx.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmx.cpp
@@ -79,10 +79,6 @@
return transStatus == NO_ERROR ? fnStatus : transStatus;
}
-::android::IBinder* LWOmx::onAsBinder() {
- return nullptr;
-}
-
// TWOmx
TWOmx::TWOmx(sp<IOMX> const& base) : mBase(base) {
}
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmx.h b/media/libstagefright/omx/hal/1.0/utils/WOmx.h
index 26affad..73adc55 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmx.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmx.h
@@ -45,6 +45,7 @@
using ::android::List;
using ::android::IOMX;
+using ::android::BnOMX;
/**
* Wrapper classes for conversion
@@ -55,7 +56,7 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmx : public IOMX {
+struct LWOmx : public BnOMX {
sp<IOmx> mBase;
LWOmx(sp<IOmx> const& base);
status_t listNodes(List<IOMX::ComponentInfo>* list) override;
@@ -66,8 +67,6 @@
status_t createInputSurface(
sp<::android::IGraphicBufferProducer>* bufferProducer,
sp<::android::IGraphicBufferSource>* bufferSource) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmx : public IOmx {
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.cpp
index 49f2706..cfb0cce 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.cpp
@@ -203,7 +203,8 @@
Return<void> TWOmxBufferProducer::connect(
const sp<IOmxProducerListener>& listener,
int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
- sp<IProducerListener> lListener = new LWOmxProducerListener(listener);
+ sp<IProducerListener> lListener = listener == nullptr ?
+ nullptr : new LWOmxProducerListener(listener);
IGraphicBufferProducer::QueueBufferOutput lOutput;
status_t status = mBase->connect(lListener,
static_cast<int>(api),
@@ -479,7 +480,8 @@
status_t LWOmxBufferProducer::connect(
const sp<IProducerListener>& listener, int api,
bool producerControlledByApp, QueueBufferOutput* output) {
- sp<IOmxProducerListener> tListener = new TWOmxProducerListener(listener);
+ sp<IOmxProducerListener> tListener = listener == nullptr ?
+ nullptr : new TWOmxProducerListener(listener);
status_t fnStatus;
status_t transStatus = toStatusT(mBase->connect(
tListener, static_cast<int32_t>(api), producerControlledByApp,
@@ -582,10 +584,6 @@
return transStatus == NO_ERROR ? fnStatus : transStatus;
}
-::android::IBinder* LWOmxBufferProducer::onAsBinder() {
- return nullptr;
-}
-
} // namespace utils
} // namespace V1_0
} // namespace omx
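Both connect() paths above now propagate a null listener instead of unconditionally wrapping it; wrapping a null pointer would hand the other side a wrapper whose every call dereferences null, whereas "no listener" is a legitimate state for connect(). A short hedged sketch of the guard, with illustrative Listener/ListenerWrapper/RemoteListener names:

```cpp
// Illustrative null-propagating wrap, mirroring the connect() changes above.
#include <iostream>
#include <memory>

struct Listener {
    virtual ~Listener() = default;
    virtual void onBufferReleased() = 0;
};

struct RemoteListener {                      // stand-in for the HIDL-side listener
    void notify() { std::cout << "buffer released\n"; }
};

struct ListenerWrapper : public Listener {
    std::shared_ptr<RemoteListener> base;
    explicit ListenerWrapper(std::shared_ptr<RemoteListener> b) : base(std::move(b)) {}
    void onBufferReleased() override { base->notify(); }
};

// Only wrap when there is something to wrap; a null listener stays null, so the
// consumer keeps treating "no listener" as a valid state.
std::shared_ptr<Listener> wrapListener(const std::shared_ptr<RemoteListener>& l) {
    return l == nullptr ? nullptr : std::make_shared<ListenerWrapper>(l);
}

int main() {
    auto wrapped = wrapListener(nullptr);
    std::cout << (wrapped == nullptr ? "no listener\n" : "listener set\n");

    auto real = wrapListener(std::make_shared<RemoteListener>());
    real->onBufferReleased();
    return 0;
}
```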
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.h b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.h
index 46abd27..a5d2961 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.h
@@ -47,6 +47,7 @@
using ::android::sp;
using ::android::IGraphicBufferProducer;
+using ::android::BnGraphicBufferProducer;
using ::android::IProducerListener;
struct TWOmxBufferProducer : public IOmxBufferProducer {
@@ -91,7 +92,7 @@
Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
};
-struct LWOmxBufferProducer : public IGraphicBufferProducer {
+struct LWOmxBufferProducer : public BnGraphicBufferProducer {
sp<IOmxBufferProducer> mBase;
LWOmxBufferProducer(sp<IOmxBufferProducer> const& base);
@@ -128,8 +129,6 @@
sp<Fence>* outFence, float outTransformMatrix[16]) override;
void getFrameTimestamps(FrameEventHistoryDelta* outDelta) override;
status_t getUniqueId(uint64_t* outId) const override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
} // namespace utils
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.cpp
index 1ebd9a7..f3f5b9d 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.cpp
@@ -61,38 +61,34 @@
::android::binder::Status status = toBinderStatus(
mBase->onInputBufferEmptied(
static_cast<uint32_t>(bufferId), fence));
- if (native_handle_delete(fenceNh) != 0) {
- return ::android::binder::Status::fromExceptionCode(
- ::android::binder::Status::EX_NULL_POINTER,
- "Cannot delete native handle");
- }
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
return status;
}
-::android::IBinder* LWOmxBufferSource::onAsBinder() {
- return nullptr;
-}
-
// TWOmxBufferSource
TWOmxBufferSource::TWOmxBufferSource(sp<IOMXBufferSource> const& base) :
mBase(base) {
}
Return<void> TWOmxBufferSource::onOmxExecuting() {
- return toHardwareStatus(mBase->onOmxExecuting());
+ mBase->onOmxExecuting();
+ return Void();
}
Return<void> TWOmxBufferSource::onOmxIdle() {
- return toHardwareStatus(mBase->onOmxIdle());
+ mBase->onOmxIdle();
+ return Void();
}
Return<void> TWOmxBufferSource::onOmxLoaded() {
- return toHardwareStatus(mBase->onOmxLoaded());
+ mBase->onOmxLoaded();
+ return Void();
}
Return<void> TWOmxBufferSource::onInputBufferAdded(uint32_t buffer) {
- return toHardwareStatus(mBase->onInputBufferAdded(
- static_cast<int32_t>(buffer)));
+ mBase->onInputBufferAdded(int32_t(buffer));
+ return Void();
}
Return<void> TWOmxBufferSource::onInputBufferEmptied(
@@ -102,8 +98,8 @@
return ::android::hardware::Status::fromExceptionCode(
::android::hardware::Status::EX_BAD_PARCELABLE);
}
- return toHardwareStatus(mBase->onInputBufferEmptied(
- static_cast<int32_t>(buffer), fenceParcelable));
+ mBase->onInputBufferEmptied(int32_t(buffer), fenceParcelable);
+ return Void();
}
} // namespace utils
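The hunk above replaces the old "delete and report an error" path with the native_handle_close() + native_handle_delete() pair: close releases the file descriptors stored in the handle, delete frees the handle struct itself, and skipping the close leaks the descriptors. A minimal sketch of that lifecycle for a handle wrapping a single fd; it assumes an Android build environment that provides <cutils/native_handle.h>.

```cpp
// Sketch of the close-then-delete pairing added above. Assumes an Android build
// environment providing <cutils/native_handle.h>; the pattern is the point.
#include <cutils/native_handle.h>
#include <fcntl.h>
#include <unistd.h>

int main() {
    int fd = open("/dev/null", O_RDONLY);
    if (fd < 0) return 1;

    // Wrap the fd: 1 fd, 0 ints. The handle now owns the descriptor slot.
    native_handle_t* nh = native_handle_create(1 /*numFds*/, 0 /*numInts*/);
    if (nh == nullptr) {
        close(fd);
        return 1;
    }
    nh->data[0] = fd;

    // ... hand nh to a call that does not take ownership ...

    // Tear-down: close() releases the wrapped fds, delete() frees the struct.
    // Calling only native_handle_delete(), as the old code did, leaks the fd.
    native_handle_close(nh);
    native_handle_delete(nh);
    return 0;
}
```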
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.h b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.h
index 3bf35c5..1214300 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.h
@@ -22,7 +22,7 @@
#include <hidl/Status.h>
#include <frameworks/native/include/binder/Binder.h>
-#include <android/IOMXBufferSource.h>
+#include <android/BnOMXBufferSource.h>
#include <OMXFenceParcelable.h>
namespace android {
@@ -45,6 +45,7 @@
using ::android::OMXFenceParcelable;
using ::android::IOMXBufferSource;
+using ::android::BnOMXBufferSource;
/**
* Wrapper classes for conversion
@@ -55,7 +56,7 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmxBufferSource : public IOMXBufferSource {
+struct LWOmxBufferSource : public BnOMXBufferSource {
sp<IOmxBufferSource> mBase;
LWOmxBufferSource(sp<IOmxBufferSource> const& base);
::android::binder::Status onOmxExecuting() override;
@@ -64,8 +65,6 @@
::android::binder::Status onInputBufferAdded(int32_t bufferID) override;
::android::binder::Status onInputBufferEmptied(
int32_t bufferID, OMXFenceParcelable const& fenceParcel) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmxBufferSource : public IOmxBufferSource {
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxNode.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.cpp
index dc04b67..3bb7a99 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxNode.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.cpp
@@ -196,6 +196,7 @@
}
status_t status = toStatusT(mBase->fillBuffer(
buffer, codecBuffer, fenceNh));
+ native_handle_close(fenceNh);
native_handle_delete(fenceNh);
return status;
}
@@ -217,6 +218,7 @@
flags,
toRawTicks(timestamp),
fenceNh));
+ native_handle_close(fenceNh);
native_handle_delete(fenceNh);
return status;
}
@@ -240,6 +242,7 @@
return NO_MEMORY;
}
status_t status = toStatusT(mBase->dispatchMessage(tMsg));
+ native_handle_close(nh);
native_handle_delete(nh);
return status;
}
@@ -249,10 +252,6 @@
return toStatusT(mBase->setQuirks(static_cast<uint32_t>(quirks)));;
}
-::android::IBinder* LWOmxNode::onAsBinder() {
- return nullptr;
-}
-
// TWOmxNode
TWOmxNode::TWOmxNode(sp<IOMXNode> const& base) : mBase(base) {
}
@@ -278,7 +277,8 @@
}
Return<Status> TWOmxNode::setParameter(
- uint32_t index, hidl_vec<uint8_t> const& params) {
+ uint32_t index, hidl_vec<uint8_t> const& inParams) {
+ hidl_vec<uint8_t> params(inParams);
return toStatus(mBase->setParameter(
toEnumIndexType(index),
static_cast<void const*>(params.data()),
@@ -298,7 +298,8 @@
}
Return<Status> TWOmxNode::setConfig(
- uint32_t index, const hidl_vec<uint8_t>& config) {
+ uint32_t index, const hidl_vec<uint8_t>& inConfig) {
+ hidl_vec<uint8_t> config(inConfig);
return toStatus(mBase->setConfig(
toEnumIndexType(index),
static_cast<void const*>(config.data()),
@@ -392,7 +393,7 @@
return toStatus(mBase->fillBuffer(
buffer,
omxBuffer,
- native_handle_read_fd(fence)));
+ dup(native_handle_read_fd(fence))));
}
Return<Status> TWOmxNode::emptyBuffer(
@@ -407,7 +408,7 @@
omxBuffer,
flags,
toOMXTicks(timestampUs),
- native_handle_read_fd(fence)));
+ dup(native_handle_read_fd(fence))));
}
Return<void> TWOmxNode::getExtensionIndex(
@@ -422,7 +423,7 @@
Return<Status> TWOmxNode::dispatchMessage(const Message& tMsg) {
omx_message lMsg;
- if (!wrapAs(&lMsg, tMsg)) {
+ if (!convertTo(&lMsg, tMsg)) {
return Status::BAD_VALUE;
}
return toStatus(mBase->dispatchMessage(lMsg));
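The fillBuffer() and emptyBuffer() hunks above now pass dup(native_handle_read_fd(fence)) instead of the raw descriptor: the fd inside the HIDL-owned fence handle stays owned by that handle (and is released by its own close/delete), while the legacy call receives a duplicate it is free to consume. A small sketch of that ownership split using plain POSIX calls; consumeFence() is a made-up callee standing in for an API that takes fd ownership.

```cpp
// Duplicating an fd before handing it to an API that takes ownership, as in the
// fillBuffer()/emptyBuffer() changes above. consumeFence() is hypothetical.
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

// Pretend legacy API: takes ownership of the fd and closes it when done.
static void consumeFence(int fd) {
    if (fd >= 0) close(fd);
}

int main() {
    int fenceFd = open("/dev/null", O_RDONLY);   // stands in for the fence fd
    if (fenceFd < 0) return 1;

    // Wrong: consumeFence(fenceFd) would close the fd out from under the
    // handle that still references it. Right: hand over a duplicate.
    consumeFence(dup(fenceFd));

    // The original descriptor is still valid for the handle's own teardown.
    if (fcntl(fenceFd, F_GETFD) != -1) {
        std::puts("original fd still open");
    }
    close(fenceFd);
    return 0;
}
```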
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxNode.h b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.h
index cb0b1a7..9c4bb4a 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxNode.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.h
@@ -56,7 +56,7 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmxNode : public IOMXNode {
+struct LWOmxNode : public BnOMXNode {
sp<IOmxNode> mBase;
LWOmxNode(sp<IOmxNode> const& base);
status_t freeNode() override;
@@ -103,8 +103,6 @@
// TODO: this is temporary, will be removed when quirks move to OMX side.
status_t setQuirks(OMX_U32 quirks) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmxNode : public IOmxNode {
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
index 4773572..db971f8 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
@@ -45,14 +45,11 @@
}
mBase->onMessages(tMessages);
for (auto& handle : handles) {
+ native_handle_close(handle);
native_handle_delete(handle);
}
}
-::android::IBinder* LWOmxObserver::onAsBinder() {
- return nullptr;
-}
-
// TWOmxObserver
TWOmxObserver::TWOmxObserver(sp<IOMXObserver> const& base) : mBase(base) {
}
@@ -61,7 +58,7 @@
std::list<omx_message> lMessages;
for (size_t i = 0; i < tMessages.size(); ++i) {
lMessages.push_back(omx_message{});
- wrapAs(&lMessages.back(), tMessages[i]);
+ convertTo(&lMessages.back(), tMessages[i]);
}
mBase->onMessages(lMessages);
return Return<void>();
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.h b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.h
index b1e2eb1..b9eb412 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.h
@@ -43,6 +43,7 @@
using ::android::sp;
using ::android::IOMXObserver;
+using ::android::BnOMXObserver;
using ::android::omx_message;
/**
@@ -54,12 +55,10 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmxObserver : public IOMXObserver {
+struct LWOmxObserver : public BnOMXObserver {
sp<IOmxObserver> mBase;
LWOmxObserver(sp<IOmxObserver> const& base);
void onMessages(std::list<omx_message> const& lMessages) override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
struct TWOmxObserver : public IOmxObserver {
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.cpp
index d43215d..80b0f71 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.cpp
@@ -52,11 +52,6 @@
return static_cast<bool>(mBase->needsReleaseNotify());
}
-::android::IBinder* LWOmxProducerListener::onAsBinder() {
- return nullptr;
-}
-
-
} // namespace utils
} // namespace V1_0
} // namespace omx
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.h b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.h
index 5b5e830..2be077b 100644
--- a/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.h
@@ -42,6 +42,7 @@
using ::android::sp;
using ::android::IProducerListener;
+using ::android::BnProducerListener;
struct TWOmxProducerListener : public IOmxProducerListener {
sp<IProducerListener> mBase;
@@ -50,14 +51,12 @@
Return<bool> needsReleaseNotify() override;
};
-class LWOmxProducerListener : public IProducerListener {
+class LWOmxProducerListener : public BnProducerListener {
public:
sp<IOmxProducerListener> mBase;
LWOmxProducerListener(sp<IOmxProducerListener> const& base);
void onBufferReleased() override;
bool needsReleaseNotify() override;
-protected:
- ::android::IBinder* onAsBinder() override;
};
} // namespace utils
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 9f0f8bf..6c7ffcb 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -35,11 +35,12 @@
#include "AsyncIO.h"
#include "MtpFfsHandle.h"
+#include "mtp.h"
#define cpu_to_le16(x) htole16(x)
#define cpu_to_le32(x) htole32(x)
-#define FUNCTIONFS_ENDPOINT_ALLOC _IOR('g', 131, __u32)
+#define FUNCTIONFS_ENDPOINT_ALLOC _IOR('g', 231, __u32)
namespace {
@@ -58,11 +59,16 @@
// To get good performance, override these with
// higher values per device using the properties
// sys.usb.ffs.max_read and sys.usb.ffs.max_write
-constexpr int USB_FFS_MAX_WRITE = 32768;
-constexpr int USB_FFS_MAX_READ = 32768;
+constexpr int USB_FFS_MAX_WRITE = MTP_BUFFER_SIZE;
+constexpr int USB_FFS_MAX_READ = MTP_BUFFER_SIZE;
+
+static_assert(USB_FFS_MAX_WRITE > 0, "Max r/w values must be > 0!");
+static_assert(USB_FFS_MAX_READ > 0, "Max r/w values must be > 0!");
constexpr unsigned int MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
+constexpr size_t ENDPOINT_ALLOC_RETRIES = 10;
+
struct func_desc {
struct usb_interface_descriptor intf;
struct usb_endpoint_descriptor_no_audio sink;
@@ -344,28 +350,9 @@
if (mBulkIn > -1 || mBulkOut > -1 || mIntr > -1)
LOG(WARNING) << "Endpoints were not closed before configure!";
- mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
- if (mBulkIn < 0) {
- PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
- goto err;
- }
-
- mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
- if (mBulkOut < 0) {
- PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
- goto err;
- }
-
- mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
- if (mIntr < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open intr ep";
- goto err;
- }
-
return true;
err:
- closeEndpoints();
closeConfig();
return false;
}
@@ -447,6 +434,57 @@
int MtpFfsHandle::start() {
mLock.lock();
+
+ mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
+ if (mBulkIn < 0) {
+ PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
+ return -1;
+ }
+
+ mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
+ if (mBulkOut < 0) {
+ PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
+ return -1;
+ }
+
+ mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
+ if (mIntr < 0) {
+ PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open intr ep";
+ return -1;
+ }
+
+ mBuffer1.resize(MAX_FILE_CHUNK_SIZE);
+ mBuffer2.resize(MAX_FILE_CHUNK_SIZE);
+ posix_madvise(mBuffer1.data(), MAX_FILE_CHUNK_SIZE,
+ POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
+ posix_madvise(mBuffer2.data(), MAX_FILE_CHUNK_SIZE,
+ POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
+
+ // Get device specific r/w size
+ mMaxWrite = android::base::GetIntProperty("sys.usb.ffs.max_write", USB_FFS_MAX_WRITE);
+ mMaxRead = android::base::GetIntProperty("sys.usb.ffs.max_read", USB_FFS_MAX_READ);
+
+ size_t attempts = 0;
+ while (mMaxWrite >= USB_FFS_MAX_WRITE && mMaxRead >= USB_FFS_MAX_READ &&
+ attempts < ENDPOINT_ALLOC_RETRIES) {
+ // If larger contiguous chunks of memory aren't available, attempt to try
+ // smaller allocations.
+ if (ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxWrite)) ||
+ ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxRead))) {
+ if (errno == ENODEV) {
+ // Driver hasn't enabled endpoints yet.
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+ attempts += 1;
+ continue;
+ }
+ mMaxWrite /= 2;
+ mMaxRead /= 2;
+ } else {
+ return 0;
+ }
+ }
+ // Try to start MtpServer anyway, with the smallest max r/w values
+ PLOG(ERROR) << "Functionfs could not allocate any memory!";
return 0;
}
@@ -465,13 +503,6 @@
return -1;
}
- // Get device specific r/w size
- mMaxWrite = android::base::GetIntProperty("sys.usb.ffs.max_write", 0);
- mMaxRead = android::base::GetIntProperty("sys.usb.ffs.max_read", 0);
- if (!mMaxWrite)
- mMaxWrite = USB_FFS_MAX_WRITE;
- if (!mMaxRead)
- mMaxRead = USB_FFS_MAX_READ;
return 0;
}
@@ -480,26 +511,6 @@
mLock.unlock();
}
-class ScopedEndpointBufferAlloc {
-private:
- const int mFd;
- const unsigned int mAllocSize;
-public:
- ScopedEndpointBufferAlloc(int fd, unsigned alloc_size) :
- mFd(fd),
- mAllocSize(alloc_size) {
- if (ioctl(mFd, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mAllocSize)))
- PLOG(ERROR) << "FFS endpoint alloc failed!";
- }
-
- ~ScopedEndpointBufferAlloc() {
- if (ioctl(mFd, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(0)))
- PLOG(ERROR) << "FFS endpoint alloc reset failed!";
- }
-
- DISALLOW_COPY_AND_ASSIGN(ScopedEndpointBufferAlloc);
-};
-
/* Read from USB and write to a local file. */
int MtpFfsHandle::receiveFile(mtp_file_range mfr) {
// When receiving files, the incoming length is given in 32 bits.
@@ -507,15 +518,8 @@
uint32_t file_length = mfr.length;
uint64_t offset = lseek(mfr.fd, 0, SEEK_CUR);
- int buf1_len = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
- std::vector<char> buf1(buf1_len);
- char* data = buf1.data();
-
- // If necessary, allocate a second buffer for background r/w
- int buf2_len = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE),
- file_length - MAX_FILE_CHUNK_SIZE);
- std::vector<char> buf2(std::max(0, buf2_len));
- char *data2 = buf2.data();
+ char *data = mBuffer1.data();
+ char *data2 = mBuffer2.data();
struct aiocb aio;
aio.aio_fildes = mfr.fd;
@@ -527,9 +531,6 @@
bool write = false;
posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
- posix_madvise(data, buf1_len, POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
- posix_madvise(data2, buf2_len, POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
- ScopedEndpointBufferAlloc scoped_alloc(mBulkOut, mMaxRead);
// Break down the file into pieces that fit in buffers
while (file_length > 0 || write) {
@@ -596,7 +597,7 @@
uint64_t file_length = mfr.length;
uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
file_length + sizeof(mtp_data_header));
- uint64_t offset = 0;
+ uint64_t offset = mfr.offset;
struct usb_endpoint_descriptor mBulkIn_desc;
int packet_size;
@@ -607,21 +608,15 @@
packet_size = mBulkIn_desc.wMaxPacketSize;
}
- int init_read_len = packet_size - sizeof(mtp_data_header);
- int buf1_len = std::max(static_cast<uint64_t>(packet_size), std::min(
- static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length - init_read_len));
- std::vector<char> buf1(buf1_len);
- char *data = buf1.data();
+ // If file_length is larger than a size_t, truncating would produce the wrong comparison.
+ // Instead, promote the left side to 64 bits, then truncate the small result.
+ int init_read_len = std::min(
+ static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);
- // If necessary, allocate a second buffer for background r/w
- int buf2_len = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE),
- file_length - MAX_FILE_CHUNK_SIZE - init_read_len);
- std::vector<char> buf2(std::max(0, buf2_len));
- char *data2 = buf2.data();
+ char *data = mBuffer1.data();
+ char *data2 = mBuffer2.data();
posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
- posix_madvise(data, buf1_len, POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
- posix_madvise(data2, buf2_len, POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
struct aiocb aio;
aio.aio_fildes = mfr.fd;
@@ -642,12 +637,10 @@
if (TEMP_FAILURE_RETRY(pread(mfr.fd, reinterpret_cast<char*>(data) +
sizeof(mtp_data_header), init_read_len, offset))
!= init_read_len) return -1;
+ if (writeHandle(mBulkIn, data, sizeof(mtp_data_header) + init_read_len) == -1) return -1;
+ if (file_length == static_cast<unsigned>(init_read_len)) return 0;
file_length -= init_read_len;
offset += init_read_len;
- if (writeHandle(mBulkIn, data, packet_size) == -1) return -1;
- if (file_length == 0) return 0;
-
- ScopedEndpointBufferAlloc scoped_alloc(mBulkIn, mMaxWrite);
// Break down the file into pieces that fit in buffers
while(file_length > 0) {
@@ -672,7 +665,7 @@
}
if (file_length > 0) {
- length = std::min((uint64_t) MAX_FILE_CHUNK_SIZE, file_length);
+ length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
// Queue up another read
aio.aio_buf = data;
aio.aio_offset = offset;
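The reworked MtpFfsHandle::start() above opens the endpoints, sets up the two persistent buffers, then loops on FUNCTIONFS_ENDPOINT_ALLOC: ENODEV means the driver has not enabled the endpoints yet (sleep 100 ms and retry, up to ENDPOINT_ALLOC_RETRIES), any other failure halves the requested read/write sizes and tries again, and even total failure still lets MtpServer start with the smallest sizes. The skeleton of that fallback loop is sketched below, with the ioctls replaced by a stand-in tryAlloc() so it runs anywhere; the numeric limits are illustrative assumptions.

```cpp
// Skeleton of the allocate-with-fallback loop in MtpFfsHandle::start().
// tryAlloc() is a stand-in for the FUNCTIONFS_ENDPOINT_ALLOC ioctls.
#include <cerrno>
#include <chrono>
#include <cstdio>
#include <thread>

constexpr int kMaxRwFloor = 16384;           // assumed MTP_BUFFER_SIZE stand-in
constexpr size_t kAllocRetries = 10;

// Pretend the kernel only has 64 KiB of contiguous memory per endpoint.
static bool tryAlloc(int bytes) {
    if (bytes > 64 * 1024) { errno = ENOMEM; return false; }
    return true;
}

int main() {
    int maxWrite = 256 * 1024;               // device-tuned starting request
    int maxRead = 256 * 1024;

    size_t attempts = 0;
    while (maxWrite >= kMaxRwFloor && maxRead >= kMaxRwFloor &&
           attempts < kAllocRetries) {
        if (!tryAlloc(maxWrite) || !tryAlloc(maxRead)) {
            if (errno == ENODEV) {
                // Endpoints not enabled yet: wait and retry the same size.
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
                ++attempts;
                continue;
            }
            // Not enough contiguous memory: ask for half as much next time.
            maxWrite /= 2;
            maxRead /= 2;
        } else {
            std::printf("allocated %d-byte endpoint buffers\n", maxWrite);
            return 0;
        }
    }
    // Mirror of the fallthrough above: run anyway with the smallest sizes.
    std::printf("no kernel allocation; continuing with %d bytes\n", maxWrite);
    return 0;
}
```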
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index 9cd4dcf..b4d5a97 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -48,6 +48,9 @@
int mMaxWrite;
int mMaxRead;
+ std::vector<char> mBuffer1;
+ std::vector<char> mBuffer2;
+
public:
int read(void *data, int len);
int write(const void *data, int len);
@@ -56,6 +59,10 @@
int sendFile(mtp_file_range mfr);
int sendEvent(mtp_event me);
+ /**
+ * Open ffs endpoints and allocate necessary kernel and user memory.
+ * Will sleep until endpoints are enabled, for up to 1 second.
+ */
int start();
void close();
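With mBuffer1/mBuffer2 added to the handle, the per-transfer vectors and their posix_madvise calls move out of receiveFile()/sendFile() into a one-time setup in start(). A hedged sketch of that setup is below; the 3 MiB chunk size is an illustrative assumption, and the two advice hints are issued as separate calls to stay within strictly valid posix_madvise values.

```cpp
// One-time buffer setup comparable to the persistent mBuffer1/mBuffer2 above.
#include <sys/mman.h>
#include <cstdio>
#include <vector>

constexpr size_t kFileChunkSize = 3 * 1024 * 1024;   // assumed chunk size

// The patch passes SEQUENTIAL|WILLNEED in one call; issuing the two hints
// separately expresses the same intent with strictly valid advice values.
static void adviseBuffer(std::vector<char>& buf) {
    posix_madvise(buf.data(), buf.size(), POSIX_MADV_SEQUENTIAL);
    posix_madvise(buf.data(), buf.size(), POSIX_MADV_WILLNEED);
}

int main() {
    // Allocated once, then reused for every send/receive instead of being
    // re-created and re-advised per file as the old code did.
    std::vector<char> buffer1(kFileChunkSize);
    std::vector<char> buffer2(kFileChunkSize);
    adviseBuffer(buffer1);
    adviseBuffer(buffer2);
    std::printf("buffers ready: %zu bytes each\n", buffer1.size());
    return 0;
}
```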
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 8d56c16..753d833 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -179,6 +179,7 @@
if (sHandle->start()) {
ALOGE("Failed to start usb driver!");
+ sHandle->close();
return;
}
diff --git a/media/mtp/tests/MtpFfsHandle_test.cpp b/media/mtp/tests/MtpFfsHandle_test.cpp
index b511041..e575148 100644
--- a/media/mtp/tests/MtpFfsHandle_test.cpp
+++ b/media/mtp/tests/MtpFfsHandle_test.cpp
@@ -20,6 +20,7 @@
#include <fcntl.h>
#include <gtest/gtest.h>
#include <memory>
+#include <random>
#include <string>
#include <unistd.h>
#include <utils/Log.h>
@@ -28,6 +29,8 @@
namespace android {
+constexpr int MAX_FILE_CHUNK_SIZE = 3 * 1024 * 1024;
+
constexpr int TEST_PACKET_SIZE = 512;
constexpr int SMALL_MULT = 30;
constexpr int MED_MULT = 510;
@@ -42,6 +45,10 @@
"BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o"
"r implied.\n * Se";
+/**
+ * Functional tests for the MtpFfsHandle class. Ensures header and data integrity
+ * by mocking ffs endpoints as pipes to capture input / output.
+ */
class MtpFfsHandleTest : public ::testing::Test {
protected:
std::unique_ptr<IMtpHandle> handle;
@@ -72,6 +79,9 @@
EXPECT_EQ(pipe(fd), 0);
intr.reset(fd[0]);
ffs_handle->mIntr.reset(fd[1]);
+
+ ffs_handle->mBuffer1.resize(MAX_FILE_CHUNK_SIZE);
+ ffs_handle->mBuffer2.resize(MAX_FILE_CHUNK_SIZE);
}
~MtpFfsHandleTest() {}
@@ -138,6 +148,7 @@
mtp_file_range mfr;
mfr.command = 42;
mfr.transaction_id = 1337;
+ mfr.offset = 0;
int size = TEST_PACKET_SIZE * SMALL_MULT;
char buf[size + sizeof(mtp_data_header) + 1];
buf[size + sizeof(mtp_data_header)] = '\0';
@@ -166,6 +177,7 @@
mtp_file_range mfr;
mfr.command = 42;
mfr.transaction_id = 1337;
+ mfr.offset = 0;
int size = TEST_PACKET_SIZE * MED_MULT;
char buf[size + sizeof(mtp_data_header) + 1];
buf[size + sizeof(mtp_data_header)] = '\0';
@@ -189,6 +201,70 @@
EXPECT_EQ(header->transaction_id, static_cast<unsigned int>(1337));
}
+TEST_F(MtpFfsHandleTest, testSendFileMedPartial) {
+ std::stringstream ss;
+ mtp_file_range mfr;
+ mfr.fd = dummy_file.fd;
+ mfr.command = 42;
+ mfr.transaction_id = 1337;
+ int size = TEST_PACKET_SIZE * MED_MULT;
+ char buf[size + 1];
+ buf[size] = '\0';
+
+ for (int i = 0; i < MED_MULT; i++)
+ ss << dummyDataStr;
+
+ EXPECT_EQ(write(dummy_file.fd, ss.str().c_str(), size), size);
+
+ std::random_device rd;
+ std::mt19937 gen(rd());
+ std::uniform_int_distribution<> dis(1, TEST_PACKET_SIZE);
+ int offset = 0;
+ while (offset != size) {
+ mfr.offset = offset;
+ int length = std::min(size - offset, dis(gen));
+ mfr.length = length;
+ char temp_buf[length + sizeof(mtp_data_header)];
+ EXPECT_EQ(handle->sendFile(mfr), 0);
+
+ EXPECT_EQ(read(bulk_in, temp_buf, length + sizeof(mtp_data_header)),
+ static_cast<long>(length + sizeof(mtp_data_header)));
+
+ struct mtp_data_header *header = reinterpret_cast<struct mtp_data_header*>(temp_buf);
+ EXPECT_EQ(header->length, static_cast<unsigned int>(length + sizeof(mtp_data_header)));
+ EXPECT_EQ(header->type, static_cast<unsigned int>(2));
+ EXPECT_EQ(header->command, static_cast<unsigned int>(42));
+ EXPECT_EQ(header->transaction_id, static_cast<unsigned int>(1337));
+ memcpy(buf + offset, temp_buf + sizeof(mtp_data_header), length);
+ offset += length;
+ }
+ EXPECT_STREQ(buf, ss.str().c_str());
+}
+
+TEST_F(MtpFfsHandleTest, testSendFileEmpty) {
+ mtp_file_range mfr;
+ mfr.command = 42;
+ mfr.transaction_id = 1337;
+ mfr.offset = 0;
+ int size = 0;
+ char buf[size + sizeof(mtp_data_header) + 1];
+ buf[size + sizeof(mtp_data_header)] = '\0';
+
+ mfr.length = size;
+ mfr.fd = dummy_file.fd;
+
+ EXPECT_EQ(handle->sendFile(mfr), 0);
+
+ EXPECT_EQ(read(bulk_in, buf, size + sizeof(mtp_data_header)),
+ static_cast<long>(size + sizeof(mtp_data_header)));
+
+ struct mtp_data_header *header = reinterpret_cast<struct mtp_data_header*>(buf);
+ EXPECT_EQ(header->length, static_cast<unsigned int>(size + sizeof(mtp_data_header)));
+ EXPECT_EQ(header->type, static_cast<unsigned int>(2));
+ EXPECT_EQ(header->command, static_cast<unsigned int>(42));
+ EXPECT_EQ(header->transaction_id, static_cast<unsigned int>(1337));
+}
+
TEST_F(MtpFfsHandleTest, testSendEvent) {
struct mtp_event event;
event.length = TEST_PACKET_SIZE;
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index cdce932..7a9240b 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -172,7 +172,8 @@
return NULL;
}
- status_t err = drm->createPlugin(uuid);
+ String8 nullPackageName;
+ status_t err = drm->createPlugin(uuid, nullPackageName);
if (err != OK) {
return NULL;
diff --git a/media/utils/ISchedulingPolicyService.cpp b/media/utils/ISchedulingPolicyService.cpp
index f5bfe20..22fbc97 100644
--- a/media/utils/ISchedulingPolicyService.cpp
+++ b/media/utils/ISchedulingPolicyService.cpp
@@ -37,13 +37,15 @@
{
}
- virtual int requestPriority(int32_t pid, int32_t tid, int32_t prio, bool asynchronous)
+ virtual int requestPriority(int32_t pid, int32_t tid,
+ int32_t prio, bool isForApp, bool asynchronous)
{
Parcel data, reply;
data.writeInterfaceToken(ISchedulingPolicyService::getInterfaceDescriptor());
data.writeInt32(pid);
data.writeInt32(tid);
data.writeInt32(prio);
+ data.writeBool(isForApp);
uint32_t flags = asynchronous ? IBinder::FLAG_ONEWAY : 0;
status_t status = remote()->transact(REQUEST_PRIORITY_TRANSACTION, data, &reply, flags);
if (status != NO_ERROR) {
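Adding isForApp to requestPriority() means the proxy above writes one extra bool into the parcel, and the service-side stub (not shown in this diff) must read it back at exactly the same position; Binder parcels are positional, so a mismatch silently shifts every later field. A simplified stand-in showing that write/read ordering contract; FakeParcel is a toy, not libbinder's Parcel.

```cpp
// Positional write/read contract behind the requestPriority() change above.
// FakeParcel is a toy stand-in for android::Parcel.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

class FakeParcel {
    std::vector<uint8_t> buf_;
    size_t readPos_ = 0;
public:
    void writeInt32(int32_t v) {
        const uint8_t* p = reinterpret_cast<const uint8_t*>(&v);
        buf_.insert(buf_.end(), p, p + sizeof(v));
    }
    void writeBool(bool b) { writeInt32(b ? 1 : 0); }
    int32_t readInt32() {
        int32_t v;
        std::memcpy(&v, buf_.data() + readPos_, sizeof(v));
        readPos_ += sizeof(v);
        return v;
    }
    bool readBool() { return readInt32() != 0; }
};

int main() {
    // Proxy side (as in the BpSchedulingPolicyService change): pid, tid, prio, isForApp.
    FakeParcel data;
    data.writeInt32(1234);      // pid
    data.writeInt32(5678);      // tid
    data.writeInt32(2);         // prio
    data.writeBool(true);       // isForApp -- the newly inserted field

    // Stub side must unpack in the identical order, including the new bool.
    assert(data.readInt32() == 1234);
    assert(data.readInt32() == 5678);
    assert(data.readInt32() == 2);
    assert(data.readBool() == true);
    return 0;
}
```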
diff --git a/media/utils/ISchedulingPolicyService.h b/media/utils/ISchedulingPolicyService.h
index b94b191..1015677 100644
--- a/media/utils/ISchedulingPolicyService.h
+++ b/media/utils/ISchedulingPolicyService.h
@@ -27,7 +27,7 @@
DECLARE_META_INTERFACE(SchedulingPolicyService);
virtual int requestPriority(/*pid_t*/int32_t pid, /*pid_t*/int32_t tid,
- int32_t prio, bool asynchronous) = 0;
+ int32_t prio, bool isForApp, bool asynchronous) = 0;
};
diff --git a/media/utils/SchedulingPolicyService.cpp b/media/utils/SchedulingPolicyService.cpp
index 17ee9bc..d7055ef 100644
--- a/media/utils/SchedulingPolicyService.cpp
+++ b/media/utils/SchedulingPolicyService.cpp
@@ -28,7 +28,7 @@
static const String16 _scheduling_policy("scheduling_policy");
static Mutex sMutex;
-int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool asynchronous)
+int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool isForApp, bool asynchronous)
{
// FIXME merge duplicated code related to service lookup, caching, and error recovery
int ret;
@@ -47,7 +47,7 @@
sSchedulingPolicyService = sps;
sMutex.unlock();
}
- ret = sps->requestPriority(pid, tid, prio, asynchronous);
+ ret = sps->requestPriority(pid, tid, prio, isForApp, asynchronous);
if (ret != DEAD_OBJECT) {
break;
}
diff --git a/media/utils/include/mediautils/SchedulingPolicyService.h b/media/utils/include/mediautils/SchedulingPolicyService.h
index a9870d4..47d8734 100644
--- a/media/utils/include/mediautils/SchedulingPolicyService.h
+++ b/media/utils/include/mediautils/SchedulingPolicyService.h
@@ -24,7 +24,7 @@
// The asynchronous parameter should be 'true' to return immediately,
// after the request is enqueued but not necessarily executed.
// The default value 'false' means to return after request has been enqueued and executed.
-int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool asynchronous = false);
+int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool isForApp, bool asynchronous = false);
} // namespace android
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index aa2cd95..0c620f4 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -30,7 +30,8 @@
Effects.cpp \
PatchPanel.cpp \
StateQueue.cpp \
- BufLog.cpp
+ BufLog.cpp \
+ TypedLogger.cpp
LOCAL_C_INCLUDES := \
$(TOPDIR)frameworks/av/services/audiopolicy \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index a248912..3d1f268 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -70,6 +70,8 @@
//#define BUFLOG_NDEBUG 0
#include <BufLog.h>
+#include "TypedLogger.h"
+
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging message. In
@@ -97,6 +99,7 @@
uint32_t AudioFlinger::mScreenState;
+
#ifdef TEE_SINK
bool AudioFlinger::mTeeSinkInputEnabled = false;
bool AudioFlinger::mTeeSinkOutputEnabled = false;
@@ -111,6 +114,9 @@
// we define a minimum time during which a global effect is considered enabled.
static const nsecs_t kMinGlobalEffectEnabletimeNs = seconds(7200);
+Mutex gLock;
+wp<AudioFlinger> gAudioFlinger;
+
// ----------------------------------------------------------------------------
std::string formatToString(audio_format_t format) {
@@ -200,6 +206,8 @@
mPatchPanel = new PatchPanel(this);
mMode = AUDIO_MODE_NORMAL;
+
+ gAudioFlinger = this;
}
AudioFlinger::~AudioFlinger()
@@ -232,6 +240,84 @@
}
}
+//static
+__attribute__ ((visibility ("default")))
+status_t MmapStreamInterface::openMmapStream(MmapStreamInterface::stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const MmapStreamInterface::Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface)
+{
+ sp<AudioFlinger> af;
+ {
+ Mutex::Autolock _l(gLock);
+ af = gAudioFlinger.promote();
+ }
+ status_t ret = NO_INIT;
+ if (af != 0) {
+ ret = af->openMmapStream(
+ direction, attr, config, client, deviceId, callback, interface);
+ }
+ return ret;
+}
+
+status_t AudioFlinger::openMmapStream(MmapStreamInterface::stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const MmapStreamInterface::Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface)
+{
+ status_t ret = initCheck();
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+
+ audio_session_t sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
+ audio_io_handle_t io;
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
+ audio_config_t fullConfig = AUDIO_CONFIG_INITIALIZER;
+ fullConfig.sample_rate = config->sample_rate;
+ fullConfig.channel_mask = config->channel_mask;
+ fullConfig.format = config->format;
+ ret = AudioSystem::getOutputForAttr(attr, &io,
+ sessionId,
+ &streamType, client.clientUid,
+ &fullConfig,
+ (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT),
+ *deviceId, &portId);
+ } else {
+ ret = AudioSystem::getInputForAttr(attr, &io,
+ sessionId,
+ client.clientPid,
+ client.clientUid,
+ config,
+ AUDIO_INPUT_FLAG_MMAP_NOIRQ, *deviceId, &portId);
+ }
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+
+ // at this stage, a MmapThread was created when openOutput() or openInput() was called by
+ // audio policy manager and we can retrieve it
+ sp<MmapThread> thread = mMmapThreads.valueFor(io);
+ if (thread != 0) {
+ interface = new MmapThreadHandle(thread);
+ thread->configure(attr, streamType, sessionId, callback, portId);
+ } else {
+ ret = NO_INIT;
+ }
+
+ ALOGV("%s done status %d portId %d", __FUNCTION__, ret, portId);
+
+ return ret;
+}
+
static const char * const audio_interfaces[] = {
AUDIO_HARDWARE_MODULE_ID_PRIMARY,
AUDIO_HARDWARE_MODULE_ID_A2DP,
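The new static MmapStreamInterface::openMmapStream() entry point in the hunk above reaches the live AudioFlinger instance through a file-scope wp<AudioFlinger> set in the constructor and promoted under gLock, so a caller arriving before construction or after teardown gets NO_INIT rather than a dangling pointer. A rough sketch of that pattern follows, with std::weak_ptr/std::shared_ptr and std::mutex standing in for wp<>/sp<> and Mutex; the names are illustrative.

```cpp
// Weak-reference entry point, mirroring gAudioFlinger/gLock above.
// std::shared_ptr/std::weak_ptr stand in for android::sp/wp.
#include <iostream>
#include <memory>
#include <mutex>

struct Service {
    int openStream() { return 0; }           // 0 == NO_ERROR
};

static std::mutex gLock;
static std::weak_ptr<Service> gService;      // set when the service starts

// Static entry point comparable to MmapStreamInterface::openMmapStream().
int openStreamViaSingleton() {
    std::shared_ptr<Service> svc;
    {
        std::lock_guard<std::mutex> l(gLock);
        svc = gService.lock();               // wp<>::promote() equivalent
    }
    if (svc == nullptr) {
        return -19;                          // NO_INIT-style failure
    }
    return svc->openStream();
}

int main() {
    std::cout << "before init: " << openStreamViaSingleton() << "\n";   // -19
    auto service = std::make_shared<Service>();
    {
        std::lock_guard<std::mutex> l(gLock);
        gService = service;                  // constructor-time registration
    }
    std::cout << "after init: " << openStreamViaSingleton() << "\n";    // 0
    return 0;
}
```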
@@ -396,6 +482,11 @@
mRecordThreads.valueAt(i)->dump(fd, args);
}
+ // dump mmap threads
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ mMmapThreads.valueAt(i)->dump(fd, args);
+ }
+
// dump orphan effect chains
if (mOrphanEffectChains.size() != 0) {
write(fd, " Orphan Effect Chains\n", strlen(" Orphan Effect Chains\n"));
@@ -906,11 +997,9 @@
// assigned to HALs which do not have master mute support will apply master
// mute during the mix operation. Threads with HALs which do support master
// mute will simply ignore the setting.
- for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- if (mPlaybackThreads.valueAt(i)->isDuplicating()) {
- continue;
- }
- mPlaybackThreads.valueAt(i)->setMasterMute(muted);
+ Vector<VolumeInterface *> volumeInterfaces = getAllVolumeInterfaces_l();
+ for (size_t i = 0; i < volumeInterfaces.size(); i++) {
+ volumeInterfaces[i]->setMasterMute(muted);
}
return NO_ERROR;
@@ -941,12 +1030,12 @@
status_t AudioFlinger::checkStreamType(audio_stream_type_t stream) const
{
if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
- ALOGW("setStreamVolume() invalid stream %d", stream);
+ ALOGW("checkStreamType() invalid stream %d", stream);
return BAD_VALUE;
}
pid_t caller = IPCThreadState::self()->getCallingPid();
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && caller != getpid_cached) {
- ALOGW("setStreamVolume() pid %d cannot use internal stream type %d", caller, stream);
+ ALOGW("checkStreamType() pid %d cannot use internal stream type %d", caller, stream);
return PERMISSION_DENIED;
}
@@ -968,22 +1057,22 @@
ALOG_ASSERT(stream != AUDIO_STREAM_PATCH, "attempt to change AUDIO_STREAM_PATCH volume");
AutoMutex lock(mLock);
- PlaybackThread *thread = NULL;
+ Vector<VolumeInterface *> volumeInterfaces;
if (output != AUDIO_IO_HANDLE_NONE) {
- thread = checkPlaybackThread_l(output);
- if (thread == NULL) {
+ VolumeInterface *volumeInterface = getVolumeInterface_l(output);
+ if (volumeInterface == NULL) {
return BAD_VALUE;
}
+ volumeInterfaces.add(volumeInterface);
}
mStreamTypes[stream].volume = value;
- if (thread == NULL) {
- for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
- }
- } else {
- thread->setStreamVolume(stream, value);
+ if (volumeInterfaces.size() == 0) {
+ volumeInterfaces = getAllVolumeInterfaces_l();
+ }
+ for (size_t i = 0; i < volumeInterfaces.size(); i++) {
+ volumeInterfaces[i]->setStreamVolume(stream, value);
}
return NO_ERROR;
@@ -1009,8 +1098,10 @@
AutoMutex lock(mLock);
mStreamTypes[stream].mute = muted;
- for (size_t i = 0; i < mPlaybackThreads.size(); i++)
- mPlaybackThreads.valueAt(i)->setStreamMute(stream, muted);
+ Vector<VolumeInterface *> volumeInterfaces = getAllVolumeInterfaces_l();
+ for (size_t i = 0; i < volumeInterfaces.size(); i++) {
+ volumeInterfaces[i]->setStreamMute(stream, muted);
+ }
return NO_ERROR;
}
@@ -1025,11 +1116,12 @@
AutoMutex lock(mLock);
float volume;
if (output != AUDIO_IO_HANDLE_NONE) {
- PlaybackThread *thread = checkPlaybackThread_l(output);
- if (thread == NULL) {
- return 0.0f;
+ VolumeInterface *volumeInterface = getVolumeInterface_l(output);
+ if (volumeInterface != NULL) {
+ volume = volumeInterface->streamVolume(stream);
+ } else {
+ volume = 0.0f;
}
- volume = thread->streamVolume(stream);
} else {
volume = streamVolume_l(stream);
}
@@ -1130,6 +1222,9 @@
thread = checkPlaybackThread_l(ioHandle);
if (thread == 0) {
thread = checkRecordThread_l(ioHandle);
+ if (thread == 0) {
+ thread = checkMmapThread_l(ioHandle);
+ }
} else if (thread == primaryPlaybackThread_l()) {
// indicate output device change to all input threads for pre processing
AudioParameter param = AudioParameter(keyValuePairs);
@@ -1171,15 +1266,17 @@
return out_s8;
}
- PlaybackThread *playbackThread = checkPlaybackThread_l(ioHandle);
- if (playbackThread != NULL) {
- return playbackThread->getParameters(keys);
+ ThreadBase *thread = (ThreadBase *)checkPlaybackThread_l(ioHandle);
+ if (thread == NULL) {
+ thread = (ThreadBase *)checkRecordThread_l(ioHandle);
+ if (thread == NULL) {
+ thread = (ThreadBase *)checkMmapThread_l(ioHandle);
+ if (thread == NULL) {
+ return String8("");
+ }
+ }
}
- RecordThread *recordThread = checkRecordThread_l(ioHandle);
- if (recordThread != NULL) {
- return recordThread->getParameters(keys);
- }
- return String8("");
+ return thread->getParameters(keys);
}
size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
@@ -1785,7 +1882,7 @@
// ----------------------------------------------------------------------------
-sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
+sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
audio_devices_t devices,
@@ -1841,22 +1938,34 @@
mHardwareStatus = AUDIO_HW_IDLE;
if (status == NO_ERROR) {
-
- PlaybackThread *thread;
- if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
- ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
- } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
- || !isValidPcmSinkFormat(config->format)
- || !isValidPcmSinkChannelMask(config->channel_mask)) {
- thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
- ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
+ if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
+ sp<MmapPlaybackThread> thread =
+ new MmapPlaybackThread(this, *output, outHwDev, outputStream,
+ devices, AUDIO_DEVICE_NONE, mSystemReady);
+ mMmapThreads.add(*output, thread);
+ ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
+ *output, thread.get());
+ return thread;
} else {
- thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
- ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
+ sp<PlaybackThread> thread;
+ if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
+ ALOGV("openOutput_l() created offload output: ID %d thread %p",
+ *output, thread.get());
+ } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
+ || !isValidPcmSinkFormat(config->format)
+ || !isValidPcmSinkChannelMask(config->channel_mask)) {
+ thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
+ ALOGV("openOutput_l() created direct output: ID %d thread %p",
+ *output, thread.get());
+ } else {
+ thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
+ ALOGV("openOutput_l() created mixer output: ID %d thread %p",
+ *output, thread.get());
+ }
+ mPlaybackThreads.add(*output, thread);
+ return thread;
}
- mPlaybackThreads.add(*output, thread);
- return thread;
}
return 0;
@@ -1870,36 +1979,42 @@
uint32_t *latencyMs,
audio_output_flags_t flags)
{
- ALOGI("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
- module,
+ ALOGI("openOutput() this %p, module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
+ this, module,
(devices != NULL) ? *devices : 0,
config->sample_rate,
config->format,
config->channel_mask,
flags);
- if (*devices == AUDIO_DEVICE_NONE) {
+ if (devices == NULL || *devices == AUDIO_DEVICE_NONE) {
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
- sp<PlaybackThread> thread = openOutput_l(module, output, config, *devices, address, flags);
+ sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
if (thread != 0) {
- *latencyMs = thread->latency();
+ if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ *latencyMs = playbackThread->latency();
- // notify client processes of the new output creation
- thread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
+ // notify client processes of the new output creation
+ playbackThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
- // the first primary output opened designates the primary hw device
- if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
- ALOGI("Using module %d has the primary audio interface", module);
- mPrimaryHardwareDev = thread->getOutput()->audioHwDev;
+ // the first primary output opened designates the primary hw device
+ if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
+ ALOGI("Using module %d has the primary audio interface", module);
+ mPrimaryHardwareDev = playbackThread->getOutput()->audioHwDev;
- AutoMutex lock(mHardwareLock);
- mHardwareStatus = AUDIO_HW_SET_MODE;
- mPrimaryHardwareDev->hwDevice()->setMode(mMode);
- mHardwareStatus = AUDIO_HW_IDLE;
+ AutoMutex lock(mHardwareLock);
+ mHardwareStatus = AUDIO_HW_SET_MODE;
+ mPrimaryHardwareDev->hwDevice()->setMode(mMode);
+ mHardwareStatus = AUDIO_HW_IDLE;
+ }
+ } else {
+ MmapThread *mmapThread = (MmapThread *)thread.get();
+ mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
}
return NO_ERROR;
}
@@ -1938,54 +2053,68 @@
{
// keep strong reference on the playback thread so that
// it is not destroyed while exit() is executed
- sp<PlaybackThread> thread;
+ sp<PlaybackThread> playbackThread;
+ sp<MmapPlaybackThread> mmapThread;
{
Mutex::Autolock _l(mLock);
- thread = checkPlaybackThread_l(output);
- if (thread == NULL) {
- return BAD_VALUE;
- }
+ playbackThread = checkPlaybackThread_l(output);
+ if (playbackThread != NULL) {
+ ALOGV("closeOutput() %d", output);
- ALOGV("closeOutput() %d", output);
-
- if (thread->type() == ThreadBase::MIXER) {
- for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- if (mPlaybackThreads.valueAt(i)->isDuplicating()) {
- DuplicatingThread *dupThread =
- (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
- dupThread->removeOutputTrack((MixerThread *)thread.get());
+ if (playbackThread->type() == ThreadBase::MIXER) {
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ if (mPlaybackThreads.valueAt(i)->isDuplicating()) {
+ DuplicatingThread *dupThread =
+ (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
+ dupThread->removeOutputTrack((MixerThread *)playbackThread.get());
+ }
}
}
- }
- mPlaybackThreads.removeItem(output);
- // save all effects to the default thread
- if (mPlaybackThreads.size()) {
- PlaybackThread *dstThread = checkPlaybackThread_l(mPlaybackThreads.keyAt(0));
- if (dstThread != NULL) {
- // audioflinger lock is held here so the acquisition order of thread locks does not
- // matter
- Mutex::Autolock _dl(dstThread->mLock);
- Mutex::Autolock _sl(thread->mLock);
- Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l();
- for (size_t i = 0; i < effectChains.size(); i ++) {
- moveEffectChain_l(effectChains[i]->sessionId(), thread.get(), dstThread, true);
+ mPlaybackThreads.removeItem(output);
+ // save all effects to the default thread
+ if (mPlaybackThreads.size()) {
+ PlaybackThread *dstThread = checkPlaybackThread_l(mPlaybackThreads.keyAt(0));
+ if (dstThread != NULL) {
+ // audioflinger lock is held here so the acquisition order of thread locks does not
+ // matter
+ Mutex::Autolock _dl(dstThread->mLock);
+ Mutex::Autolock _sl(playbackThread->mLock);
+ Vector< sp<EffectChain> > effectChains = playbackThread->getEffectChains_l();
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ moveEffectChain_l(effectChains[i]->sessionId(), playbackThread.get(), dstThread, true);
+ }
}
}
+ } else {
+ mmapThread = (MmapPlaybackThread *)checkMmapThread_l(output);
+ if (mmapThread == 0) {
+ return BAD_VALUE;
+ }
+ mMmapThreads.removeItem(output);
+ ALOGV("closing mmapThread %p", mmapThread.get());
}
const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
ioDesc->mIoHandle = output;
ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
}
- thread->exit();
// The thread entity (active unit of execution) is no longer running here,
// but the ThreadBase container still exists.
- if (!thread->isDuplicating()) {
- closeOutputFinish(thread);
+ if (playbackThread != 0) {
+ playbackThread->exit();
+ if (!playbackThread->isDuplicating()) {
+ closeOutputFinish(playbackThread);
+ }
+ } else if (mmapThread != 0) {
+ ALOGV("mmapThread exit()");
+ mmapThread->exit();
+ AudioStreamOut *out = mmapThread->clearOutput();
+ ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
+ // from now on thread->mOutput is NULL
+ delete out;
}
-
return NO_ERROR;
}
@@ -2049,7 +2178,7 @@
return BAD_VALUE;
}
- sp<RecordThread> thread = openInput_l(module, input, config, *devices, address, source, flags);
+ sp<ThreadBase> thread = openInput_l(module, input, config, *devices, address, source, flags);
if (thread != 0) {
// notify client processes of the new input creation
@@ -2059,7 +2188,7 @@
return NO_INIT;
}
-sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module,
+sp<AudioFlinger::ThreadBase> AudioFlinger::openInput_l(audio_module_handle_t module,
audio_io_handle_t *input,
audio_config_t *config,
audio_devices_t devices,
@@ -2119,74 +2248,82 @@
}
if (status == NO_ERROR && inStream != 0) {
-
-#ifdef TEE_SINK
- // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
- // or (re-)create if current Pipe is idle and does not match the new format
- sp<NBAIO_Sink> teeSink;
- enum {
- TEE_SINK_NO, // don't copy input
- TEE_SINK_NEW, // copy input using a new pipe
- TEE_SINK_OLD, // copy input using an existing pipe
- } kind;
- NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
- audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
- if (!mTeeSinkInputEnabled) {
- kind = TEE_SINK_NO;
- } else if (!Format_isValid(format)) {
- kind = TEE_SINK_NO;
- } else if (mRecordTeeSink == 0) {
- kind = TEE_SINK_NEW;
- } else if (mRecordTeeSink->getStrongCount() != 1) {
- kind = TEE_SINK_NO;
- } else if (Format_isEqual(format, mRecordTeeSink->format())) {
- kind = TEE_SINK_OLD;
- } else {
- kind = TEE_SINK_NEW;
- }
- switch (kind) {
- case TEE_SINK_NEW: {
- Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
- size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {format};
- ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- PipeReader *pipeReader = new PipeReader(*pipe);
- numCounterOffers = 0;
- index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mRecordTeeSink = pipe;
- mRecordTeeSource = pipeReader;
- teeSink = pipe;
- }
- break;
- case TEE_SINK_OLD:
- teeSink = mRecordTeeSink;
- break;
- case TEE_SINK_NO:
- default:
- break;
- }
-#endif
-
AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream, flags);
-
- // Start record thread
- // RecordThread requires both input and output device indication to forward to audio
- // pre processing modules
- sp<RecordThread> thread = new RecordThread(this,
- inputStream,
- *input,
- primaryOutputDevice_l(),
- devices,
- mSystemReady
+ if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
+ sp<MmapCaptureThread> thread =
+ new MmapCaptureThread(this, *input,
+ inHwDev, inputStream,
+ primaryOutputDevice_l(), devices, mSystemReady);
+ mMmapThreads.add(*input, thread);
+ ALOGV("openInput_l() created mmap capture thread: ID %d thread %p", *input, thread.get());
+ return thread;
+ } else {
#ifdef TEE_SINK
- , teeSink
+ // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
+ // or (re-)create if current Pipe is idle and does not match the new format
+ sp<NBAIO_Sink> teeSink;
+ enum {
+ TEE_SINK_NO, // don't copy input
+ TEE_SINK_NEW, // copy input using a new pipe
+ TEE_SINK_OLD, // copy input using an existing pipe
+ } kind;
+ NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
+ audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
+ if (!mTeeSinkInputEnabled) {
+ kind = TEE_SINK_NO;
+ } else if (!Format_isValid(format)) {
+ kind = TEE_SINK_NO;
+ } else if (mRecordTeeSink == 0) {
+ kind = TEE_SINK_NEW;
+ } else if (mRecordTeeSink->getStrongCount() != 1) {
+ kind = TEE_SINK_NO;
+ } else if (Format_isEqual(format, mRecordTeeSink->format())) {
+ kind = TEE_SINK_OLD;
+ } else {
+ kind = TEE_SINK_NEW;
+ }
+ switch (kind) {
+ case TEE_SINK_NEW: {
+ Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
+ size_t numCounterOffers = 0;
+ const NBAIO_Format offers[1] = {format};
+ ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ PipeReader *pipeReader = new PipeReader(*pipe);
+ numCounterOffers = 0;
+ index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ mRecordTeeSink = pipe;
+ mRecordTeeSource = pipeReader;
+ teeSink = pipe;
+ }
+ break;
+ case TEE_SINK_OLD:
+ teeSink = mRecordTeeSink;
+ break;
+ case TEE_SINK_NO:
+ default:
+ break;
+ }
#endif
- );
- mRecordThreads.add(*input, thread);
- ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
- return thread;
+
+ // Start record thread
+ // RecordThread requires both input and output device indication to forward to audio
+ // pre processing modules
+ sp<RecordThread> thread = new RecordThread(this,
+ inputStream,
+ *input,
+ primaryOutputDevice_l(),
+ devices,
+ mSystemReady
+#ifdef TEE_SINK
+ , teeSink
+#endif
+ );
+ mRecordThreads.add(*input, thread);
+ ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
+ return thread;
+ }
}
*input = AUDIO_IO_HANDLE_NONE;
@@ -2202,60 +2339,73 @@
{
// keep strong reference on the record thread so that
// it is not destroyed while exit() is executed
- sp<RecordThread> thread;
+ sp<RecordThread> recordThread;
+ sp<MmapCaptureThread> mmapThread;
{
Mutex::Autolock _l(mLock);
- thread = checkRecordThread_l(input);
- if (thread == 0) {
- return BAD_VALUE;
- }
+ recordThread = checkRecordThread_l(input);
+ if (recordThread != 0) {
+ ALOGV("closeInput() %d", input);
- ALOGV("closeInput() %d", input);
-
- // If we still have effect chains, it means that a client still holds a handle
- // on at least one effect. We must either move the chain to an existing thread with the
- // same session ID or put it aside in case a new record thread is opened for a
- // new capture on the same session
- sp<EffectChain> chain;
- {
- Mutex::Autolock _sl(thread->mLock);
- Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l();
- // Note: maximum one chain per record thread
- if (effectChains.size() != 0) {
- chain = effectChains[0];
- }
- }
- if (chain != 0) {
- // first check if a record thread is already opened with a client on the same session.
- // This should only happen in case of overlap between one thread tear down and the
- // creation of its replacement
- size_t i;
- for (i = 0; i < mRecordThreads.size(); i++) {
- sp<RecordThread> t = mRecordThreads.valueAt(i);
- if (t == thread) {
- continue;
- }
- if (t->hasAudioSession(chain->sessionId()) != 0) {
- Mutex::Autolock _l(t->mLock);
- ALOGV("closeInput() found thread %d for effect session %d",
- t->id(), chain->sessionId());
- t->addEffectChain_l(chain);
- break;
+ // If we still have effect chains, it means that a client still holds a handle
+ // on at least one effect. We must either move the chain to an existing thread with the
+ // same session ID or put it aside in case a new record thread is opened for a
+ // new capture on the same session
+ sp<EffectChain> chain;
+ {
+ Mutex::Autolock _sl(recordThread->mLock);
+ Vector< sp<EffectChain> > effectChains = recordThread->getEffectChains_l();
+ // Note: maximum one chain per record thread
+ if (effectChains.size() != 0) {
+ chain = effectChains[0];
}
}
- // put the chain aside if we could not find a record thread with the same session id.
- if (i == mRecordThreads.size()) {
- putOrphanEffectChain_l(chain);
+ if (chain != 0) {
+ // first check if a record thread is already opened with a client on the same session.
+ // This should only happen in case of overlap between one thread tear down and the
+ // creation of its replacement
+ size_t i;
+ for (i = 0; i < mRecordThreads.size(); i++) {
+ sp<RecordThread> t = mRecordThreads.valueAt(i);
+ if (t == recordThread) {
+ continue;
+ }
+ if (t->hasAudioSession(chain->sessionId()) != 0) {
+ Mutex::Autolock _l(t->mLock);
+ ALOGV("closeInput() found thread %d for effect session %d",
+ t->id(), chain->sessionId());
+ t->addEffectChain_l(chain);
+ break;
+ }
+ }
+ // put the chain aside if we could not find a record thread with the same session id.
+ if (i == mRecordThreads.size()) {
+ putOrphanEffectChain_l(chain);
+ }
}
+ mRecordThreads.removeItem(input);
+ } else {
+ mmapThread = (MmapCaptureThread *)checkMmapThread_l(input);
+ if (mmapThread == 0) {
+ return BAD_VALUE;
+ }
+ mMmapThreads.removeItem(input);
}
const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
ioDesc->mIoHandle = input;
ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
- mRecordThreads.removeItem(input);
}
// FIXME: calling thread->exit() without mLock held should not be needed anymore now that
// we have a different lock for notification client
- closeInputFinish(thread);
+ if (recordThread != 0) {
+ closeInputFinish(recordThread);
+ } else if (mmapThread != 0) {
+ mmapThread->exit();
+ AudioStreamIn *in = mmapThread->clearInput();
+ ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
+ // from now on thread->mInput is NULL
+ delete in;
+ }
return NO_ERROR;
}
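To make the control flow of the reworked closeInput() easier to follow, here is a minimal stand-alone sketch of the same lookup-then-teardown pattern, using plain std::map registries and hypothetical thread stubs rather than the real AudioFlinger classes: the handle is resolved against the record-thread table first, then the MMAP table, and exactly one teardown path runs.

#include <cstdio>
#include <map>
#include <memory>

// Hypothetical stand-ins for RecordThread / MmapCaptureThread, illustration only.
struct RecordThreadStub      { void exit() { std::printf("record thread torn down\n"); } };
struct MmapCaptureThreadStub { void exit() { std::printf("mmap capture thread torn down\n"); } };

struct InputRegistry {
    std::map<int, std::shared_ptr<RecordThreadStub>> recordThreads;
    std::map<int, std::shared_ptr<MmapCaptureThreadStub>> mmapThreads;

    // Mirrors the closeInput() rework: try the record-thread table first, fall back
    // to the MMAP table, remove the entry, then tear down whichever thread matched.
    bool closeInput(int handle) {
        std::shared_ptr<RecordThreadStub> record;
        std::shared_ptr<MmapCaptureThreadStub> mmap;
        if (auto it = recordThreads.find(handle); it != recordThreads.end()) {
            record = it->second;              // effect-chain migration elided here
            recordThreads.erase(it);
        } else if (auto it = mmapThreads.find(handle); it != mmapThreads.end()) {
            mmap = it->second;
            mmapThreads.erase(it);
        } else {
            return false;                     // BAD_VALUE in the real code
        }
        if (record) record->exit(); else mmap->exit();
        return true;
    }
};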
@@ -2283,7 +2433,9 @@
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
thread->invalidateTracks(stream);
}
-
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ mMmapThreads[i]->invalidateTracks(stream);
+ }
return NO_ERROR;
}
@@ -2433,16 +2585,18 @@
// checkThread_l() must be called with AudioFlinger::mLock held
AudioFlinger::ThreadBase *AudioFlinger::checkThread_l(audio_io_handle_t ioHandle) const
{
- ThreadBase *thread = NULL;
- switch (audio_unique_id_get_use(ioHandle)) {
- case AUDIO_UNIQUE_ID_USE_OUTPUT:
- thread = checkPlaybackThread_l(ioHandle);
- break;
- case AUDIO_UNIQUE_ID_USE_INPUT:
- thread = checkRecordThread_l(ioHandle);
- break;
- default:
- break;
+ ThreadBase *thread = checkMmapThread_l(ioHandle);
+ if (thread == 0) {
+ switch (audio_unique_id_get_use(ioHandle)) {
+ case AUDIO_UNIQUE_ID_USE_OUTPUT:
+ thread = checkPlaybackThread_l(ioHandle);
+ break;
+ case AUDIO_UNIQUE_ID_USE_INPUT:
+ thread = checkRecordThread_l(ioHandle);
+ break;
+ default:
+ break;
+ }
}
return thread;
}
@@ -2466,6 +2620,42 @@
return mRecordThreads.valueFor(input).get();
}
+// checkMmapThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::MmapThread *AudioFlinger::checkMmapThread_l(audio_io_handle_t io) const
+{
+ return mMmapThreads.valueFor(io).get();
+}
+
+
+// getVolumeInterface_l() must be called with AudioFlinger::mLock held
+AudioFlinger::VolumeInterface *AudioFlinger::getVolumeInterface_l(audio_io_handle_t output) const
+{
+ VolumeInterface *volumeInterface = (VolumeInterface *)mPlaybackThreads.valueFor(output).get();
+ if (volumeInterface == nullptr) {
+ MmapThread *mmapThread = mMmapThreads.valueFor(output).get();
+ if (mmapThread != nullptr) {
+ if (mmapThread->isOutput()) {
+ volumeInterface = (VolumeInterface *)mmapThread;
+ }
+ }
+ }
+ return volumeInterface;
+}
+
+Vector <AudioFlinger::VolumeInterface *> AudioFlinger::getAllVolumeInterfaces_l() const
+{
+ Vector <VolumeInterface *> volumeInterfaces;
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ volumeInterfaces.add((VolumeInterface *)mPlaybackThreads.valueAt(i).get());
+ }
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ if (mMmapThreads.valueAt(i)->isOutput()) {
+ volumeInterfaces.add((VolumeInterface *)mMmapThreads.valueAt(i).get());
+ }
+ }
+ return volumeInterfaces;
+}
+
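getVolumeInterface_l() and getAllVolumeInterfaces_l() let callers treat classic playback threads and MMAP output threads uniformly for volume purposes. A toy sketch of that aggregation, with an illustrative VolumeHolder interface and std containers standing in for the keyed vectors:

#include <memory>
#include <vector>

struct VolumeHolder {                        // toy analogue of VolumeInterface
    virtual ~VolumeHolder() = default;
    virtual void setMasterVolume(float value) = 0;
};

struct PlaybackStub : VolumeHolder {
    void setMasterVolume(float) override {}
};

struct MmapStub : VolumeHolder {
    explicit MmapStub(bool output) : mIsOutput(output) {}
    void setMasterVolume(float) override {}
    bool isOutput() const { return mIsOutput; }
    bool mIsOutput;
};

// Every playback thread carries volume; an MMAP thread does only when it is an output.
std::vector<VolumeHolder*> allVolumeHolders(
        const std::vector<std::unique_ptr<PlaybackStub>>& playback,
        const std::vector<std::unique_ptr<MmapStub>>& mmap) {
    std::vector<VolumeHolder*> holders;
    for (const auto& p : playback) holders.push_back(p.get());
    for (const auto& m : mmap) if (m->isOutput()) holders.push_back(m.get());
    return holders;
}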
audio_unique_id_t AudioFlinger::nextUniqueId(audio_unique_id_use_t use)
{
// This is the internal API, so it is OK to assert on bad parameter.
@@ -2757,7 +2947,7 @@
break;
}
}
- if (io == 0) {
+ if (io == AUDIO_IO_HANDLE_NONE) {
for (size_t i = 0; i < mRecordThreads.size(); i++) {
if (mRecordThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
io = mRecordThreads.keyAt(i);
@@ -2765,6 +2955,14 @@
}
}
}
+ if (io == AUDIO_IO_HANDLE_NONE) {
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ if (mMmapThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
+ io = mMmapThreads.keyAt(i);
+ break;
+ }
+ }
+ }
// If no output thread contains the requested session ID, default to
// first output. The effect chain will be moved to the correct output
// thread when a track with the same session ID is created
@@ -2777,9 +2975,12 @@
if (thread == NULL) {
thread = checkPlaybackThread_l(io);
if (thread == NULL) {
- ALOGE("createEffect() unknown output thread");
- lStatus = BAD_VALUE;
- goto Exit;
+ thread = checkMmapThread_l(io);
+ if (thread == NULL) {
+ ALOGE("createEffect() unknown output thread");
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
}
} else {
// Check if one effect chain was awaiting for an effect to be created on this
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index e97d1ed..a7142ef 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -34,6 +34,8 @@
#include <media/IAudioRecord.h>
#include <media/AudioSystem.h>
#include <media/AudioTrack.h>
+#include <media/MmapStreamInterface.h>
+#include <media/MmapStreamCallback.h>
#include <utils/Atomic.h>
#include <utils/Errors.h>
@@ -100,6 +102,7 @@
public BnAudioFlinger
{
friend class BinderService<AudioFlinger>; // for AudioFlinger()
+
public:
static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
@@ -283,6 +286,14 @@
sp<NBLog::Writer> newWriter_l(size_t size, const char *name);
void unregisterWriter(const sp<NBLog::Writer>& writer);
sp<EffectsFactoryHalInterface> getEffectsFactory();
+
+ status_t openMmapStream(MmapStreamInterface::stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const MmapStreamInterface::Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface);
private:
static const size_t kLogMemorySize = 40 * 1024;
sp<MemoryDealer> mLogMemoryDealer; // == 0 when NBLog is disabled
@@ -290,6 +301,7 @@
// for as long as possible. The memory is only freed when it is needed for another log writer.
Vector< sp<NBLog::Writer> > mUnregisteredWriters;
Mutex mUnregisteredWritersLock;
+
public:
class SyncEvent;
@@ -533,19 +545,40 @@
void stop_nonvirtual();
};
+ // Mmap stream control interface implementation. Each MmapThreadHandle controls one
+ // MmapPlaybackThread or MmapCaptureThread instance.
+ class MmapThreadHandle : public MmapStreamInterface {
+ public:
+ explicit MmapThreadHandle(const sp<MmapThread>& thread);
+ virtual ~MmapThreadHandle();
+
+ // MmapStreamInterface virtuals
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+ virtual status_t start(const MmapStreamInterface::Client& client, audio_port_handle_t *handle);
+ virtual status_t stop(audio_port_handle_t handle);
+
+ private:
+ sp<MmapThread> mThread;
+ };
ThreadBase *checkThread_l(audio_io_handle_t ioHandle) const;
PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
- sp<RecordThread> openInput_l(audio_module_handle_t module,
+ MmapThread *checkMmapThread_l(audio_io_handle_t io) const;
+ VolumeInterface *getVolumeInterface_l(audio_io_handle_t output) const;
+ Vector <VolumeInterface *> getAllVolumeInterfaces_l() const;
+
+ sp<ThreadBase> openInput_l(audio_module_handle_t module,
audio_io_handle_t *input,
audio_config_t *config,
audio_devices_t device,
const String8& address,
audio_source_t source,
audio_input_flags_t flags);
- sp<PlaybackThread> openOutput_l(audio_module_handle_t module,
+ sp<ThreadBase> openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
audio_devices_t devices,
@@ -722,6 +755,12 @@
// list of sessions for which a valid HW A/V sync ID was retrieved from the HAL
DefaultKeyedVector< audio_session_t , audio_hw_sync_t >mHwAvSyncIds;
+
+ // list of MMAP stream control threads. Those threads allow for wake lock, routing
+ // and volume control for activity on the associated MMAP stream at the HAL.
+ // Audio data transfer is directly handled by the client creating the MMAP stream
+ DefaultKeyedVector< audio_io_handle_t, sp<MmapThread> > mMmapThreads;
+
private:
sp<Client> registerPid(pid_t pid); // always returns non-0
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 70929e4..1262746 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1225,10 +1225,9 @@
mEnabled = false;
} else {
if (thread != 0) {
- if (thread->type() == ThreadBase::OFFLOAD) {
- PlaybackThread *t = (PlaybackThread *)thread.get();
- Mutex::Autolock _l(t->mLock);
- t->broadcast_l();
+ if (thread->type() == ThreadBase::OFFLOAD || thread->type() == ThreadBase::MMAP) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->broadcast_l();
}
if (!effect->isOffloadable()) {
if (thread->type() == ThreadBase::OFFLOAD) {
@@ -1270,10 +1269,9 @@
sp<ThreadBase> thread = effect->thread().promote();
if (thread != 0) {
thread->checkSuspendOnEffectEnabled(effect, false, effect->sessionId());
- if (thread->type() == ThreadBase::OFFLOAD) {
- PlaybackThread *t = (PlaybackThread *)thread.get();
- Mutex::Autolock _l(t->mLock);
- t->broadcast_l();
+ if (thread->type() == ThreadBase::OFFLOAD || thread->type() == ThreadBase::MMAP) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->broadcast_l();
}
}
@@ -1581,6 +1579,9 @@
// Must be called with EffectChain::mLock locked
void AudioFlinger::EffectChain::clearInputBuffer_l(const sp<ThreadBase>& thread)
{
+ if (mInBuffer == NULL) {
+ return;
+ }
// TODO: This will change in the future, depending on multichannel
// and sample format changes for effects.
// Currently effects processing is only available for stereo, AUDIO_FORMAT_PCM_16_BIT
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
new file mode 100644
index 0000000..e4fe8ac
--- /dev/null
+++ b/services/audioflinger/MmapTracks.h
@@ -0,0 +1,60 @@
+/*
+**
+** Copyright 2017, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+ #error This header file should only be included from AudioFlinger.h
+#endif
+
+// playback track
+class MmapTrack : public TrackBase {
+public:
+ MmapTrack(ThreadBase *thread,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_session_t sessionId,
+ uid_t uid,
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
+ virtual ~MmapTrack();
+
+ // TrackBase virtual
+ virtual status_t initCheck() const;
+ virtual status_t start(AudioSystem::sync_event_t event,
+ audio_session_t triggerSession);
+ virtual void stop();
+ virtual bool isFastTrack() const { return false; }
+
+ static void appendDumpHeader(String8& result);
+ void dump(char* buffer, size_t size);
+
+protected:
+ friend class MmapThread;
+
+ MmapTrack(const MmapTrack&);
+ MmapTrack& operator = (const MmapTrack&);
+
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+ // releaseBuffer() not overridden
+
+ // ExtendedAudioBufferProvider interface
+ virtual size_t framesReady() const;
+ virtual int64_t framesReleased() const;
+    virtual void onTimestamp(const ExtendedTimestamp &timestamp);
+
+}; // end of MmapTrack
+
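An MmapTrack only book-keeps an active client (uid, port, session); the PCM data itself moves through the shared memory region the client maps, so the buffer-provider entry points are deliberately inert. A tiny sketch of that idea with hypothetical names:

#include <cstddef>

struct BufferStub { size_t frameCount = 0; void* raw = nullptr; };

// Bookkeeping-only track: identifies a client but never carries audio data.
struct MmapClientRecord {
    int uid = 0;
    int portId = 0;
    int sessionId = 0;

    int getNextBuffer(BufferStub* buffer) {   // nothing to hand out, ever
        buffer->frameCount = 0;
        buffer->raw = nullptr;
        return -1;                            // INVALID_OPERATION in the real code
    }
    size_t framesReady() const { return 0; }
};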
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 591a49e..d7c0728 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -274,13 +274,14 @@
audio_devices_t device = patch->sinks[0].ext.device.type;
String8 address = String8(patch->sinks[0].ext.device.address);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- newPatch->mPlaybackThread = audioflinger->openOutput_l(
- patch->sinks[0].ext.device.hw_module,
- &output,
- &config,
- device,
- address,
- AUDIO_OUTPUT_FLAG_NONE);
+ sp<ThreadBase> thread = audioflinger->openOutput_l(
+ patch->sinks[0].ext.device.hw_module,
+ &output,
+ &config,
+ device,
+ address,
+ AUDIO_OUTPUT_FLAG_NONE);
+ newPatch->mPlaybackThread = (PlaybackThread *)thread.get();
ALOGV("audioflinger->openOutput_l() returned %p",
newPatch->mPlaybackThread.get());
if (newPatch->mPlaybackThread == 0) {
@@ -310,13 +311,14 @@
config.format = newPatch->mPlaybackThread->format();
}
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- newPatch->mRecordThread = audioflinger->openInput_l(srcModule,
+ sp<ThreadBase> thread = audioflinger->openInput_l(srcModule,
&input,
&config,
device,
address,
AUDIO_SOURCE_MIC,
AUDIO_INPUT_FLAG_NONE);
+ newPatch->mRecordThread = (RecordThread *)thread.get();
ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
newPatch->mRecordThread.get(), config.channel_mask);
if (newPatch->mRecordThread == 0) {
@@ -332,10 +334,13 @@
sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
- status = BAD_VALUE;
- goto exit;
+ thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("createAudioPatch() bad capture I/O handle %d",
+ patch->sinks[0].ext.mix.handle);
+ status = BAD_VALUE;
+ goto exit;
+ }
}
status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
} else {
@@ -376,10 +381,13 @@
sp<ThreadBase> thread =
audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
- status = BAD_VALUE;
- goto exit;
+ thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("createAudioPatch() bad playback I/O handle %d",
+ patch->sources[0].ext.mix.handle);
+ status = BAD_VALUE;
+ goto exit;
+ }
}
if (thread == audioflinger->primaryPlaybackThread_l()) {
AudioParameter param = AudioParameter();
@@ -606,10 +614,13 @@
sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
- status = BAD_VALUE;
- break;
+ thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("releaseAudioPatch() bad capture I/O handle %d",
+ patch->sinks[0].ext.mix.handle);
+ status = BAD_VALUE;
+ break;
+ }
}
status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} else {
@@ -629,10 +640,13 @@
sp<ThreadBase> thread =
audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
- status = BAD_VALUE;
- break;
+ thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("releaseAudioPatch() bad playback I/O handle %d",
+ patch->sources[0].ext.mix.handle);
+ status = BAD_VALUE;
+ break;
+ }
}
status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} break;
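All four patch-panel call sites above apply the same fallback: resolve the mix handle as a record or playback thread first, and only then as an MMAP thread. A compact sketch of that lookup, using std::map and placeholder thread types:

#include <map>

struct ThreadStub       { virtual ~ThreadStub() = default; };
struct RecordThreadStub : ThreadStub {};
struct MmapThreadStub   : ThreadStub {};

// An I/O handle may now name either a classic capture thread or an MMAP control thread.
ThreadStub* findCaptureThread(const std::map<int, RecordThreadStub*>& records,
                              const std::map<int, MmapThreadStub*>& mmaps,
                              int handle) {
    if (auto it = records.find(handle); it != records.end()) return it->second;
    if (auto it = mmaps.find(handle); it != mmaps.end()) return it->second;
    return nullptr;                           // caller reports BAD_VALUE
}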
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 27e4627..72a3777 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -118,10 +118,9 @@
public:
void triggerEvents(AudioSystem::sync_event_t type);
- void invalidate();
+ virtual void invalidate();
void disable();
- bool isInvalid() const { return mIsInvalid; }
int fastIndex() const { return mFastIndex; }
protected:
@@ -166,7 +165,6 @@
volatile float mCachedVolume; // combined master volume and stream type volume;
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
- bool mIsInvalid; // non-resettable latch, set by invalidate()
sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 848e531..72ebc93 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -42,7 +42,7 @@
void destroy();
- void invalidate();
+ virtual void invalidate();
// clear the buffer overflow flag
void clearOverflow() { mOverflow = false; }
// set the buffer overflow flag and return previous value
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b1ede30..2009fca 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -74,6 +74,9 @@
#include "AutoPark.h"
+#include <pthread.h>
+#include "TypedLogger.h"
+
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging message. In
@@ -507,7 +510,8 @@
mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
// mName will be set by concrete (non-virtual) subclass
mDeathRecipient(new PMDeathRecipient(this)),
- mSystemReady(systemReady)
+ mSystemReady(systemReady),
+ mSignalPending(false)
{
memset(&mPatch, 0, sizeof(struct audio_patch));
}
@@ -610,16 +614,17 @@
sendConfigEvent_l(configEvent);
}
-void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio)
+void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
Mutex::Autolock _l(mLock);
- sendPrioConfigEvent_l(pid, tid, prio);
+ sendPrioConfigEvent_l(pid, tid, prio, forApp);
}
// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
+void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(
+ pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
- sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio);
+ sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio, forApp);
sendConfigEvent_l(configEvent);
}
@@ -679,7 +684,7 @@
case CFG_EVENT_PRIO: {
PrioConfigEventData *data = (PrioConfigEventData *)event->mData.get();
// FIXME Need to understand why this has to be done asynchronously
- int err = requestPriority(data->mPid, data->mTid, data->mPrio,
+ int err = requestPriority(data->mPid, data->mTid, data->mPrio, data->mForApp,
true /*asynchronous*/);
if (err != 0) {
ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
@@ -867,6 +872,8 @@
return String16("AudioIn");
case OFFLOAD:
return String16("AudioOffload");
+ case MMAP:
+ return String16("Mmap");
default:
ALOG_ASSERT(false);
return String16("AudioUnknown");
@@ -1591,6 +1598,16 @@
}
}
+void AudioFlinger::ThreadBase::broadcast_l()
+{
+ // Thread could be blocked waiting for async
+ // so signal it to handle state changes immediately
+ // If threadLoop is currently unlocked a signal of mWaitWorkCV will
+ // be lost so we also flag to prevent it blocking on mWaitWorkCV
+ mSignalPending = true;
+ mWaitWorkCV.broadcast();
+}
+
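The mSignalPending flag hoisted into ThreadBase exists to avoid a lost wakeup: if the loop happens to be outside the lock when broadcast_l() fires, the flag is still observed on the next pass. A minimal std::condition_variable sketch of the same pattern (illustrative names, not the AudioFlinger API):

#include <condition_variable>
#include <mutex>

struct WorkSignal {
    std::mutex lock;
    std::condition_variable waitWork;
    bool signalPending = false;

    void broadcast() {
        std::lock_guard<std::mutex> l(lock);  // the real broadcast_l() relies on the caller's lock
        signalPending = true;                 // remembered even if nobody is waiting yet
        waitWork.notify_all();
    }

    void waitForWork() {
        std::unique_lock<std::mutex> l(lock);
        if (signalPending) {                  // a signal arrived while we were unlocked
            signalPending = false;
            return;
        }
        waitWork.wait(l);                     // otherwise block until broadcast()
    }
};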
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
@@ -1627,7 +1644,6 @@
mUseAsyncWrite(false),
mWriteAckSequence(0),
mDrainSequence(0),
- mSignalPending(false),
mScreenState(AudioFlinger::mScreenState),
// index 0 is reserved for normal mixer's submix
mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
@@ -2014,7 +2030,7 @@
pid_t callingPid = IPCThreadState::self()->getCallingPid();
// we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
// so ask activity manager to do this on our behalf
- sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
+ sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*isForApp*/);
}
}
@@ -2058,6 +2074,9 @@
void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
{
+ if (isDuplicating()) {
+ return;
+ }
Mutex::Autolock _l(mLock);
// Don't apply master mute in SW if our HAL can do it for us.
if (mOutput && mOutput->audioHwDev &&
@@ -2199,16 +2218,6 @@
}
}
-void AudioFlinger::PlaybackThread::broadcast_l()
-{
- // Thread could be blocked waiting for async
- // so signal it to handle state changes immediately
- // If threadLoop is currently unlocked a signal of mWaitWorkCV will
- // be lost so we also flag to prevent it blocking on mWaitWorkCV
- mSignalPending = true;
- mWaitWorkCV.broadcast();
-}
-
String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
{
Mutex::Autolock _l(mLock);
@@ -2906,6 +2915,8 @@
bool AudioFlinger::PlaybackThread::threadLoop()
{
+ logWriterTLS = mNBLogWriter.get();
+
Vector< sp<Track> > tracksToRemove;
mStandbyTimeNs = systemTime();
@@ -2937,7 +2948,9 @@
const char *logString = NULL;
checkSilentMode_l();
-
+#if 0
+ int z = 0; // used in logFormat example
+#endif
while (!exitPending())
{
cpuStats.sample(myName);
@@ -3026,7 +3039,17 @@
}
}
}
-
+#if 0
+ // logFormat example
+ if (!(z % 100)) {
+ timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ LOGF("This is an integer %d, this is a float %f, this is my "
+ "pid %p %% %s %t", 42, 3.14, "and this is a timestamp", ts);
+ LOGF("A deceptive null-terminated string %\0");
+ }
+ ++z;
+#endif
saveOutputTracks();
if (mSignalPending) {
// A signal was raised while we were unlocked
@@ -3656,7 +3679,7 @@
// start the fast mixer
mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
pid_t tid = mFastMixer->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false);
stream()->setHalThreadPriority(kPriorityFastMixer);
#ifdef AUDIO_WATCHDOG
@@ -5793,6 +5816,7 @@
MixerThread::cacheParameters_l();
}
+
// ----------------------------------------------------------------------------
// Record
// ----------------------------------------------------------------------------
@@ -5919,7 +5943,7 @@
// start the fast capture
mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
pid_t tid = mFastCapture->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture);
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture, false);
stream()->setHalThreadPriority(kPriorityFastCapture);
#ifdef AUDIO_WATCHDOG
// FIXME
@@ -6579,7 +6603,7 @@
pid_t callingPid = IPCThreadState::self()->getCallingPid();
// we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
// so ask activity manager to do this on our behalf
- sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
+ sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true);
}
}
@@ -7340,4 +7364,889 @@
config->ext.mix.usecase.source = mAudioSource;
}
+// ----------------------------------------------------------------------------
+// Mmap
+// ----------------------------------------------------------------------------
+
+AudioFlinger::MmapThreadHandle::MmapThreadHandle(const sp<MmapThread>& thread)
+ : mThread(thread)
+{
+}
+
+AudioFlinger::MmapThreadHandle::~MmapThreadHandle()
+{
+ MmapThread *thread = mThread.get();
+ // clear our strong reference before disconnecting the thread: the last strong reference
+    // will be removed when closeInput/closeOutput is executed upon a call from the audio
+    // policy manager and the thread is removed from the mMmapThreads list, causing the
+    // thread's destruction.
+ mThread.clear();
+ if (thread != nullptr) {
+ thread->disconnect();
+ }
+}
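A minimal sketch of the teardown order used by ~MmapThreadHandle(), with hypothetical ThreadStub/HandleStub names: the handle drops its own strong reference first, then asks the thread to disconnect; the assumption (as in the comment above) is that an external registry still holds the thread, so the final release happens when the owner closes the stream, not inside this destructor.

#include <cstdio>
#include <memory>

struct ThreadStub {
    void disconnect() { std::printf("disconnect: owner will close the stream and drop me\n"); }
};

struct HandleStub {
    explicit HandleStub(std::shared_ptr<ThreadStub> thread) : mThread(std::move(thread)) {}
    ~HandleStub() {
        ThreadStub* raw = mThread.get();
        mThread.reset();                      // give up our reference before disconnecting
        if (raw != nullptr) {
            raw->disconnect();                // assumes the owner's registry still keeps it alive
        }
    }
private:
    std::shared_ptr<ThreadStub> mThread;
};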
+
+status_t AudioFlinger::MmapThreadHandle::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info)
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->createMmapBuffer(minSizeFrames, info);
+}
+
+status_t AudioFlinger::MmapThreadHandle::getMmapPosition(struct audio_mmap_position *position)
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->getMmapPosition(position);
+}
+
+status_t AudioFlinger::MmapThreadHandle::start(const MmapStreamInterface::Client& client, audio_port_handle_t *handle)
+
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->start(client, handle);
+}
+
+status_t AudioFlinger::MmapThreadHandle::stop(audio_port_handle_t handle)
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->stop(handle);
+}
+
+
+AudioFlinger::MmapThread::MmapThread(
+ const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, sp<StreamHalInterface> stream,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady)
+ : ThreadBase(audioFlinger, id, outDevice, inDevice, MMAP, systemReady),
+ mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev)
+{
+ readHalParameters_l();
+}
+
+AudioFlinger::MmapThread::~MmapThread()
+{
+}
+
+void AudioFlinger::MmapThread::onFirstRef()
+{
+ run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+void AudioFlinger::MmapThread::disconnect()
+{
+ for (const sp<MmapTrack> &t : mActiveTracks) {
+ stop(t->portId());
+ }
+ // this will cause the destruction of this thread.
+ if (isOutput()) {
+ AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+ } else {
+ AudioSystem::releaseInput(mId, mSessionId);
+ }
+}
+
+
+void AudioFlinger::MmapThread::configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType __unused,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId)
+{
+ mAttr = *attr;
+ mSessionId = sessionId;
+ mCallback = callback;
+ mPortId = portId;
+}
+
+status_t AudioFlinger::MmapThread::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info)
+{
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+ return mHalStream->createMmapBuffer(minSizeFrames, info);
+}
+
+status_t AudioFlinger::MmapThread::getMmapPosition(struct audio_mmap_position *position)
+{
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+ return mHalStream->getMmapPosition(position);
+}
+
+status_t AudioFlinger::MmapThread::start(const MmapStreamInterface::Client& client,
+ audio_port_handle_t *handle)
+{
+ ALOGV("%s clientUid %d", __FUNCTION__, client.clientUid);
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+
+ status_t ret;
+ audio_session_t sessionId;
+ audio_port_handle_t portId;
+
+ if (mActiveTracks.size() == 0) {
+ // for the first track, reuse portId and session allocated when the stream was opened
+ mHalStream->start();
+ portId = mPortId;
+ sessionId = mSessionId;
+ } else {
+ // for other tracks than first one, get a new port ID from APM.
+ sessionId = (audio_session_t)mAudioFlinger->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ audio_io_handle_t io;
+ if (isOutput()) {
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ config.sample_rate = mSampleRate;
+ config.channel_mask = mChannelMask;
+ config.format = mFormat;
+ audio_stream_type_t stream = streamType();
+ audio_output_flags_t flags =
+ (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
+ ret = AudioSystem::getOutputForAttr(&mAttr, &io,
+ sessionId,
+ &stream,
+ client.clientUid,
+ &config,
+ flags,
+ AUDIO_PORT_HANDLE_NONE,
+ &portId);
+ } else {
+ audio_config_base_t config;
+ config.sample_rate = mSampleRate;
+ config.channel_mask = mChannelMask;
+ config.format = mFormat;
+ ret = AudioSystem::getInputForAttr(&mAttr, &io,
+ sessionId,
+ client.clientPid,
+ client.clientUid,
+ &config,
+ AUDIO_INPUT_FLAG_MMAP_NOIRQ,
+ AUDIO_PORT_HANDLE_NONE,
+ &portId);
+ }
+        // APM should not choose a different input or output stream for the same set of
+        // attributes and audio configuration
+ if (ret != NO_ERROR || io != mId) {
+ ALOGE("%s: error getting output or input from APM (error %d, io %d expected io %d)",
+ __FUNCTION__, ret, io, mId);
+ return BAD_VALUE;
+ }
+ }
+
+ if (isOutput()) {
+ ret = AudioSystem::startOutput(mId, streamType(), sessionId);
+ } else {
+ ret = AudioSystem::startInput(mId, sessionId);
+ }
+
+ // abort if start is rejected by audio policy manager
+ if (ret != NO_ERROR) {
+ if (mActiveTracks.size() != 0) {
+ if (isOutput()) {
+ AudioSystem::releaseOutput(mId, streamType(), sessionId);
+ } else {
+ AudioSystem::releaseInput(mId, sessionId);
+ }
+ }
+ return PERMISSION_DENIED;
+ }
+
+ sp<MmapTrack> track = new MmapTrack(this, mSampleRate, mFormat, mChannelMask, sessionId,
+ client.clientUid, portId);
+
+ mActiveTracks.add(track);
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ chain->setStrategy(AudioSystem::getStrategyForStream(streamType()));
+ chain->incTrackCnt();
+ chain->incActiveTrackCnt();
+ }
+
+ *handle = portId;
+
+ broadcast_l();
+
+ ALOGV("%s DONE handle %d", __FUNCTION__, portId);
+
+ return NO_ERROR;
+}
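A condensed sketch of the client-admission policy in start() above, with hypothetical policy helpers: the first client reuses the port and session allocated when the stream was opened, each additional client gets a fresh port for the same I/O handle, and a rejected start only has to undo the extra allocation.

#include <vector>

struct PolicyStub {                           // toy stand-in for the audio policy calls
    int nextPort = 100;
    int allocatePort(int io) { (void)io; return nextPort++; }
    bool startIo(int io)     { (void)io; return true; }   // the real call may refuse
    void releaseIo(int io)   { (void)io; }
};

struct MmapStreamState {
    int io = 0;
    int firstPort = 1;                        // allocated when the stream was opened
    std::vector<int> activePorts;
    PolicyStub policy;

    bool startClient(int* outPort) {
        bool firstClient = activePorts.empty();
        int port = firstClient ? firstPort : policy.allocatePort(io);
        if (!policy.startIo(io)) {
            if (!firstClient) policy.releaseIo(io);        // roll back the extra allocation only
            return false;                                  // PERMISSION_DENIED in the real code
        }
        activePorts.push_back(port);
        *outPort = port;
        return true;
    }
};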
+
+status_t AudioFlinger::MmapThread::stop(audio_port_handle_t handle)
+{
+
+ ALOGV("%s handle %d", __FUNCTION__, handle);
+
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+
+ sp<MmapTrack> track;
+ for (const sp<MmapTrack> &t : mActiveTracks) {
+ if (handle == t->portId()) {
+ track = t;
+ break;
+ }
+ }
+ if (track == 0) {
+ return BAD_VALUE;
+ }
+
+ mActiveTracks.remove(track);
+
+ if (isOutput()) {
+ AudioSystem::stopOutput(mId, streamType(), track->sessionId());
+ if (mActiveTracks.size() != 0) {
+ AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
+ }
+ } else {
+ AudioSystem::stopInput(mId, track->sessionId());
+ if (mActiveTracks.size() != 0) {
+ AudioSystem::releaseInput(mId, track->sessionId());
+ }
+ }
+
+ sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+ if (chain != 0) {
+ chain->decActiveTrackCnt();
+ chain->decTrackCnt();
+ }
+
+ broadcast_l();
+
+ if (mActiveTracks.size() == 0) {
+ mHalStream->stop();
+ }
+ return NO_ERROR;
+}
+
+
+void AudioFlinger::MmapThread::readHalParameters_l()
+{
+ status_t result = mHalStream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
+ mFormat = mHALFormat;
+ LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+ result = mHalStream->getFrameSize(&mFrameSize);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
+ result = mHalStream->getBufferSize(&mBufferSize);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
+ mFrameCount = mBufferSize / mFrameSize;
+}
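readHalParameters_l() derives the thread's frame count from the HAL-reported buffer and frame sizes. A worked example with assumed values (16 KiB buffer, 4-byte frames, i.e. stereo 16-bit PCM):

#include <cassert>
#include <cstddef>

int main() {
    size_t bufferSize = 16384;                // bytes reported by the HAL (assumed)
    size_t frameSize  = 4;                    // 2 channels * 2 bytes per sample (assumed)
    size_t frameCount = bufferSize / frameSize;
    assert(frameCount == 4096);
    return 0;
}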
+
+bool AudioFlinger::MmapThread::threadLoop()
+{
+ acquireWakeLock();
+
+ checkSilentMode_l();
+
+ const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
+
+ while (!exitPending())
+ {
+ Mutex::Autolock _l(mLock);
+ Vector< sp<EffectChain> > effectChains;
+
+ if (mSignalPending) {
+ // A signal was raised while we were unlocked
+ mSignalPending = false;
+ } else {
+ if (mConfigEvents.isEmpty()) {
+ // we're about to wait, flush the binder command buffer
+ IPCThreadState::self()->flushCommands();
+
+ if (exitPending()) {
+ break;
+ }
+
+ bool wakelockReleased = false;
+ if (mActiveTracks.size() == 0) {
+ releaseWakeLock_l();
+ wakelockReleased = true;
+ }
+ // wait until we have something to do...
+ ALOGV("%s going to sleep", myName.string());
+ mWaitWorkCV.wait(mLock);
+ ALOGV("%s waking up", myName.string());
+ if (wakelockReleased) {
+ acquireWakeLock_l();
+ }
+
+ checkSilentMode_l();
+
+ continue;
+ }
+ }
+
+ processConfigEvents_l();
+
+ processVolume_l();
+
+ checkInvalidTracks_l();
+
+ mActiveTracks.updatePowerState(this);
+
+ lockEffectChains_l(effectChains);
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ effectChains[i]->process_l();
+ }
+ // enable changes in effect chain
+ unlockEffectChains(effectChains);
+ // Effect chains will be actually deleted here if they were removed from
+ // mEffectChains list during mixing or effects processing
+ }
+
+ threadLoop_exit();
+
+ if (!mStandby) {
+ threadLoop_standby();
+ mStandby = true;
+ }
+
+ releaseWakeLock();
+
+ ALOGV("Thread %p type %d exiting", this, mType);
+ return false;
+}
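Unlike a mixer thread, this loop never touches audio data; it only reacts to state changes and runs effect chains, and it drops its wake lock while no client is active. A stripped-down sketch of that wait/idle structure using standard-library primitives (not the real thread classes):

#include <atomic>
#include <condition_variable>
#include <mutex>

struct MmapControlLoop {
    std::mutex lock;
    std::condition_variable waitWork;
    bool signalPending = false;
    std::atomic<bool> exiting{false};
    int activeClients = 0;

    void run() {
        while (!exiting) {
            std::unique_lock<std::mutex> l(lock);
            if (!signalPending) {
                bool idle = (activeClients == 0);
                if (idle) { /* releaseWakeLock_l() would go here */ }
                waitWork.wait(l);             // sleep until start()/stop()/config broadcast
                if (idle) { /* acquireWakeLock_l() would go here */ }
                continue;                     // re-evaluate state after waking up
            }
            signalPending = false;
            // processConfigEvents_l(), processVolume_l(), checkInvalidTracks_l(),
            // and effect-chain processing happen here in the real thread.
        }
    }
};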
+
+// checkForNewParameter_l() must be called with ThreadBase::mLock held
+bool AudioFlinger::MmapThread::checkForNewParameter_l(const String8& keyValuePair,
+ status_t& status)
+{
+ AudioParameter param = AudioParameter(keyValuePair);
+ int value;
+ if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+ // forward device change to effects that have requested to be
+ // aware of attached audio device.
+ if (value != AUDIO_DEVICE_NONE) {
+ mOutDevice = value;
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setDevice_l(mOutDevice);
+ }
+ }
+ }
+ status = mHalStream->setParameters(keyValuePair);
+
+ return false;
+}
+
+String8 AudioFlinger::MmapThread::getParameters(const String8& keys)
+{
+ Mutex::Autolock _l(mLock);
+ String8 out_s8;
+ if (initCheck() == NO_ERROR && mHalStream->getParameters(keys, &out_s8) == OK) {
+ return out_s8;
+ }
+ return String8();
+}
+
+void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
+ sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
+
+ desc->mIoHandle = mId;
+
+ switch (event) {
+ case AUDIO_INPUT_OPENED:
+ case AUDIO_INPUT_CONFIG_CHANGED:
+ case AUDIO_OUTPUT_OPENED:
+ case AUDIO_OUTPUT_CONFIG_CHANGED:
+ desc->mPatch = mPatch;
+ desc->mChannelMask = mChannelMask;
+ desc->mSamplingRate = mSampleRate;
+ desc->mFormat = mFormat;
+ desc->mFrameCount = mFrameCount;
+ desc->mFrameCountHAL = mFrameCount;
+ desc->mLatency = 0;
+ break;
+
+ case AUDIO_INPUT_CLOSED:
+ case AUDIO_OUTPUT_CLOSED:
+ default:
+ break;
+ }
+ mAudioFlinger->ioConfigChanged(event, desc, pid);
+}
+
+status_t AudioFlinger::MmapThread::createAudioPatch_l(const struct audio_patch *patch,
+ audio_patch_handle_t *handle)
+{
+ status_t status = NO_ERROR;
+
+ // store new device and send to effects
+ audio_devices_t type = AUDIO_DEVICE_NONE;
+ audio_port_handle_t deviceId;
+ if (isOutput()) {
+ for (unsigned int i = 0; i < patch->num_sinks; i++) {
+ type |= patch->sinks[i].ext.device.type;
+ }
+ deviceId = patch->sinks[0].id;
+ } else {
+ type = patch->sources[0].ext.device.type;
+ deviceId = patch->sources[0].id;
+ }
+
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setDevice_l(type);
+ }
+
+ if (isOutput()) {
+ mOutDevice = type;
+ } else {
+ mInDevice = type;
+ // store new source and send to effects
+ if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
+ mAudioSource = patch->sinks[0].ext.mix.usecase.source;
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setAudioSource_l(mAudioSource);
+ }
+ }
+ }
+
+ if (mAudioHwDev->supportsAudioPatches()) {
+ status = mHalDevice->createAudioPatch(patch->num_sources,
+ patch->sources,
+ patch->num_sinks,
+ patch->sinks,
+ handle);
+ } else {
+ char *address;
+ if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
+ //FIXME: we only support address on first sink with HAL version < 3.0
+ address = audio_device_address_to_parameter(
+ patch->sinks[0].ext.device.type,
+ patch->sinks[0].ext.device.address);
+ } else {
+ address = (char *)calloc(1, 1);
+ }
+ AudioParameter param = AudioParameter(String8(address));
+ free(address);
+ param.addInt(String8(AudioParameter::keyRouting), (int)type);
+ if (!isOutput()) {
+ param.addInt(String8(AudioParameter::keyInputSource),
+ (int)patch->sinks[0].ext.mix.usecase.source);
+ }
+ status = mHalStream->setParameters(param.toString());
+ *handle = AUDIO_PATCH_HANDLE_NONE;
+ }
+
+ if (isOutput() && mPrevOutDevice != mOutDevice) {
+ mPrevOutDevice = type;
+ sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
+ if (mCallback != 0) {
+ mCallback->onRoutingChanged(deviceId);
+ }
+ }
+ if (!isOutput() && mPrevInDevice != mInDevice) {
+ mPrevInDevice = type;
+ sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
+ if (mCallback != 0) {
+ mCallback->onRoutingChanged(deviceId);
+ }
+ }
+ return status;
+}
+
+status_t AudioFlinger::MmapThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
+{
+ status_t status = NO_ERROR;
+
+ mInDevice = AUDIO_DEVICE_NONE;
+
+ bool supportsAudioPatches = mHalDevice->supportsAudioPatches(&supportsAudioPatches) == OK ?
+ supportsAudioPatches : false;
+
+ if (supportsAudioPatches) {
+ status = mHalDevice->releaseAudioPatch(handle);
+ } else {
+ AudioParameter param;
+ param.addInt(String8(AudioParameter::keyRouting), 0);
+ status = mHalStream->setParameters(param.toString());
+ }
+ return status;
+}
+
+void AudioFlinger::MmapThread::getAudioPortConfig(struct audio_port_config *config)
+{
+ ThreadBase::getAudioPortConfig(config);
+ if (isOutput()) {
+ config->role = AUDIO_PORT_ROLE_SOURCE;
+ config->ext.mix.hw_module = mAudioHwDev->handle();
+ config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+ } else {
+ config->role = AUDIO_PORT_ROLE_SINK;
+ config->ext.mix.hw_module = mAudioHwDev->handle();
+ config->ext.mix.usecase.source = mAudioSource;
+ }
+}
+
+status_t AudioFlinger::MmapThread::addEffectChain_l(const sp<EffectChain>& chain)
+{
+ audio_session_t session = chain->sessionId();
+
+ ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
+ // Attach all tracks with same session ID to this chain.
+ // indicate all active tracks in the chain
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (session == track->sessionId()) {
+ chain->incTrackCnt();
+ chain->incActiveTrackCnt();
+ }
+ }
+
+ chain->setThread(this);
+ chain->setInBuffer(nullptr);
+ chain->setOutBuffer(nullptr);
+ chain->syncHalEffectsState();
+
+ mEffectChains.add(chain);
+ checkSuspendOnAddEffectChain_l(chain);
+ return NO_ERROR;
+}
+
+size_t AudioFlinger::MmapThread::removeEffectChain_l(const sp<EffectChain>& chain)
+{
+ audio_session_t session = chain->sessionId();
+
+ ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
+
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ if (chain == mEffectChains[i]) {
+ mEffectChains.removeAt(i);
+ // detach all active tracks from the chain
+ // detach all tracks with same session ID from this chain
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (session == track->sessionId()) {
+ chain->decActiveTrackCnt();
+ chain->decTrackCnt();
+ }
+ }
+ break;
+ }
+ }
+ return mEffectChains.size();
+}
+
+// hasAudioSession_l() must be called with ThreadBase::mLock held
+uint32_t AudioFlinger::MmapThread::hasAudioSession_l(audio_session_t sessionId) const
+{
+ uint32_t result = 0;
+ if (getEffectChain_l(sessionId) != 0) {
+ result = EFFECT_SESSION;
+ }
+
+ for (size_t i = 0; i < mActiveTracks.size(); i++) {
+ sp<MmapTrack> track = mActiveTracks[i];
+ if (sessionId == track->sessionId()) {
+ result |= TRACK_SESSION;
+ if (track->isFastTrack()) {
+ result |= FAST_SESSION;
+ }
+ break;
+ }
+ }
+
+ return result;
+}
+
+void AudioFlinger::MmapThread::threadLoop_standby()
+{
+ mHalStream->standby();
+}
+
+void AudioFlinger::MmapThread::threadLoop_exit()
+{
+ if (mCallback != 0) {
+ mCallback->onTearDown();
+ }
+}
+
+status_t AudioFlinger::MmapThread::setSyncEvent(const sp<SyncEvent>& event __unused)
+{
+ return BAD_VALUE;
+}
+
+bool AudioFlinger::MmapThread::isValidSyncEvent(const sp<SyncEvent>& event __unused) const
+{
+ return false;
+}
+
+status_t AudioFlinger::MmapThread::checkEffectCompatibility_l(
+ const effect_descriptor_t *desc, audio_session_t sessionId)
+{
+ // No global effect sessions on mmap threads
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX || sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+        ALOGW("checkEffectCompatibility_l(): global effect %s on MMAP thread %s",
+ desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+
+ if (!isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC)) {
+ ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on capture mmap thread",
+ desc->name);
+ return BAD_VALUE;
+ }
+ if (isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
+ ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback mmap thread",
+ desc->name);
+ return BAD_VALUE;
+ }
+
+ // Only allow effects without processing load or latency
+ if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) != EFFECT_FLAG_NO_PROCESS) {
+ return BAD_VALUE;
+ }
+
+ return NO_ERROR;
+
+}
+
+void AudioFlinger::MmapThread::checkInvalidTracks_l()
+{
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (track->isInvalid()) {
+ if (mCallback != 0) {
+ mCallback->onTearDown();
+ }
+ break;
+ }
+ }
+}
+
+void AudioFlinger::MmapThread::dump(int fd, const Vector<String16>& args)
+{
+ dumpInternals(fd, args);
+ dumpTracks(fd, args);
+ dumpEffectChains(fd, args);
+}
+
+void AudioFlinger::MmapThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ dprintf(fd, "\nMmap thread %p:\n", this);
+
+ dumpBase(fd, args);
+
+ dprintf(fd, " Attributes: content type %d usage %d source %d\n",
+ mAttr.content_type, mAttr.usage, mAttr.source);
+ dprintf(fd, " Session: %d port Id: %d\n", mSessionId, mPortId);
+ if (mActiveTracks.size() == 0) {
+ dprintf(fd, " No active clients\n");
+ }
+}
+
+void AudioFlinger::MmapThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ size_t numtracks = mActiveTracks.size();
+ dprintf(fd, " %zu Tracks", numtracks);
+ if (numtracks) {
+ MmapTrack::appendDumpHeader(result);
+ for (size_t i = 0; i < numtracks ; ++i) {
+ sp<MmapTrack> track = mActiveTracks[i];
+ track->dump(buffer, SIZE);
+ result.append(buffer);
+ }
+ } else {
+ dprintf(fd, "\n");
+ }
+ write(fd, result.string(), result.size());
+}
+
+AudioFlinger::MmapPlaybackThread::MmapPlaybackThread(
+ const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamOut *output,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady)
+ : MmapThread(audioFlinger, id, hwDev, output->stream, outDevice, inDevice, systemReady),
+ mStreamType(AUDIO_STREAM_MUSIC),
+ mStreamVolume(1.0), mStreamMute(false), mOutput(output)
+{
+ snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
+ mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
+ mMasterVolume = audioFlinger->masterVolume_l();
+ mMasterMute = audioFlinger->masterMute_l();
+ if (mAudioHwDev) {
+ if (mAudioHwDev->canSetMasterVolume()) {
+ mMasterVolume = 1.0;
+ }
+
+ if (mAudioHwDev->canSetMasterMute()) {
+ mMasterMute = false;
+ }
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId)
+{
+ MmapThread::configure(attr, streamType, sessionId, callback, portId);
+ mStreamType = streamType;
+}
+
+AudioStreamOut* AudioFlinger::MmapPlaybackThread::clearOutput()
+{
+ Mutex::Autolock _l(mLock);
+ AudioStreamOut *output = mOutput;
+ mOutput = NULL;
+ return output;
+}
+
+void AudioFlinger::MmapPlaybackThread::setMasterVolume(float value)
+{
+ Mutex::Autolock _l(mLock);
+ // Don't apply master volume in SW if our HAL can do it for us.
+ if (mAudioHwDev &&
+ mAudioHwDev->canSetMasterVolume()) {
+ mMasterVolume = 1.0;
+ } else {
+ mMasterVolume = value;
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::setMasterMute(bool muted)
+{
+ Mutex::Autolock _l(mLock);
+ // Don't apply master mute in SW if our HAL can do it for us.
+ if (mAudioHwDev && mAudioHwDev->canSetMasterMute()) {
+ mMasterMute = false;
+ } else {
+ mMasterMute = muted;
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
+{
+ Mutex::Autolock _l(mLock);
+ if (stream == mStreamType) {
+ mStreamVolume = value;
+ broadcast_l();
+ }
+}
+
+float AudioFlinger::MmapPlaybackThread::streamVolume(audio_stream_type_t stream) const
+{
+ Mutex::Autolock _l(mLock);
+ if (stream == mStreamType) {
+ return mStreamVolume;
+ }
+ return 0.0f;
+}
+
+void AudioFlinger::MmapPlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
+{
+ Mutex::Autolock _l(mLock);
+ if (stream == mStreamType) {
+        mStreamMute = muted;
+ broadcast_l();
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::invalidateTracks(audio_stream_type_t streamType)
+{
+ Mutex::Autolock _l(mLock);
+ if (streamType == mStreamType) {
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ track->invalidate();
+ }
+ broadcast_l();
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::processVolume_l()
+{
+ float volume;
+
+ if (mMasterMute || mStreamMute) {
+ volume = 0;
+ } else {
+ volume = mMasterVolume * mStreamVolume;
+ }
+
+ if (volume != mHalVolFloat) {
+ mHalVolFloat = volume;
+
+ // Convert volumes from float to 8.24
+ uint32_t vol = (uint32_t)(volume * (1 << 24));
+
+ // Delegate volume control to effect in track effect chain if needed
+ // only one effect chain can be present on DirectOutputThread, so if
+ // there is one, the track is connected to it
+ if (!mEffectChains.isEmpty()) {
+ mEffectChains[0]->setVolume_l(&vol, &vol);
+ volume = (float)vol / (1 << 24);
+ }
+
+ mOutput->stream->setVolume(volume, volume);
+
+ if (mCallback != 0) {
+ int channelCount;
+ if (isOutput()) {
+ channelCount = audio_channel_count_from_out_mask(mChannelMask);
+ } else {
+ channelCount = audio_channel_count_from_in_mask(mChannelMask);
+ }
+ Vector<float> values;
+ for (int i = 0; i < channelCount; i++) {
+ values.add(volume);
+ }
+ mCallback->onVolumeChanged(mChannelMask, values);
+ }
+ }
+}
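processVolume_l() hands the combined volume to the effect chain in 8.24 fixed point and converts it back before applying it to the HAL stream. A small worked example of that conversion (plain arithmetic, independent of the audio classes):

#include <cstdint>
#include <cstdio>

int main() {
    float volume = 0.5f;
    uint32_t q8_24 = (uint32_t)(volume * (1 << 24));    // 0.5f -> 0x00800000
    float back = (float)q8_24 / (1 << 24);              // 0x00800000 -> 0.5f
    std::printf("%f -> 0x%08x -> %f\n", volume, q8_24, back);
    return 0;
}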
+
+void AudioFlinger::MmapPlaybackThread::checkSilentMode_l()
+{
+ if (!mMasterMute) {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("ro.audio.silent", value, "0") > 0) {
+ char *endptr;
+ unsigned long ul = strtoul(value, &endptr, 0);
+ if (*endptr == '\0' && ul != 0) {
+ ALOGD("Silence is golden");
+ // The setprop command will not allow a property to be changed after
+ // the first time it is set, so we don't have to worry about un-muting.
+ setMasterMute_l(true);
+ }
+ }
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ MmapThread::dumpInternals(fd, args);
+
+ dprintf(fd, " Stream type: %d Stream volume: %f HAL volume: %f Stream mute %d\n", mStreamType, mStreamVolume, mHalVolFloat, mStreamMute);
+ dprintf(fd, " Master volume: %f Master mute %d\n", mMasterVolume, mMasterMute);
+}
+
+AudioFlinger::MmapCaptureThread::MmapCaptureThread(
+ const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamIn *input,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady)
+ : MmapThread(audioFlinger, id, hwDev, input->stream, outDevice, inDevice, systemReady),
+ mInput(input)
+{
+ snprintf(mThreadName, kThreadNameLength, "AudioMmapIn_%X", id);
+ mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+}
+
+AudioFlinger::AudioStreamIn* AudioFlinger::MmapCaptureThread::clearInput()
+{
+ Mutex::Autolock _l(mLock);
+ AudioStreamIn *input = mInput;
+ mInput = NULL;
+ return input;
+}
} // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 3fb0b07..0732a7b 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -29,7 +29,8 @@
DIRECT, // Thread class is DirectOutputThread
DUPLICATING, // Thread class is DuplicatingThread
RECORD, // Thread class is RecordThread
- OFFLOAD // Thread class is OffloadThread
+ OFFLOAD, // Thread class is OffloadThread
+ MMAP // control thread for MMAP stream
};
static const char *threadTypeToString(type_t type);
@@ -126,23 +127,25 @@
class PrioConfigEventData : public ConfigEventData {
public:
- PrioConfigEventData(pid_t pid, pid_t tid, int32_t prio) :
- mPid(pid), mTid(tid), mPrio(prio) {}
+ PrioConfigEventData(pid_t pid, pid_t tid, int32_t prio, bool forApp) :
+ mPid(pid), mTid(tid), mPrio(prio), mForApp(forApp) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d\n", mPid, mTid, mPrio);
+ snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d, for app? %d\n",
+ mPid, mTid, mPrio, mForApp);
}
const pid_t mPid;
const pid_t mTid;
const int32_t mPrio;
+ const bool mForApp;
};
class PrioConfigEvent : public ConfigEvent {
public:
- PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) :
+ PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp) :
ConfigEvent(CFG_EVENT_PRIO, true) {
- mData = new PrioConfigEventData(pid, tid, prio);
+ mData = new PrioConfigEventData(pid, tid, prio, forApp);
}
virtual ~PrioConfigEvent() {}
};
@@ -267,8 +270,8 @@
status_t sendConfigEvent_l(sp<ConfigEvent>& event);
void sendIoConfigEvent(audio_io_config_event event, pid_t pid = 0);
void sendIoConfigEvent_l(audio_io_config_event event, pid_t pid = 0);
- void sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio);
- void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
+ void sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp);
+ void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio, bool forApp);
status_t sendSetParameterConfigEvent_l(const String8& keyValuePair);
status_t sendCreateAudioPatchConfigEvent(const struct audio_patch *patch,
audio_patch_handle_t *handle);
@@ -390,6 +393,8 @@
virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
audio_session_t sessionId) = 0;
+ void broadcast_l();
+
mutable Mutex mLock;
protected:
@@ -482,6 +487,9 @@
sp<NBLog::Writer> mNBLogWriter;
bool mSystemReady;
ExtendedTimestamp mTimestamp;
+ // A condition that must be evaluated by the thread loop has changed and
+    // we must not wait for an async write callback in the thread loop before evaluating it
+ bool mSignalPending;
// ActiveTracks is a sorted vector of track type T representing the
// active tracks of threadLoop() to be considered by the locked prepare portion.
@@ -569,8 +577,22 @@
};
};
+class VolumeInterface {
+ public:
+
+ virtual ~VolumeInterface() {}
+
+ virtual void setMasterVolume(float value) = 0;
+ virtual void setMasterMute(bool muted) = 0;
+ virtual void setStreamVolume(audio_stream_type_t stream, float value) = 0;
+ virtual void setStreamMute(audio_stream_type_t stream, bool muted) = 0;
+ virtual float streamVolume(audio_stream_type_t stream) const = 0;
+
+};
+
// --- PlaybackThread ---
-class PlaybackThread : public ThreadBase, public StreamOutHalInterfaceCallback {
+class PlaybackThread : public ThreadBase, public StreamOutHalInterfaceCallback,
+ public VolumeInterface {
public:
#include "PlaybackTracks.h"
@@ -659,13 +681,12 @@
// same, but lock must already be held
uint32_t latency_l() const;
- void setMasterVolume(float value);
- void setMasterMute(bool muted);
-
- void setStreamVolume(audio_stream_type_t stream, float value);
- void setStreamMute(audio_stream_type_t stream, bool muted);
-
- float streamVolume(audio_stream_type_t stream) const;
+ // VolumeInterface
+ virtual void setMasterVolume(float value);
+ virtual void setMasterMute(bool muted);
+ virtual void setStreamVolume(audio_stream_type_t stream, float value);
+ virtual void setStreamMute(audio_stream_type_t stream, bool muted);
+ virtual float streamVolume(audio_stream_type_t stream) const;
sp<Track> createTrack_l(
const sp<AudioFlinger::Client>& client,
@@ -868,7 +889,6 @@
status_t addTrack_l(const sp<Track>& track);
bool destroyTrack_l(const sp<Track>& track);
void removeTrack_l(const sp<Track>& track);
- void broadcast_l();
void readOutputParameters_l();
@@ -930,9 +950,6 @@
// Bit 0 is reset by the async callback thread calling resetDraining(). Out of sequence
// callbacks are ignored.
uint32_t mDrainSequence;
- // A condition that must be evaluated by prepareTrack_l() has changed and we must not wait
- // for async write callback in the thread loop before evaluating it
- bool mSignalPending;
sp<AsyncCallbackThread> mCallbackThread;
private:
@@ -1274,7 +1291,6 @@
virtual bool hasFastMixer() const { return false; }
};
-
// record thread
class RecordThread : public ThreadBase
{
@@ -1491,3 +1507,153 @@
bool mFastTrackAvail; // true if fast track available
};
+
+class MmapThread : public ThreadBase
+{
+ public:
+
+#include "MmapTracks.h"
+
+ MmapThread(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, sp<StreamHalInterface> stream,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady);
+ virtual ~MmapThread();
+
+ virtual void configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId);
+
+ void disconnect();
+
+ // MmapStreamInterface
+ status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+ status_t getMmapPosition(struct audio_mmap_position *position);
+ status_t start(const MmapStreamInterface::Client& client, audio_port_handle_t *handle);
+ status_t stop(audio_port_handle_t handle);
+
+ // RefBase
+ virtual void onFirstRef();
+
+ // Thread virtuals
+ virtual bool threadLoop();
+
+ virtual void threadLoop_exit();
+ virtual void threadLoop_standby();
+
+ virtual status_t initCheck() const { return (mHalStream == 0) ? NO_INIT : NO_ERROR; }
+ virtual size_t frameCount() const { return mFrameCount; }
+ virtual bool checkForNewParameter_l(const String8& keyValuePair,
+ status_t& status);
+ virtual String8 getParameters(const String8& keys);
+ virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0);
+ void readHalParameters_l();
+ virtual void cacheParameters_l() {}
+ virtual status_t createAudioPatch_l(const struct audio_patch *patch,
+ audio_patch_handle_t *handle);
+ virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
+ virtual void getAudioPortConfig(struct audio_port_config *config);
+
+ virtual sp<StreamHalInterface> stream() const { return mHalStream; }
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
+ virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
+ audio_session_t sessionId);
+
+ virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const;
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
+
+ virtual void checkSilentMode_l() {}
+ virtual void processVolume_l() {}
+ void checkInvalidTracks_l();
+
+ virtual audio_stream_type_t streamType() { return AUDIO_STREAM_DEFAULT; }
+
+ virtual void invalidateTracks(audio_stream_type_t streamType __unused) {}
+
+ void dump(int fd, const Vector<String16>& args);
+ virtual void dumpInternals(int fd, const Vector<String16>& args);
+ void dumpTracks(int fd, const Vector<String16>& args);
+
+ virtual bool isOutput() const = 0;
+
+ protected:
+
+ audio_attributes_t mAttr;
+ audio_session_t mSessionId;
+ audio_port_handle_t mPortId;
+
+ sp<MmapStreamCallback> mCallback;
+ sp<StreamHalInterface> mHalStream;
+ sp<DeviceHalInterface> mHalDevice;
+ AudioHwDevice* const mAudioHwDev;
+ ActiveTracks<MmapTrack> mActiveTracks;
+};
+
+class MmapPlaybackThread : public MmapThread, public VolumeInterface
+{
+
+public:
+ MmapPlaybackThread(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamOut *output,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady);
+ virtual ~MmapPlaybackThread() {}
+
+ virtual void configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId);
+
+ AudioStreamOut* clearOutput();
+
+ // VolumeInterface
+ virtual void setMasterVolume(float value);
+ virtual void setMasterMute(bool muted);
+ virtual void setStreamVolume(audio_stream_type_t stream, float value);
+ virtual void setStreamMute(audio_stream_type_t stream, bool muted);
+ virtual float streamVolume(audio_stream_type_t stream) const;
+
+ void setMasterMute_l(bool muted) { mMasterMute = muted; }
+
+ virtual void invalidateTracks(audio_stream_type_t streamType);
+
+ virtual audio_stream_type_t streamType() { return mStreamType; }
+ virtual void checkSilentMode_l();
+ virtual void processVolume_l();
+
+ virtual void dumpInternals(int fd, const Vector<String16>& args);
+
+ virtual bool isOutput() const { return true; }
+
+protected:
+
+ audio_stream_type_t mStreamType;
+ float mMasterVolume;
+ float mStreamVolume;
+ bool mMasterMute;
+ bool mStreamMute;
+ float mHalVolFloat;
+ AudioStreamOut* mOutput;
+};
+
+class MmapCaptureThread : public MmapThread
+{
+
+public:
+ MmapCaptureThread(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamIn *input,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady);
+ virtual ~MmapCaptureThread() {}
+
+ AudioStreamIn* clearInput();
+
+ virtual bool isOutput() const { return false; }
+
+protected:
+
+ AudioStreamIn* mInput;
+};
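
The MmapPlaybackThread declaration above carries the usual AudioFlinger volume state (mMasterVolume, mStreamVolume, the mute flags and a cached mHalVolFloat), but the patch only declares the fields here. As a rough, standalone sketch — an assumption about how processVolume_l() most likely combines them, not the actual implementation — the effective value pushed to the HAL would look something like:

    #include <algorithm>

    struct MmapVolumeState {
        float masterVolume = 1.0f;   // mirrors mMasterVolume
        float streamVolume = 1.0f;   // mirrors mStreamVolume
        bool  masterMute   = false;  // mirrors mMasterMute
        bool  streamMute   = false;  // mirrors mStreamMute
    };

    // Effective value a playback thread would hand to the HAL (mirrors mHalVolFloat).
    inline float effectiveHalVolume(const MmapVolumeState& s) {
        if (s.masterMute || s.streamMute) {
            return 0.0f;
        }
        return std::min(1.0f, s.masterVolume * s.streamVolume);
    }
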
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 9ca2d63..e0c09f7 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -77,6 +77,7 @@
audio_track_cblk_t* cblk() const { return mCblk; }
audio_session_t sessionId() const { return mSessionId; }
uid_t uid() const { return mUid; }
+ audio_port_handle_t portId() const { return mPortId; }
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
sp<IMemory> getBuffers() const { return mBufferMemory; }
@@ -86,6 +87,10 @@
bool isPatchTrack() const { return (mType == TYPE_PATCH); }
bool isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
+ virtual void invalidate() { mIsInvalid = true; }
+ bool isInvalid() const { return mIsInvalid; }
+
+
protected:
TrackBase(const TrackBase&);
TrackBase& operator = (const TrackBase&);
@@ -165,6 +170,7 @@
track_type mType; // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
audio_io_handle_t mThreadIoHandle; // I/O handle of the thread the track is attached to
audio_port_handle_t mPortId; // unique ID for this track used by audio policy
+ bool mIsInvalid; // non-resettable latch, set by invalidate()
};
// PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
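
mIsInvalid is now a one-way latch owned by TrackBase, and the Tracks.cpp hunks below make the derived invalidate() overrides chain up to TrackBase::invalidate() before doing their own client signalling. A minimal standalone sketch of that pattern (the class names here are illustrative, not the real AudioFlinger types):

    #include <cstdio>

    class TrackBaseSketch {
    public:
        virtual ~TrackBaseSketch() = default;
        // Non-resettable latch: once a track is invalidated it stays invalid.
        virtual void invalidate() { mIsInvalid = true; }
        bool isInvalid() const { return mIsInvalid; }
    protected:
        bool mIsInvalid = false;
    };

    class PlaybackTrackSketch : public TrackBaseSketch {
    public:
        void invalidate() override {
            TrackBaseSketch::invalidate();                       // set the latch first
            std::printf("signal CBLK_INVALID to the client\n");  // then notify the client
        }
    };
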
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f2dd884..40bcf0a 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -98,7 +98,8 @@
mTerminated(false),
mType(type),
mThreadIoHandle(thread->id()),
- mPortId(portId)
+ mPortId(portId),
+ mIsInvalid(false)
{
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
if (!isTrustedCallingUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
@@ -365,7 +366,6 @@
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
- mIsInvalid(false),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
@@ -1042,8 +1042,8 @@
void AudioFlinger::PlaybackThread::Track::invalidate()
{
+ TrackBase::invalidate();
signalClientFlag(CBLK_INVALID);
- mIsInvalid = true;
}
void AudioFlinger::PlaybackThread::Track::disable()
@@ -1599,6 +1599,7 @@
void AudioFlinger::RecordThread::RecordTrack::invalidate()
{
+ TrackBase::invalidate();
// FIXME should use proxy, and needs work
audio_track_cblk_t* cblk = mCblk;
android_atomic_or(CBLK_INVALID, &cblk->mFlags);
@@ -1735,4 +1736,76 @@
mProxy->releaseBuffer(buffer);
}
+
+
+AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_session_t sessionId,
+ uid_t uid,
+ audio_port_handle_t portId)
+ : TrackBase(thread, NULL, sampleRate, format,
+ channelMask, 0, NULL, sessionId, uid, false,
+ ALLOC_NONE,
+ TYPE_DEFAULT, portId)
+{
+}
+
+AudioFlinger::MmapThread::MmapTrack::~MmapTrack()
+{
+}
+
+status_t AudioFlinger::MmapThread::MmapTrack::initCheck() const
+{
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
+ audio_session_t triggerSession __unused)
+{
+ return NO_ERROR;
+}
+
+void AudioFlinger::MmapThread::MmapTrack::stop()
+{
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::MmapThread::MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+{
+ buffer->frameCount = 0;
+ buffer->raw = nullptr;
+ return INVALID_OPERATION;
+}
+
+// ExtendedAudioBufferProvider interface
+size_t AudioFlinger::MmapThread::MmapTrack::framesReady() const {
+ return 0;
+}
+
+int64_t AudioFlinger::MmapThread::MmapTrack::framesReleased() const
+{
+ return 0;
+}
+
+void AudioFlinger::MmapThread::MmapTrack::onTimestamp(const ExtendedTimestamp &timestamp __unused)
+{
+}
+
+/*static*/ void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
+{
+ result.append(" Client Fmt Chn mask SRate\n");
+}
+
+void AudioFlinger::MmapThread::MmapTrack::dump(char* buffer, size_t size)
+{
+ snprintf(buffer, size, " %6u %3u %08X %5u\n",
+ mUid,
+ mFormat,
+ mChannelMask,
+ mSampleRate);
+
+}
+
} // namespace android
diff --git a/services/audioflinger/TypedLogger.cpp b/services/audioflinger/TypedLogger.cpp
new file mode 100644
index 0000000..b5b1bc5
--- /dev/null
+++ b/services/audioflinger/TypedLogger.cpp
@@ -0,0 +1,25 @@
+/*
+*
+* Copyright 2017, The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <pthread.h>
+#include "TypedLogger.h"
+
+thread_local android::NBLog::Writer *logWriterTLS;
diff --git a/services/audioflinger/TypedLogger.h b/services/audioflinger/TypedLogger.h
new file mode 100644
index 0000000..2d71ab4
--- /dev/null
+++ b/services/audioflinger/TypedLogger.h
@@ -0,0 +1,29 @@
+/*
+*
+* Copyright 2017, The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef TYPED_LOGGER_H
+#define TYPED_LOGGER_H
+
+#include <media/nbaio/NBLog.h>
+
+extern "C" {
+ extern thread_local android::NBLog::Writer *logWriterTLS;
+}
+
+#define LOGF(fmt, ...) logWriterTLS->logFormat(fmt, ##__VA_ARGS__)
+
+#endif
\ No newline at end of file
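
TypedLogger keeps one NBLog::Writer pointer per thread in thread_local storage, and LOGF forwards straight to it, so every thread is expected to install its writer before the first LOGF call (the macro does not null-check). A standalone sketch of the same ownership pattern, using a stand-in Writer type instead of the real NBLog API:

    #include <cstdio>

    struct WriterSketch {                      // stand-in for NBLog::Writer
        void logFormat(const char* fmt, int value) { std::printf(fmt, value); }
    };

    thread_local WriterSketch* tlsWriter = nullptr;   // mirrors logWriterTLS

    void threadBody() {
        WriterSketch writer;
        tlsWriter = &writer;                          // install before the first log call
        tlsWriter->logFormat("frames=%d\n", 128);     // what a LOGF() use expands to
    }
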
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 35bb021..16fed70 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -16,6 +16,8 @@
#pragma once
+#include <sys/types.h>
+
#include "AudioPort.h"
#include <RoutingStrategy.h>
#include <utils/Errors.h>
@@ -128,6 +130,7 @@
sp<SwAudioOutputDescriptor> mOutput1; // used by duplicated outputs: first output
sp<SwAudioOutputDescriptor> mOutput2; // used by duplicated outputs: second output
uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
+ uid_t mDirectClientUid; // uid of the direct output client
uint32_t mGlobalRefCount; // non-stream-specific ref count
};
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 93b7f47..5643335 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -223,7 +223,7 @@
: AudioOutputDescriptor(profile, clientInterface),
mProfile(profile), mIoHandle(0), mLatency(0),
mFlags((audio_output_flags_t)0), mPolicyMix(NULL),
- mOutput1(0), mOutput2(0), mDirectOpenCount(0), mGlobalRefCount(0)
+ mOutput1(0), mOutput2(0), mDirectOpenCount(0), mDirectClientUid(0), mGlobalRefCount(0)
{
if (profile != NULL) {
mFlags = (audio_output_flags_t)profile->getFlags();
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index eb61ec4..564ed56 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -752,7 +752,7 @@
ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
device, stream, samplingRate, format, channelMask, flags);
- return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE,
+ return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE, uid_t{0} /*Invalid uid*/,
stream, samplingRate,format, channelMask,
flags, offloadInfo);
}
@@ -832,7 +832,7 @@
ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
device, config->sample_rate, config->format, config->channel_mask, flags);
- *output = getOutputForDevice(device, session, *stream,
+ *output = getOutputForDevice(device, session, uid, *stream,
config->sample_rate, config->format, config->channel_mask,
flags, &config->offload_info);
if (*output == AUDIO_IO_HANDLE_NONE) {
@@ -846,6 +846,7 @@
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
audio_devices_t device,
audio_session_t session __unused,
+ uid_t clientUid,
audio_stream_type_t stream,
uint32_t samplingRate,
audio_format_t format,
@@ -954,13 +955,21 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
outputDesc = desc;
- // reuse direct output if currently open and configured with same parameters
+ // reuse direct output if currently open by the same client
+ // and configured with same parameters
if ((samplingRate == outputDesc->mSamplingRate) &&
- audio_formats_match(format, outputDesc->mFormat) &&
- (channelMask == outputDesc->mChannelMask)) {
- outputDesc->mDirectOpenCount++;
- ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
- return mOutputs.keyAt(i);
+ audio_formats_match(format, outputDesc->mFormat) &&
+ (channelMask == outputDesc->mChannelMask)) {
+ if (clientUid == outputDesc->mDirectClientUid) {
+ outputDesc->mDirectOpenCount++;
+ ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
+ return mOutputs.keyAt(i);
+ } else {
+ ALOGV("getOutput() do not reuse direct output because current client (%ld) "
+ "is not the same as requesting client (%ld)",
+ (long)outputDesc->mDirectClientUid, (long)clientUid);
+ goto non_direct_output;
+ }
}
}
}
@@ -1028,6 +1037,7 @@
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
+ outputDesc->mDirectClientUid = clientUid;
audio_io_handle_t srcOutput = getOutputForEffect();
addOutput(output, outputDesc);
@@ -1932,7 +1942,7 @@
ALOG_ASSERT(inputDesc != 0);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
- if (index < 0) {
+ if (audioSession == 0) {
ALOGW("releaseInput() unknown session %d on input %d", session, input);
return;
}
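
The getOutputForDevice() hunks above tighten the direct-output reuse rule: an already-open direct output is handed back only when the audio parameters match and the requesting client has the same uid that originally opened it; otherwise the request falls through to the non-direct path. A standalone sketch of that decision (field and function names are illustrative):

    #include <cstdint>

    struct DirectOutputSketch {
        uint32_t samplingRate;
        int      format;
        uint32_t channelMask;
        uint32_t directClientUid;   // mirrors the new mDirectClientUid field
    };

    // Reuse only when the parameters match and the same client opened the output.
    bool canReuseDirectOutput(const DirectOutputSketch& open,
                              uint32_t rate, int format, uint32_t mask,
                              uint32_t clientUid) {
        return open.samplingRate == rate &&
               open.format == format &&
               open.channelMask == mask &&
               open.directClientUid == clientUid;
    }
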
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index cea3f54..3dfcde6 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -625,6 +625,7 @@
audio_io_handle_t getOutputForDevice(
audio_devices_t device,
audio_session_t session,
+ uid_t client,
audio_stream_type_t stream,
uint32_t samplingRate,
audio_format_t format,
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index f708654..0547a69 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -651,22 +651,6 @@
return String8(formattedTime);
}
-int CameraService::getCameraPriorityFromProcState(int procState) {
- // Find the priority for the camera usage based on the process state. Higher priority clients
- // win for evictions.
- if (procState < 0) {
- ALOGE("%s: Received invalid process state %d from ActivityManagerService!", __FUNCTION__,
- procState);
- return -1;
- }
- // Treat sleeping TOP processes the same as regular TOP processes, for
- // access priority. This is important for lock-screen camera launch scenarios
- if (procState == PROCESS_STATE_TOP_SLEEPING) {
- procState = PROCESS_STATE_TOP;
- }
- return INT_MAX - procState;
-}
-
Status CameraService::getCameraVendorTagDescriptor(
/*out*/
hardware::camera2::params::VendorTagDescriptor* desc) {
@@ -684,7 +668,6 @@
int CameraService::getDeviceVersion(const String8& cameraId, int* facing) {
ATRACE_CALL();
- struct camera_info info;
int deviceVersion = 0;
@@ -692,6 +675,7 @@
int id = cameraIdToInt(cameraId);
if (id < 0) return -1;
+ struct camera_info info;
if (mModule->getCameraInfo(id, &info) != OK) {
return -1;
}
@@ -710,8 +694,15 @@
hardware::hidl_version maxVersion{0,0};
res = mCameraProviderManager->getHighestSupportedVersion(String8::std_string(cameraId),
&maxVersion);
- if (res == NAME_NOT_FOUND) return -1;
+ if (res != OK) return -1;
deviceVersion = HARDWARE_DEVICE_API_VERSION(maxVersion.get_major(), maxVersion.get_minor());
+
+ hardware::CameraInfo info;
+ if (facing) {
+ res = mCameraProviderManager->getCameraInfo(String8::std_string(cameraId), &info);
+ if (res != OK) return -1;
+ *facing = info.facing;
+ }
}
return deviceVersion;
}
@@ -1205,20 +1196,24 @@
std::vector<int> ownerPids(mActiveClientManager.getAllOwners());
ownerPids.push_back(clientPid);
- // Use the value +PROCESS_STATE_NONEXISTENT, to avoid taking
- // address of PROCESS_STATE_NONEXISTENT as a reference argument
- // for the vector constructor. PROCESS_STATE_NONEXISTENT does
- // not have an out-of-class definition.
- std::vector<int> priorities(ownerPids.size(), +PROCESS_STATE_NONEXISTENT);
+ std::vector<int> priorityScores(ownerPids.size());
+ std::vector<int> states(ownerPids.size());
- // Get priorites of all active PIDs
- ProcessInfoService::getProcessStatesFromPids(ownerPids.size(), &ownerPids[0],
- /*out*/&priorities[0]);
+ // Get priority scores of all active PIDs
+ status_t err = ProcessInfoService::getProcessStatesScoresFromPids(
+ ownerPids.size(), &ownerPids[0], /*out*/&states[0],
+ /*out*/&priorityScores[0]);
+ if (err != OK) {
+ ALOGE("%s: Priority score query failed: %d",
+ __FUNCTION__, err);
+ return err;
+ }
// Update all active clients' priorities
- std::map<int,int> pidToPriorityMap;
+ std::map<int,resource_policy::ClientPriority> pidToPriorityMap;
for (size_t i = 0; i < ownerPids.size() - 1; i++) {
- pidToPriorityMap.emplace(ownerPids[i], getCameraPriorityFromProcState(priorities[i]));
+ pidToPriorityMap.emplace(ownerPids[i],
+ resource_policy::ClientPriority(priorityScores[i], states[i]));
}
mActiveClientManager.updatePriorities(pidToPriorityMap);
@@ -1235,7 +1230,9 @@
clientDescriptor = CameraClientManager::makeClientDescriptor(cameraId,
sp<BasicClient>{nullptr}, static_cast<int32_t>(state->getCost()),
state->getConflicting(),
- getCameraPriorityFromProcState(priorities[priorities.size() - 1]), clientPid);
+ priorityScores[priorityScores.size() - 1],
+ clientPid,
+ states[states.size() - 1]);
// Find clients that would be evicted
auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
@@ -1252,19 +1249,22 @@
mActiveClientManager.getIncompatibleClients(clientDescriptor);
String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
- "(PID %d, priority %d) due to eviction policy", curTime.string(),
+ "(PID %d, score %d state %d) due to eviction policy", curTime.string(),
cameraId.string(), packageName.string(), clientPid,
- getCameraPriorityFromProcState(priorities[priorities.size() - 1]));
+ priorityScores[priorityScores.size() - 1],
+ states[states.size() - 1]);
for (auto& i : incompatibleClients) {
msg.appendFormat("\n - Blocked by existing device %s client for package %s"
- "(PID %" PRId32 ", priority %" PRId32 ")", i->getKey().string(),
- String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
- i->getPriority());
+ "(PID %" PRId32 ", score %" PRId32 ", state %" PRId32 ")",
+ i->getKey().string(),
+ String8{i->getValue()->getPackageName()}.string(),
+ i->getOwnerId(), i->getPriority().getScore(),
+ i->getPriority().getState());
ALOGE(" Conflicts with: Device %s, client package %s (PID %"
- PRId32 ", priority %" PRId32 ")", i->getKey().string(),
+ PRId32 ", score %" PRId32 ", state %" PRId32 ")", i->getKey().string(),
String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
- i->getPriority());
+ i->getPriority().getScore(), i->getPriority().getState());
}
// Log the client's attempt
@@ -1292,12 +1292,14 @@
// Log the clients evicted
logEvent(String8::format("EVICT device %s client held by package %s (PID"
- " %" PRId32 ", priority %" PRId32 ")\n - Evicted by device %s client for"
- " package %s (PID %d, priority %" PRId32 ")",
+ " %" PRId32 ", score %" PRId32 ", state %" PRId32 ")\n - Evicted by device %s client for"
+ " package %s (PID %d, score %" PRId32 ", state %" PRId32 ")",
i->getKey().string(), String8{clientSp->getPackageName()}.string(),
- i->getOwnerId(), i->getPriority(), cameraId.string(),
+ i->getOwnerId(), i->getPriority().getScore(),
+ i->getPriority().getState(), cameraId.string(),
packageName.string(), clientPid,
- getCameraPriorityFromProcState(priorities[priorities.size() - 1])));
+ priorityScores[priorityScores.size() - 1],
+ states[states.size() - 1]));
// Notify the client of disconnection
clientSp->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
@@ -1532,17 +1534,14 @@
// give flashlight a chance to close devices if necessary.
mFlashlight->prepareDeviceOpen(cameraId);
- // TODO: Update getDeviceVersion + HAL interface to use strings for Camera IDs
- int id = cameraIdToInt(cameraId);
- if (id == -1) {
- ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
- cameraId.string());
- return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
- "Bad camera ID \"%s\" passed to camera open", cameraId.string());
- }
-
int facing = -1;
int deviceVersion = getDeviceVersion(cameraId, /*out*/&facing);
+ if (facing == -1) {
+ ALOGE("%s: Unable to get camera device \"%s\" facing", __FUNCTION__, cameraId.string());
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Unable to get camera device \"%s\" facing", cameraId.string());
+ }
+
sp<BasicClient> tmp = nullptr;
if(!(ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
@@ -1879,10 +1878,7 @@
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- int facing = -1;
-
- int deviceVersion = getDeviceVersion(id, &facing);
-
+ int deviceVersion = getDeviceVersion(id);
switch(deviceVersion) {
case CAMERA_DEVICE_API_VERSION_1_0:
case CAMERA_DEVICE_API_VERSION_3_0:
@@ -2134,9 +2130,11 @@
// Log the clients evicted
logEvent(String8::format("EVICT device %s client held by package %s (PID %"
- PRId32 ", priority %" PRId32 ")\n - Evicted due to user switch.",
- i->getKey().string(), String8{clientSp->getPackageName()}.string(),
- i->getOwnerId(), i->getPriority()));
+ PRId32 ", score %" PRId32 ", state %" PRId32 ")\n - Evicted due"
+ " to user switch.", i->getKey().string(),
+ String8{clientSp->getPackageName()}.string(),
+ i->getOwnerId(), i->getPriority().getScore(),
+ i->getPriority().getState()));
}
@@ -2188,8 +2186,11 @@
const std::set<userid_t>& newUserIds) {
String8 newUsers = toString(newUserIds);
String8 oldUsers = toString(oldUserIds);
+ if (oldUsers.size() == 0) {
+ oldUsers = "<None>";
+ }
// Log the new and old users
- logEvent(String8::format("USER_SWITCH previous allowed users: %s , current allowed users: %s",
+ logEvent(String8::format("USER_SWITCH previous allowed user IDs: %s, current allowed user IDs: %s",
oldUsers.string(), newUsers.string()));
}
@@ -2669,7 +2670,8 @@
String8 key = i->getKey();
int32_t cost = i->getCost();
int32_t pid = i->getOwnerId();
- int32_t priority = i->getPriority();
+ int32_t score = i->getPriority().getScore();
+ int32_t state = i->getPriority().getState();
auto conflicting = i->getConflicting();
auto clientSp = i->getValue();
String8 packageName;
@@ -2679,8 +2681,8 @@
uid_t clientUid = clientSp->getClientUid();
clientUserId = multiuser_get_user_id(clientUid);
}
- ret.appendFormat("\n(Camera ID: %s, Cost: %" PRId32 ", PID: %" PRId32 ", Priority: %"
- PRId32 ", ", key.string(), cost, pid, priority);
+ ret.appendFormat("\n(Camera ID: %s, Cost: %" PRId32 ", PID: %" PRId32 ", Score: %"
+ PRId32 ", State: %" PRId32, key.string(), cost, pid, score, state);
if (clientSp.get() != nullptr) {
ret.appendFormat("User Id: %d, ", clientUserId);
@@ -2702,16 +2704,18 @@
CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
const String8& key, const sp<BasicClient>& value, int32_t cost,
- const std::set<String8>& conflictingKeys, int32_t priority, int32_t ownerId) {
+ const std::set<String8>& conflictingKeys, int32_t score, int32_t ownerId,
+ int32_t state) {
return std::make_shared<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>(
- key, value, cost, conflictingKeys, priority, ownerId);
+ key, value, cost, conflictingKeys, score, ownerId, state);
}
CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
const sp<BasicClient>& value, const CameraService::DescriptorPtr& partial) {
return makeClientDescriptor(partial->getKey(), value, partial->getCost(),
- partial->getConflicting(), partial->getPriority(), partial->getOwnerId());
+ partial->getConflicting(), partial->getPriority().getScore(),
+ partial->getOwnerId(), partial->getPriority().getState());
}
// ----------------------------------------------------------------------------
@@ -2735,181 +2739,164 @@
status_t CameraService::dump(int fd, const Vector<String16>& args) {
ATRACE_CALL();
- String8 result("Dump of the Camera Service:\n");
if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
- result = result.format("Permission Denial: "
- "can't dump CameraService from pid=%d, uid=%d\n",
+ dprintf(fd, "Permission Denial: can't dump CameraService from pid=%d, uid=%d\n",
getCallingPid(),
getCallingUid());
- write(fd, result.string(), result.size());
- } else {
- bool locked = tryLock(mServiceLock);
- // failed to lock - CameraService is probably deadlocked
- if (!locked) {
- result.append("CameraService may be deadlocked\n");
- write(fd, result.string(), result.size());
- }
+ return NO_ERROR;
+ }
+ bool locked = tryLock(mServiceLock);
+ // failed to lock - CameraService is probably deadlocked
+ if (!locked) {
+ dprintf(fd, "!! CameraService may be deadlocked !!\n");
+ }
- bool hasClient = false;
- if (!mInitialized) {
- result = String8::format("No camera HAL available!\n");
- write(fd, result.string(), result.size());
+ if (!mInitialized) {
+ dprintf(fd, "!! No camera HAL available !!\n");
- // Dump event log for error information
- dumpEventLog(fd);
-
- if (locked) mServiceLock.unlock();
- return NO_ERROR;
- }
- if (mModule == nullptr) {
- mCameraProviderManager->dump(fd, args);
- // TODO - need way more dumping here
-
- if (locked) mServiceLock.unlock();
- return NO_ERROR;
- }
-
- result = String8::format("Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
- result.appendFormat("Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
- result.appendFormat("Camera module name: %s\n", mModule->getModuleName());
- result.appendFormat("Camera module author: %s\n", mModule->getModuleAuthor());
- result.appendFormat("Number of camera devices: %d\n", mNumberOfCameras);
- result.appendFormat("Number of normal camera devices: %d\n", mNumberOfNormalCameras);
- String8 activeClientString = mActiveClientManager.toString();
- result.appendFormat("Active Camera Clients:\n%s", activeClientString.string());
- result.appendFormat("Allowed users:\n%s\n", toString(mAllowedUsers).string());
-
- sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
- if (desc == NULL) {
- result.appendFormat("Vendor tags left unimplemented.\n");
- } else {
- result.appendFormat("Vendor tag definitions:\n");
- }
-
- write(fd, result.string(), result.size());
-
- if (desc != NULL) {
- desc->dump(fd, /*verbosity*/2, /*indentation*/4);
- }
-
+ // Dump event log for error information
dumpEventLog(fd);
- bool stateLocked = tryLock(mCameraStatesLock);
- if (!stateLocked) {
- result = String8::format("CameraStates in use, may be deadlocked\n");
- write(fd, result.string(), result.size());
+ if (locked) mServiceLock.unlock();
+ return NO_ERROR;
+ }
+ dprintf(fd, "\n== Service global info: ==\n\n");
+ dprintf(fd, "Number of camera devices: %d\n", mNumberOfCameras);
+ dprintf(fd, "Number of normal camera devices: %d\n", mNumberOfNormalCameras);
+ String8 activeClientString = mActiveClientManager.toString();
+ dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string());
+ dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string());
+
+ dumpEventLog(fd);
+
+ bool stateLocked = tryLock(mCameraStatesLock);
+ if (!stateLocked) {
+ dprintf(fd, "CameraStates in use, may be deadlocked\n");
+ }
+
+ for (auto& state : mCameraStates) {
+ String8 cameraId = state.first;
+
+ dprintf(fd, "== Camera device %s dynamic info: ==\n", cameraId.string());
+
+ CameraParameters p = state.second->getShimParams();
+ if (!p.isEmpty()) {
+ dprintf(fd, " Camera1 API shim is using parameters:\n ");
+ p.dump(fd, args);
}
- for (auto& state : mCameraStates) {
- String8 cameraId = state.first;
- result = String8::format("Camera %s information:\n", cameraId.string());
- camera_info info;
+ auto clientDescriptor = mActiveClientManager.get(cameraId);
+ if (clientDescriptor != nullptr) {
+ dprintf(fd, " Device %s is open. Client instance dump:\n",
+ cameraId.string());
+ dprintf(fd, " Client priority score: %d state: %d\n",
+ clientDescriptor->getPriority().getScore(),
+ clientDescriptor->getPriority().getState());
+ dprintf(fd, " Client PID: %d\n", clientDescriptor->getOwnerId());
+ auto client = clientDescriptor->getValue();
+ dprintf(fd, " Client package: %s\n",
+ String8(client->getPackageName()).string());
+
+ client->dumpClient(fd, args);
+ } else {
+ dprintf(fd, " Device %s is closed, no client instance\n",
+ cameraId.string());
+ }
+
+ if (mModule != nullptr) {
+ dprintf(fd, "== Camera HAL device %s static information: ==\n", cameraId.string());
+
+ camera_info info;
status_t rc = mModule->getCameraInfo(cameraIdToInt(cameraId), &info);
+ int deviceVersion = -1;
if (rc != OK) {
- result.appendFormat(" Error reading static information!\n");
- write(fd, result.string(), result.size());
+ dprintf(fd, " Error reading static information!\n");
} else {
- result.appendFormat(" Facing: %s\n",
+ dprintf(fd, " Facing: %s\n",
info.facing == CAMERA_FACING_BACK ? "BACK" :
- info.facing == CAMERA_FACING_FRONT ? "FRONT" : "EXTERNAL");
- result.appendFormat(" Orientation: %d\n", info.orientation);
- int deviceVersion;
+ info.facing == CAMERA_FACING_FRONT ? "FRONT" : "EXTERNAL");
+ dprintf(fd, " Orientation: %d\n", info.orientation);
+
if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0) {
deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
} else {
deviceVersion = info.device_version;
}
-
- auto conflicting = state.second->getConflicting();
- result.appendFormat(" Resource Cost: %d\n", state.second->getCost());
- result.appendFormat(" Conflicting Devices:");
- for (auto& id : conflicting) {
- result.appendFormat(" %s", id.string());
- }
- if (conflicting.size() == 0) {
- result.appendFormat(" NONE");
- }
- result.appendFormat("\n");
-
- result.appendFormat(" Device version: %#x\n", deviceVersion);
- if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
- result.appendFormat(" Device static metadata:\n");
- write(fd, result.string(), result.size());
- dump_indented_camera_metadata(info.static_camera_characteristics,
- fd, /*verbosity*/2, /*indentation*/4);
- } else {
- write(fd, result.string(), result.size());
- }
-
- CameraParameters p = state.second->getShimParams();
- if (!p.isEmpty()) {
- result = String8::format(" Camera1 API shim is using parameters:\n ");
- write(fd, result.string(), result.size());
- p.dump(fd, args);
- }
}
- auto clientDescriptor = mActiveClientManager.get(cameraId);
- if (clientDescriptor == nullptr) {
- result = String8::format(" Device %s is closed, no client instance\n",
- cameraId.string());
- write(fd, result.string(), result.size());
- continue;
+ auto conflicting = state.second->getConflicting();
+ dprintf(fd, " Resource Cost: %d\n", state.second->getCost());
+ dprintf(fd, " Conflicting Devices:");
+ for (auto& id : conflicting) {
+ dprintf(fd, " %s", id.string());
}
- hasClient = true;
- result = String8::format(" Device %s is open. Client instance dump:\n\n",
- cameraId.string());
- result.appendFormat("Client priority level: %d\n", clientDescriptor->getPriority());
- result.appendFormat("Client PID: %d\n", clientDescriptor->getOwnerId());
+ if (conflicting.size() == 0) {
+ dprintf(fd, " NONE");
+ }
+ dprintf(fd, "\n");
- auto client = clientDescriptor->getValue();
- result.appendFormat("Client package: %s\n",
- String8(client->getPackageName()).string());
- write(fd, result.string(), result.size());
-
- client->dumpClient(fd, args);
+ dprintf(fd, " Device version: %#x\n", deviceVersion);
+ if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
+ dprintf(fd, " Device static metadata:\n");
+ dump_indented_camera_metadata(info.static_camera_characteristics,
+ fd, /*verbosity*/2, /*indentation*/4);
+ }
}
- if (stateLocked) mCameraStatesLock.unlock();
+ }
- if (!hasClient) {
- result = String8::format("\nNo active camera clients yet.\n");
- write(fd, result.string(), result.size());
- }
+ if (stateLocked) mCameraStatesLock.unlock();
- if (locked) mServiceLock.unlock();
+ if (locked) mServiceLock.unlock();
- // Dump camera traces if there were any
- write(fd, "\n", 1);
- camera3::CameraTraces::dump(fd, args);
+ if (mModule == nullptr) {
+ mCameraProviderManager->dump(fd, args);
+ } else {
+ dprintf(fd, "\n== Camera Module HAL static info: ==\n");
+ dprintf(fd, "Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
+ dprintf(fd, "Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
+ dprintf(fd, "Camera module name: %s\n", mModule->getModuleName());
+ dprintf(fd, "Camera module author: %s\n", mModule->getModuleAuthor());
+ }
- // Process dump arguments, if any
- int n = args.size();
- String16 verboseOption("-v");
- String16 unreachableOption("--unreachable");
- for (int i = 0; i < n; i++) {
- if (args[i] == verboseOption) {
- // change logging level
- if (i + 1 >= n) continue;
- String8 levelStr(args[i+1]);
- int level = atoi(levelStr.string());
- result = String8::format("\nSetting log level to %d.\n", level);
- setLogLevel(level);
- write(fd, result.string(), result.size());
- } else if (args[i] == unreachableOption) {
- // Dump memory analysis
- // TODO - should limit be an argument parameter?
- UnreachableMemoryInfo info;
- bool success = GetUnreachableMemory(info, /*limit*/ 10000);
- if (!success) {
- dprintf(fd, "\nUnable to dump unreachable memory. "
- "Try disabling SELinux enforcement.\n");
- } else {
- dprintf(fd, "\nDumping unreachable memory:\n");
- std::string s = info.ToString(/*log_contents*/ true);
- write(fd, s.c_str(), s.size());
- }
+ dprintf(fd, "\n== Vendor tags: ==\n\n");
+
+ sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+ if (desc == NULL) {
+ dprintf(fd, "No vendor tags.\n");
+ } else {
+ desc->dump(fd, /*verbosity*/2, /*indentation*/2);
+ }
+
+ // Dump camera traces if there were any
+ dprintf(fd, "\n");
+ camera3::CameraTraces::dump(fd, args);
+
+ // Process dump arguments, if any
+ int n = args.size();
+ String16 verboseOption("-v");
+ String16 unreachableOption("--unreachable");
+ for (int i = 0; i < n; i++) {
+ if (args[i] == verboseOption) {
+ // change logging level
+ if (i + 1 >= n) continue;
+ String8 levelStr(args[i+1]);
+ int level = atoi(levelStr.string());
+ dprintf(fd, "\nSetting log level to %d.\n", level);
+ setLogLevel(level);
+ } else if (args[i] == unreachableOption) {
+ // Dump memory analysis
+ // TODO - should limit be an argument parameter?
+ UnreachableMemoryInfo info;
+ bool success = GetUnreachableMemory(info, /*limit*/ 10000);
+ if (!success) {
+ dprintf(fd, "\n== Unable to dump unreachable memory. "
+ "Try disabling SELinux enforcement. ==\n");
+ } else {
+ dprintf(fd, "\n== Dumping unreachable memory: ==\n");
+ std::string s = info.ToString(/*log_contents*/ true);
+ write(fd, s.c_str(), s.size());
}
}
}
@@ -2917,21 +2904,19 @@
}
void CameraService::dumpEventLog(int fd) {
- String8 result = String8("\nPrior client events (most recent at top):\n");
+ dprintf(fd, "\n== Camera service events log (most recent at top): ==\n");
Mutex::Autolock l(mLogLock);
for (const auto& msg : mEventLog) {
- result.appendFormat(" %s\n", msg.string());
+ dprintf(fd, " %s\n", msg.string());
}
if (mEventLog.size() == DEFAULT_EVENT_LOG_LENGTH) {
- result.append(" ...\n");
+ dprintf(fd, " ...\n");
} else if (mEventLog.size() == 0) {
- result.append(" [no events yet]\n");
+ dprintf(fd, " [no events yet]\n");
}
- result.append("\n");
-
- write(fd, result.string(), result.size());
+ dprintf(fd, "\n");
}
void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index d463b59..c7acdc9 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -75,11 +75,6 @@
API_2 = 2
};
- // Process state (mirrors frameworks/base/core/java/android/app/ActivityManager.java)
- static const int PROCESS_STATE_NONEXISTENT = -1;
- static const int PROCESS_STATE_TOP = 2;
- static const int PROCESS_STATE_TOP_SLEEPING = 5;
-
// 3 second busy timeout when other clients are connecting
static const nsecs_t DEFAULT_CONNECT_TIMEOUT_NS = 3000000000;
@@ -402,8 +397,8 @@
* Make a ClientDescriptor object wrapping the given BasicClient strong pointer.
*/
static DescriptorPtr makeClientDescriptor(const String8& key, const sp<BasicClient>& value,
- int32_t cost, const std::set<String8>& conflictingKeys, int32_t priority,
- int32_t ownerId);
+ int32_t cost, const std::set<String8>& conflictingKeys, int32_t score,
+ int32_t ownerId, int32_t state);
/**
* Make a ClientDescriptor object wrapping the given BasicClient strong pointer with
@@ -775,11 +770,6 @@
*/
static String8 getFormattedCurrentTime();
- /**
- * Get the camera eviction priority from the current process state given by ActivityManager.
- */
- static int getCameraPriorityFromProcState(int procState);
-
static binder::Status makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index f6ca903..2618838 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1285,37 +1285,34 @@
}
status_t CameraDeviceClient::dumpClient(int fd, const Vector<String16>& args) {
- String8 result;
- result.appendFormat("CameraDeviceClient[%s] (%p) dump:\n",
+ dprintf(fd, " CameraDeviceClient[%s] (%p) dump:\n",
mCameraIdStr.string(),
(getRemoteCallback() != NULL ?
IInterface::asBinder(getRemoteCallback()).get() : NULL) );
- result.appendFormat(" Current client UID %u\n", mClientUid);
+ dprintf(fd, " Current client UID %u\n", mClientUid);
- result.append(" State:\n");
- result.appendFormat(" Request ID counter: %d\n", mRequestIdCounter);
+ dprintf(fd, " State:\n");
+ dprintf(fd, " Request ID counter: %d\n", mRequestIdCounter);
if (mInputStream.configured) {
- result.appendFormat(" Current input stream ID: %d\n",
- mInputStream.id);
+ dprintf(fd, " Current input stream ID: %d\n", mInputStream.id);
} else {
- result.append(" No input stream configured.\n");
+ dprintf(fd, " No input stream configured.\n");
}
if (!mStreamMap.isEmpty()) {
- result.append(" Current output stream/surface IDs:\n");
+ dprintf(fd, " Current output stream/surface IDs:\n");
for (size_t i = 0; i < mStreamMap.size(); i++) {
- result.appendFormat(" Stream %d Surface %d\n",
+ dprintf(fd, " Stream %d Surface %d\n",
mStreamMap.valueAt(i).streamId(),
mStreamMap.valueAt(i).surfaceId());
}
} else if (!mDeferredStreams.isEmpty()) {
- result.append(" Current deferred surface output stream IDs:\n");
+ dprintf(fd, " Current deferred surface output stream IDs:\n");
for (auto& streamId : mDeferredStreams) {
- result.appendFormat(" Stream %d\n", streamId);
+ dprintf(fd, " Stream %d\n", streamId);
}
} else {
- result.append(" No output streams configured.\n");
+ dprintf(fd, " No output streams configured.\n");
}
- write(fd, result.string(), result.size());
// TODO: print dynamic/request section from most recent requests
mFrameProcessor->dump(fd, args);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index f691dc1..f44fd08 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -47,43 +47,25 @@
status_t CameraProviderManager::initialize(wp<CameraProviderManager::StatusListener> listener,
ServiceInteractionProxy* proxy) {
- int numProviders = 0;
- {
- std::lock_guard<std::mutex> lock(mInterfaceMutex);
- if (proxy == nullptr) {
- ALOGE("%s: No valid service interaction proxy provided", __FUNCTION__);
- return BAD_VALUE;
- }
- mListener = listener;
- mServiceProxy = proxy;
-
- // Registering will trigger notifications for all already-known providers
- bool success = mServiceProxy->registerForNotifications(
- /* instance name, empty means no filter */ "",
- this);
- if (!success) {
- ALOGE("%s: Unable to register with hardware service manager for notifications "
- "about camera providers", __FUNCTION__);
- return INVALID_OPERATION;
- }
- numProviders = mProviders.size();
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ if (proxy == nullptr) {
+ ALOGE("%s: No valid service interaction proxy provided", __FUNCTION__);
+ return BAD_VALUE;
}
+ mListener = listener;
+ mServiceProxy = proxy;
- if (numProviders == 0) {
- // Remote provider might have not been initialized
- // Wait for a bit and see if we get one registered
- std::mutex mtx;
- std::unique_lock<std::mutex> lock(mtx);
- mProviderRegistered.wait_for(lock, std::chrono::seconds(15));
- if (mProviders.size() == 0) {
- ALOGI("%s: Unable to get one registered provider within timeout!",
- __FUNCTION__);
- std::lock_guard<std::mutex> lock(mInterfaceMutex);
- // See if there's a passthrough HAL, but let's not complain if there's not
- addProvider(kLegacyProviderName, /*expected*/ false);
- }
+ // Registering will trigger notifications for all already-known providers
+ bool success = mServiceProxy->registerForNotifications(
+ /* instance name, empty means no filter */ "",
+ this);
+ if (!success) {
+ ALOGE("%s: Unable to register with hardware service manager for notifications "
+ "about camera providers", __FUNCTION__);
+ return INVALID_OPERATION;
}
-
+ // See if there's a passthrough HAL, but let's not complain if there's not
+ addProvider(kLegacyProviderName, /*expected*/ false);
return OK;
}
@@ -294,14 +276,12 @@
std::lock_guard<std::mutex> lock(mInterfaceMutex);
addProvider(name);
- mProviderRegistered.notify_one();
return hardware::Return<void>();
}
status_t CameraProviderManager::dump(int fd, const Vector<String16>& args) {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- dprintf(fd, "Available camera providers and devices:\n");
for (auto& provider : mProviders) {
provider->dump(fd, args);
}
@@ -335,12 +315,11 @@
mServiceProxy->getService(newProvider);
if (interface == nullptr) {
+ ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+ newProvider.c_str());
if (expected) {
- ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
- newProvider.c_str());
return BAD_VALUE;
} else {
- // Not guaranteed to be found, so not an error if it wasn't
return OK;
}
}
@@ -482,18 +461,46 @@
}
status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
- dprintf(fd, " %s: %zu devices:\n", mProviderName.c_str(), mDevices.size());
+ dprintf(fd, "== Camera Provider HAL %s (v2.4) static info: %zu devices: ==\n",
+ mProviderName.c_str(), mDevices.size());
for (auto& device : mDevices) {
- dprintf(fd, " %s: Resource cost: %d\n", device->mName.c_str(),
- device->mResourceCost.resourceCost);
- if (device->mResourceCost.conflictingDevices.size() > 0) {
- dprintf(fd, " Conflicting devices:\n");
+ dprintf(fd, "== Camera HAL device %s (v%d.%d) static information: ==\n", device->mName.c_str(),
+ device->mVersion.get_major(), device->mVersion.get_minor());
+ dprintf(fd, " Resource cost: %d\n", device->mResourceCost.resourceCost);
+ if (device->mResourceCost.conflictingDevices.size() == 0) {
+ dprintf(fd, " Conflicting devices: None\n");
+ } else {
+ dprintf(fd, " Conflicting devices:\n");
for (size_t i = 0; i < device->mResourceCost.conflictingDevices.size(); i++) {
- dprintf(fd, " %s\n",
+ dprintf(fd, " %s\n",
device->mResourceCost.conflictingDevices[i].c_str());
}
}
+ dprintf(fd, " API1 info:\n");
+ dprintf(fd, " Has a flash unit: %s\n",
+ device->hasFlashUnit() ? "true" : "false");
+ hardware::CameraInfo info;
+ status_t res = device->getCameraInfo(&info);
+ if (res != OK) {
+ dprintf(fd, " <Error reading camera info: %s (%d)>\n",
+ strerror(-res), res);
+ } else {
+ dprintf(fd, " Facing: %s\n",
+ info.facing == hardware::CAMERA_FACING_BACK ? "Back" : "Front");
+ dprintf(fd, " Orientation: %d\n", info.orientation);
+ }
+ CameraMetadata info2;
+ res = device->getCameraCharacteristics(&info2);
+ if (res == INVALID_OPERATION) {
+ dprintf(fd, " API2 not directly supported\n");
+ } else if (res != OK) {
+ dprintf(fd, " <Error reading camera characteristics: %s (%d)>\n",
+ strerror(-res), res);
+ } else {
+ dprintf(fd, " API2 camera characteristics:\n");
+ info2.dump(fd, /*verbosity*/ 2, /*indentation*/ 4);
+ }
}
return OK;
}
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index f21e07d..5ae16cd 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -20,7 +20,6 @@
#include <vector>
#include <string>
#include <mutex>
-#include <condition_variable>
#include <camera/CameraParameters2.h>
#include <camera/CameraMetadata.h>
@@ -220,8 +219,6 @@
// All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
mutable std::mutex mInterfaceMutex;
- std::condition_variable mProviderRegistered;
-
// the status listener update callbacks will lock mStatusMutex
mutable std::mutex mStatusListenerMutex;
wp<StatusListener> mListener;
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
index 5a5d7b7..d45891f 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -19,6 +19,7 @@
#define ATRACE_TAG ATRACE_TAG_CAMERA
#include <gui/ISurfaceComposer.h>
+#include <gui/IGraphicBufferAlloc.h>
#include <private/gui/ComposerService.h>
#include <utils/Log.h>
#include <utils/Trace.h>
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.h b/services/camera/libcameraservice/device3/Camera3BufferManager.h
index b5b86a3..f44c4a3 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.h
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.h
@@ -26,6 +26,8 @@
namespace android {
+class IGraphicBufferAlloc;
+
namespace camera3 {
struct StreamInfo;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9873de1..068a2b3 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -749,7 +749,7 @@
mTagMonitor.dumpMonitoredMetadata(fd);
if (mInterface->valid()) {
- lines = String8(" HAL device dump:\n");
+ lines = String8(" HAL device dump:\n");
write(fd, lines.string(), lines.size());
mInterface->dump(fd);
}
@@ -2287,7 +2287,7 @@
// Boost priority of request thread to SCHED_FIFO.
pid_t requestThreadTid = mRequestThread->getTid();
res = requestPriority(getpid(), requestThreadTid,
- kRequestThreadPriority, /*asynchronous*/ false);
+ kRequestThreadPriority, /*isForApp*/ false, /*asynchronous*/ false);
if (res != OK) {
ALOGW("Can't set realtime priority for request processing thread: %s (%d)",
strerror(-res), res);
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index f781ded..1469b74 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -85,6 +85,9 @@
/*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/false);
mBuffersInFlight.push_back(bufferItem);
+ mFrameCount++;
+ mLastTimestamp = bufferItem.mTimestamp;
+
return OK;
}
@@ -220,6 +223,7 @@
mHandoutTotalBufferCount = 0;
mFrameCount = 0;
+ mLastTimestamp = 0;
if (mConsumer.get() == 0) {
sp<IGraphicBufferProducer> producer;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index b5883e3..f971116 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -239,6 +239,7 @@
}
mLastTimestamp = timestamp;
+ mFrameCount++;
return OK;
}
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index 07f9491..c9f43aa 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -322,6 +322,13 @@
"queueing buffer to output failed (%d)", status);
}
+ // If the queued buffer replaces a pending buffer in the async
+ // queue, no onBufferReleased is called by the buffer queue.
+ // Proactively trigger the callback to avoid buffer loss.
+ if (queueOutput.bufferReplaced) {
+ onBufferReleasedByOutputLocked(mOutputs[id]);
+ }
+
ALOGV("queued buffer %#" PRIx64 " to output %p",
bufferItem.mGraphicBuffer->getId(), mOutputs[id].get());
}
@@ -335,6 +342,12 @@
ATRACE_CALL();
Mutex::Autolock lock(mMutex);
+ onBufferReleasedByOutputLocked(from);
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutputLocked(
+ const sp<IGraphicBufferProducer>& from) {
+
sp<GraphicBuffer> buffer;
sp<Fence> fence;
status_t status = from->detachNextBuffer(&buffer, &fence);
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
index 32ae073..92371ff 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -103,6 +103,11 @@
// onFrameAvailable call to proceed.
void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+ // This is the implementation of onBufferReleasedByOutput without the mutex locked.
+ // It could either be called from onBufferReleasedByOutput or from
+ // onFrameAvailable when a buffer in the async buffer queue is overwritten.
+ void onBufferReleasedByOutputLocked(const sp<IGraphicBufferProducer>& from);
+
// When this is called, the splitter disconnects from (i.e., abandons) its
// input queue and signals any waiting onFrameAvailable calls to wake up.
// It still processes callbacks from other outputs, but only detaches their
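
Camera3StreamSplitter now follows the common locked/unlocked split: the public onBufferReleasedByOutput() takes mMutex and forwards to onBufferReleasedByOutputLocked(), which onFrameAvailable() can also call directly while it already holds the lock (the buffer-replaced case above). A standalone sketch of that idiom with illustrative names:

    #include <mutex>

    class SplitterSketch {
    public:
        // Public entry point: takes the lock, then does the real work.
        void onBufferReleased() {
            std::lock_guard<std::mutex> lock(mMutex);
            onBufferReleasedLocked();
        }
        // Callers that already hold mMutex (e.g. the frame-available path when a
        // queued buffer replaces a pending one) call this variant directly.
        void onBufferReleasedLocked() {
            ++mReleasedCount;
        }
    private:
        std::mutex mMutex;
        int mReleasedCount = 0;
    };
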
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
index 374dc5e..0198690 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.cpp
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -74,7 +74,7 @@
return BAD_VALUE;
}
- dprintf(fd, "Camera traces (%zu):\n", pcsList.size());
+ dprintf(fd, "== Camera error traces (%zu): ==\n", pcsList.size());
if (pcsList.empty()) {
dprintf(fd, " No camera traces collected.\n");
diff --git a/services/camera/libcameraservice/utils/ClientManager.h b/services/camera/libcameraservice/utils/ClientManager.h
index 830c50b..d7135f1 100644
--- a/services/camera/libcameraservice/utils/ClientManager.h
+++ b/services/camera/libcameraservice/utils/ClientManager.h
@@ -31,6 +31,43 @@
namespace android {
namespace resource_policy {
+class ClientPriority {
+public:
+ ClientPriority(int32_t score, int32_t state) :
+ mScore(score), mState(state) {}
+
+ int32_t getScore() const { return mScore; }
+ int32_t getState() const { return mState; }
+
+ bool operator==(const ClientPriority& rhs) const {
+ return (this->mScore == rhs.mScore) && (this->mState == rhs.mState);
+ }
+
+ bool operator< (const ClientPriority& rhs) const {
+ if (this->mScore == rhs.mScore) {
+ return this->mState < rhs.mState;
+ } else {
+ return this->mScore < rhs.mScore;
+ }
+ }
+
+ bool operator> (const ClientPriority& rhs) const {
+ return rhs < *this;
+ }
+
+ bool operator<=(const ClientPriority& rhs) const {
+ return !(*this > rhs);
+ }
+
+ bool operator>=(const ClientPriority& rhs) const {
+ return !(*this < rhs);
+ }
+
+private:
+ int32_t mScore;
+ int32_t mState;
+};
+
// --------------------------------------------------------------------------------
/**
@@ -45,9 +82,9 @@
class ClientDescriptor final {
public:
ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
- const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId);
+ const std::set<KEY>& conflictingKeys, int32_t score, int32_t ownerId, int32_t state);
ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost, std::set<KEY>&& conflictingKeys,
- int32_t priority, int32_t ownerId);
+ int32_t score, int32_t ownerId, int32_t state);
~ClientDescriptor();
@@ -69,7 +106,7 @@
/**
* Return the priority for this descriptor.
*/
- int32_t getPriority() const;
+ const ClientPriority &getPriority() const;
/**
* Return the owner ID for this descriptor.
@@ -89,7 +126,7 @@
/**
 * Set the priority for this descriptor.
*/
- void setPriority(int32_t priority);
+ void setPriority(const ClientPriority& priority);
// This class is ordered by key
template<class K, class V>
@@ -100,7 +137,7 @@
VALUE mValue;
int32_t mCost;
std::set<KEY> mConflicting;
- int32_t mPriority;
+ ClientPriority mPriority;
int32_t mOwnerId;
}; // class ClientDescriptor
@@ -111,16 +148,17 @@
template<class KEY, class VALUE>
ClientDescriptor<KEY, VALUE>::ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
- const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId) : mKey{key},
- mValue{value}, mCost{cost}, mConflicting{conflictingKeys}, mPriority{priority},
+ const std::set<KEY>& conflictingKeys, int32_t score, int32_t ownerId, int32_t state) :
+ mKey{key}, mValue{value}, mCost{cost}, mConflicting{conflictingKeys},
+ mPriority(score, state),
mOwnerId{ownerId} {}
template<class KEY, class VALUE>
ClientDescriptor<KEY, VALUE>::ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost,
- std::set<KEY>&& conflictingKeys, int32_t priority, int32_t ownerId) :
+ std::set<KEY>&& conflictingKeys, int32_t score, int32_t ownerId, int32_t state) :
mKey{std::forward<KEY>(key)}, mValue{std::forward<VALUE>(value)}, mCost{cost},
- mConflicting{std::forward<std::set<KEY>>(conflictingKeys)}, mPriority{priority},
- mOwnerId{ownerId} {}
+ mConflicting{std::forward<std::set<KEY>>(conflictingKeys)},
+ mPriority(score, state), mOwnerId{ownerId} {}
template<class KEY, class VALUE>
ClientDescriptor<KEY, VALUE>::~ClientDescriptor() {}
@@ -141,7 +179,7 @@
}
template<class KEY, class VALUE>
-int32_t ClientDescriptor<KEY, VALUE>::getPriority() const {
+const ClientPriority& ClientDescriptor<KEY, VALUE>::getPriority() const {
return mPriority;
}
@@ -165,7 +203,7 @@
}
template<class KEY, class VALUE>
-void ClientDescriptor<KEY, VALUE>::setPriority(int32_t priority) {
+void ClientDescriptor<KEY, VALUE>::setPriority(const ClientPriority& priority) {
mPriority = priority;
}
@@ -231,7 +269,7 @@
* Given a map containing owner (pid) -> priority mappings, update the priority of each
* ClientDescriptor with an owner in this mapping.
*/
- void updatePriorities(const std::map<int32_t,int32_t>& ownerPriorityList);
+ void updatePriorities(const std::map<int32_t,ClientPriority>& ownerPriorityList);
/**
* Remove all ClientDescriptors.
@@ -383,17 +421,17 @@
const KEY& key = client->getKey();
int32_t cost = client->getCost();
- int32_t priority = client->getPriority();
+ ClientPriority priority = client->getPriority();
int32_t owner = client->getOwnerId();
int64_t totalCost = getCurrentCostLocked() + cost;
// Determine the MRU of the owners tied for having the highest priority
int32_t highestPriorityOwner = owner;
- int32_t highestPriority = priority;
+ ClientPriority highestPriority = priority;
for (const auto& i : mClients) {
- int32_t curPriority = i->getPriority();
- if (curPriority >= highestPriority) {
+ ClientPriority curPriority = i->getPriority();
+ if (curPriority <= highestPriority) {
highestPriority = curPriority;
highestPriorityOwner = i->getOwnerId();
}
@@ -408,7 +446,7 @@
for (const auto& i : mClients) {
const KEY& curKey = i->getKey();
int32_t curCost = i->getCost();
- int32_t curPriority = i->getPriority();
+ ClientPriority curPriority = i->getPriority();
int32_t curOwner = i->getOwnerId();
bool conflicting = (curKey == key || i->isConflicting(key) ||
@@ -417,13 +455,13 @@
if (!returnIncompatibleClients) {
// Find evicted clients
- if (conflicting && curPriority > priority) {
+ if (conflicting && curPriority < priority) {
// Pre-existing conflicting client with higher priority exists
evictList.clear();
evictList.push_back(client);
return evictList;
} else if (conflicting || ((totalCost > mMaxCost && curCost > 0) &&
- (curPriority <= priority) &&
+ (curPriority >= priority) &&
!(highestPriorityOwner == owner && owner == curOwner))) {
// Add a pre-existing client to the eviction list if:
// - We are adding a client with higher priority that conflicts with this one.
@@ -437,7 +475,7 @@
} else {
// Find clients preventing the incoming client from being added
- if (curPriority > priority && (conflicting || (totalCost > mMaxCost && curCost > 0))) {
+ if (curPriority < priority && (conflicting || (totalCost > mMaxCost && curCost > 0))) {
// Pre-existing conflicting client with higher priority exists
evictList.push_back(i);
}
@@ -524,7 +562,7 @@
template<class KEY, class VALUE, class LISTENER>
void ClientManager<KEY, VALUE, LISTENER>::updatePriorities(
- const std::map<int32_t,int32_t>& ownerPriorityList) {
+ const std::map<int32_t,ClientPriority>& ownerPriorityList) {
Mutex::Autolock lock(mLock);
for (auto& i : mClients) {
auto j = ownerPriorityList.find(i->getOwnerId());
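
ClientPriority orders clients by (score, state), with numerically smaller values meaning a more important client, which is why the comparisons in wouldEvict() flip direction (>= becomes <=, > becomes <). A standalone sketch of the ordering (illustrative type name, arbitrary example values):

    #include <cassert>
    #include <cstdint>

    struct PrioritySketch {                 // mirrors the shape of ClientPriority
        int32_t score;
        int32_t state;
        bool operator<(const PrioritySketch& rhs) const {
            if (score == rhs.score) {
                return state < rhs.state;
            }
            return score < rhs.score;
        }
    };

    int main() {
        PrioritySketch moreImportant{100, 2};    // arbitrary example values
        PrioritySketch lessImportant{400, 16};
        // The smaller (score, state) pair sorts first, i.e. it outranks the other
        // client and would win an eviction contest under the flipped comparisons.
        assert(moreImportant < lessImportant);
        return 0;
    }
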
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index eacafdd..35c1f5b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -280,6 +280,7 @@
nsecs_t ts_since = 0;
String16 clearOption("-clear");
String16 sinceOption("-since");
+ String16 helpOption("-help");
int n = args.size();
for (int i = 0; i < n; i++) {
String8 myarg(args[i]);
@@ -298,6 +299,16 @@
} else {
ts_since = 0;
}
+ // command line is milliseconds; internal units are nano-seconds
+ ts_since *= 1000*1000;
+ } else if (args[i] == helpOption) {
+ result.append("Recognized parameters:\n");
+ result.append("-help this help message\n");
+ result.append("-clear clears out saved records\n");
+ result.append("-since XXX include records since XXX\n");
+ result.append(" (XXX is milliseconds since the UNIX epoch)\n");
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
}
}
@@ -364,8 +375,6 @@
}
String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList, nsecs_t ts_since) {
- const size_t SIZE = 512;
- char buffer[SIZE];
String8 result;
int slot = 0;
@@ -379,12 +388,7 @@
continue;
}
AString entry = (*it)->toString();
- snprintf(buffer, sizeof(buffer), "%4d: %s",
- slot, entry.c_str());
- result.append(buffer);
- buffer[0] = '\n';
- buffer[1] = '\0';
- result.append(buffer);
+ result.appendFormat("%5d: %s\n", slot, entry.c_str());
slot++;
}
}
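For context, a standalone sketch of the "-since"/"-help" handling added above: the value is taken in milliseconds on the command line and stored internally in nanoseconds. The argv parsing and printf calls are illustrative only; the real method works on Binder dump arguments and appends to a String8 result.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    using nsecs_t = int64_t;   // assumption: mirrors the Android typedef

    int main(int argc, char** argv) {
        nsecs_t ts_since = 0;
        for (int i = 1; i < argc; i++) {
            if (strcmp(argv[i], "-since") == 0 && i + 1 < argc) {
                ts_since = atoll(argv[++i]);   // milliseconds from the caller
                ts_since *= 1000 * 1000;       // internal units are nanoseconds
            } else if (strcmp(argv[i], "-help") == 0) {
                printf("Recognized parameters:\n"
                       "-help       this help message\n"
                       "-clear      clears out saved records\n"
                       "-since XXX  include records since XXX (ms since the UNIX epoch)\n");
                return 0;
            }
        }
        printf("would dump records newer than %lld ns\n", (long long) ts_since);
        return 0;
    }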
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index a5f0751..4cbf737 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -17,7 +17,8 @@
LOCAL_REQUIRED_MODULES_arm := mediacodec-seccomp.policy
LOCAL_SRC_FILES := main_codecservice.cpp minijail/minijail.cpp
LOCAL_SHARED_LIBRARIES := libmedia libmediacodecservice libbinder libutils \
- liblog libminijail
+ liblog libminijail libcutils \
+ android.hardware.media.omx@1.0
LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright \
$(TOP)/frameworks/native/include/media/openmax
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index a2868c1..f6cde85 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -25,11 +25,14 @@
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
+#include <cutils/properties.h>
// from LOCAL_C_INCLUDES
#include "MediaCodecService.h"
#include "minijail/minijail.h"
+#include <android/hardware/media/omx/1.0/IOmx.h>
+
using namespace android;
int main(int argc __unused, char** argv)
@@ -42,6 +45,21 @@
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
MediaCodecService::instantiate();
+
+ // Treble
+ bool useTrebleOmx = bool(property_get_bool("debug.treble_omx", 0));
+ if (useTrebleOmx) {
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmx> omx = IOmx::getService(true);
+ if (omx == nullptr) {
+ ALOGE("Cannot create a Treble IOmx service.");
+ } else if (omx->registerAsService("default") != OK) {
+ ALOGE("Cannot register a Treble IOmx service.");
+ } else {
+ ALOGV("Treble IOmx service created.");
+ }
+ }
+
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
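A hedged sketch of the consumer side of the registration added above: once debug.treble_omx is set and main_codecservice has registered IOmx under "default", a client can fetch it through the HIDL service manager. The helper name is illustrative and error handling is minimal.

    #define LOG_TAG "TrebleOmxClientSketch"
    #include <android/hardware/media/omx/1.0/IOmx.h>
    #include <cutils/properties.h>
    #include <utils/Log.h>

    using ::android::sp;
    using ::android::hardware::media::omx::V1_0::IOmx;

    // Returns the Treble IOmx if the debug property is set, otherwise nullptr
    // so the caller can fall back to the legacy binder IOMX path.
    sp<IOmx> getTrebleOmxIfEnabled() {
        if (!property_get_bool("debug.treble_omx", 0)) {
            return nullptr;
        }
        sp<IOmx> omx = IOmx::getService("default");
        if (omx == nullptr) {
            ALOGE("Treble IOmx service not available");
        }
        return omx;
    }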
diff --git a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
index b7603bc..a8f2ca9 100644
--- a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
+++ b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
@@ -50,6 +50,7 @@
sched_setscheduler: 1
fstatat64: 1
ugetrlimit: 1
+getdents64: 1
# for attaching to debuggerd on process crash
sigaction: 1
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index f667068..b5831ae 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -24,9 +24,9 @@
libbinder \
liblog \
libmediadrm \
- libutils \
- libandroidfw
-ifeq ($(ENABLE_TREBLE_DRM), true)
+ libhidltransport \
+ libutils
+ifneq ($(DISABLE_TREBLE_DRM), true)
LOCAL_SHARED_LIBRARIES += \
libhidlbase \
libhidlmemory \
@@ -35,8 +35,8 @@
endif
LOCAL_CFLAGS += -Wall -Wextra -Werror
-ifeq ($(ENABLE_TREBLE_DRM), true)
-LOCAL_CFLAGS += -DENABLE_TREBLE_DRM=1
+ifeq ($(DISABLE_TREBLE_DRM), true)
+LOCAL_CFLAGS += -DDISABLE_TREBLE_DRM=1
endif
LOCAL_MODULE:= mediadrmserver
diff --git a/services/mediadrm/MediaDrmService.cpp b/services/mediadrm/MediaDrmService.cpp
index c709b5e..b9ec347 100644
--- a/services/mediadrm/MediaDrmService.cpp
+++ b/services/mediadrm/MediaDrmService.cpp
@@ -24,12 +24,12 @@
#include <binder/IServiceManager.h>
#include <utils/Log.h>
-#ifdef ENABLE_TREBLE_DRM
-#include <media/CryptoHal.h>
-#include <media/DrmHal.h>
-#else
+#ifdef DISABLE_TREBLE_DRM
#include <media/Crypto.h>
#include <media/Drm.h>
+#else
+#include <media/CryptoHal.h>
+#include <media/DrmHal.h>
#endif
namespace android {
@@ -40,18 +40,18 @@
}
sp<ICrypto> MediaDrmService::makeCrypto() {
-#ifdef ENABLE_TREBLE_DRM
- return new CryptoHal;
-#else
+#ifdef DISABLE_TREBLE_DRM
return new Crypto;
+#else
+ return new CryptoHal;
#endif
}
sp<IDrm> MediaDrmService::makeDrm() {
-#ifdef ENABLE_TREBLE_DRM
- return new DrmHal;
-#else
+#ifdef DISABLE_TREBLE_DRM
return new Drm;
+#else
+ return new DrmHal;
#endif
}
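The net effect of the two hunks above is that the Treble (HIDL-backed) DRM path becomes the default and the legacy in-process classes are only compiled in when DISABLE_TREBLE_DRM is set at build time. A condensed restatement of that selection, using type aliases purely for clarity (the alias and function names are illustrative):

    #include <media/ICrypto.h>
    #include <media/IDrm.h>

    #ifdef DISABLE_TREBLE_DRM
    #include <media/Crypto.h>
    #include <media/Drm.h>
    using CryptoImpl = android::Crypto;     // legacy, runs inside mediadrmserver
    using DrmImpl    = android::Drm;
    #else
    #include <media/CryptoHal.h>
    #include <media/DrmHal.h>
    using CryptoImpl = android::CryptoHal;  // default: talks to the DRM HAL over HIDL
    using DrmImpl    = android::DrmHal;
    #endif

    android::sp<android::ICrypto> makeCryptoSketch() { return new CryptoImpl(); }
    android::sp<android::IDrm>    makeDrmSketch()    { return new DrmImpl(); }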
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
new file mode 100644
index 0000000..dfa9753
--- /dev/null
+++ b/services/oboeservice/AAudioService.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <time.h>
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "HandleTracker.h"
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "AAudioService.h"
+#include "AAudioServiceStreamFakeHal.h"
+
+using namespace android;
+using namespace aaudio;
+
+typedef enum
+{
+ AAUDIO_HANDLE_TYPE_DUMMY1, // TODO remove DUMMYs
+ AAUDIO_HANDLE_TYPE_DUMMY2, // make server handles different than client
+ AAUDIO_HANDLE_TYPE_STREAM,
+ AAUDIO_HANDLE_TYPE_COUNT
+} aaudio_service_handle_type_t;
+static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+
+android::AAudioService::AAudioService()
+ : BnAAudioService() {
+}
+
+AAudioService::~AAudioService() {
+}
+
+aaudio_handle_t AAudioService::openStream(aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configuration) {
+ AAudioServiceStreamBase *serviceStream = new AAudioServiceStreamFakeHal();
+ ALOGD("AAudioService::openStream(): created serviceStream = %p", serviceStream);
+ aaudio_result_t result = serviceStream->open(request, configuration);
+ if (result < 0) {
+ ALOGE("AAudioService::openStream(): open returned %d", result);
+ return result;
+ } else {
+ AAudioStream handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
+ ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
+ if (handle < 0) {
+ delete serviceStream;
+ }
+ return handle;
+ }
+}
+
+aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = (AAudioServiceStreamBase *)
+ mHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM,
+ streamHandle);
+ ALOGD("AAudioService.closeStream(0x%08X)", streamHandle);
+ if (serviceStream != nullptr) {
+ ALOGD("AAudioService::closeStream(): deleting serviceStream = %p", serviceStream);
+ delete serviceStream;
+ return AAUDIO_OK;
+ }
+ return AAUDIO_ERROR_INVALID_HANDLE;
+}
+
+AAudioServiceStreamBase *AAudioService::convertHandleToServiceStream(
+ aaudio_handle_t streamHandle) const {
+ return (AAudioServiceStreamBase *) mHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM,
+ (aaudio_handle_t)streamHandle);
+}
+
+aaudio_result_t AAudioService::getStreamDescription(
+ aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ ALOGD("AAudioService::getStreamDescription(), serviceStream = %p", serviceStream);
+ if (serviceStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ return serviceStream->getDescription(parcelable);
+}
+
+aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ ALOGD("AAudioService::startStream(), serviceStream = %p", serviceStream);
+ if (serviceStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->start();
+ return result;
+}
+
+aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ ALOGD("AAudioService::pauseStream(), serviceStream = %p", serviceStream);
+ if (serviceStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->pause();
+ return result;
+}
+
+aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ ALOGD("AAudioService::flushStream(), serviceStream = %p", serviceStream);
+ if (serviceStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ return serviceStream->flush();
+}
+
+aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId,
+ aaudio_nanoseconds_t periodNanoseconds) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::registerAudioThread(), serviceStream == nullptr");
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
+ ALOGE("AAudioService::registerAudioThread(), thread already registered");
+ return AAUDIO_ERROR_INVALID_ORDER;
+ }
+ serviceStream->setRegisteredThread(clientThreadId);
+ // Boost client thread to SCHED_FIFO
+ struct sched_param sp;
+ memset(&sp, 0, sizeof(sp));
+ sp.sched_priority = 2; // TODO use 'requestPriority' function from frameworks/av/media/utils
+ int err = sched_setscheduler(clientThreadId, SCHED_FIFO, &sp);
+ if (err != 0) {
+ ALOGE("AAudioService::sched_setscheduler() failed, errno = %d, priority = %d",
+ errno, sp.sched_priority);
+ return AAUDIO_ERROR_INTERNAL;
+ } else {
+ return AAUDIO_OK;
+ }
+}
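The priority boost in registerAudioThread() above is plain Linux scheduling. Below is a standalone sketch of the same call, keeping the hard-coded priority of 2 from the diff; the helper name is illustrative, and in practice the call needs CAP_SYS_NICE or root.

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <sched.h>
    #include <sys/types.h>

    // Promote the given thread to the SCHED_FIFO real-time class.
    bool boostToFifo(pid_t tid, int priority = 2) {
        struct sched_param sp;
        memset(&sp, 0, sizeof(sp));
        sp.sched_priority = priority;
        if (sched_setscheduler(tid, SCHED_FIFO, &sp) != 0) {
            fprintf(stderr, "sched_setscheduler failed: %s\n", strerror(errno));
            return false;
        }
        return true;
    }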
+
+aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ ALOGI("AAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::unregisterAudioThread(), serviceStream == nullptr");
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ if (serviceStream->getRegisteredThread() != clientThreadId) {
+ ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ serviceStream->setRegisteredThread(0);
+ return AAUDIO_OK;
+}
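Every public entry point in AAudioService above converts an aaudio_handle_t back into a stream pointer via mHandleTracker, and openStream() returns either a negative aaudio_result_t or a positive handle. The toy class below is a stand-in for that mapping, not the real HandleTracker; it only illustrates the put/get/remove contract the service relies on.

    #include <cstdint>
    #include <vector>

    using toy_handle_t = int32_t;

    class ToyHandleTracker {
    public:
        toy_handle_t put(void* ptr) {
            mSlots.push_back(ptr);
            return static_cast<toy_handle_t>(mSlots.size());   // 1-based, always > 0
        }
        void* get(toy_handle_t h) const {
            return valid(h) ? mSlots[h - 1] : nullptr;
        }
        void* remove(toy_handle_t h) {
            if (!valid(h)) return nullptr;
            void* p = mSlots[h - 1];
            mSlots[h - 1] = nullptr;   // handle is now stale, as after closeStream()
            return p;
        }
    private:
        bool valid(toy_handle_t h) const {
            return h > 0 && static_cast<size_t>(h) <= mSlots.size()
                    && mSlots[h - 1] != nullptr;
        }
        std::vector<void*> mSlots;
    };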
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
new file mode 100644
index 0000000..e9625b2
--- /dev/null
+++ b/services/oboeservice/AAudioService.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_AUDIO_SERVICE_H
+#define AAUDIO_AAUDIO_AUDIO_SERVICE_H
+
+#include <time.h>
+#include <pthread.h>
+
+#include <binder/BinderService.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "utility/HandleTracker.h"
+#include "IAAudioService.h"
+#include "AAudioServiceStreamBase.h"
+
+namespace android {
+
+class AAudioService :
+ public BinderService<AAudioService>,
+ public BnAAudioService
+{
+ friend class BinderService<AAudioService>;
+
+public:
+ AAudioService();
+ virtual ~AAudioService();
+
+ static const char* getServiceName() { return "media.audio_aaudio"; }
+
+ virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configuration);
+
+ virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t getStreamDescription(
+ aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable);
+
+ virtual aaudio_result_t startStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t pid, aaudio_nanoseconds_t periodNanoseconds) ;
+
+ virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t pid);
+
+private:
+
+ aaudio::AAudioServiceStreamBase *convertHandleToServiceStream(aaudio_handle_t streamHandle) const;
+
+ HandleTracker mHandleTracker;
+
+};
+
+} /* namespace android */
+
+#endif //AAUDIO_AAUDIO_AUDIO_SERVICE_H
diff --git a/services/oboeservice/AAudioServiceDefinitions.h b/services/oboeservice/AAudioServiceDefinitions.h
new file mode 100644
index 0000000..ee9aaa7
--- /dev/null
+++ b/services/oboeservice/AAudioServiceDefinitions.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_H
+#define AAUDIO_AAUDIO_SERVICE_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/RingBufferParcelable.h"
+
+namespace aaudio {
+
+// TODO move this to an "include" folder for the service.
+
+struct AAudioMessageTimestamp {
+ aaudio_position_frames_t position;
+ int64_t deviceOffset; // add to client position to get device position
+ aaudio_nanoseconds_t timestamp;
+};
+
+typedef enum aaudio_service_event_e : uint32_t {
+ AAUDIO_SERVICE_EVENT_STARTED,
+ AAUDIO_SERVICE_EVENT_PAUSED,
+ AAUDIO_SERVICE_EVENT_FLUSHED,
+ AAUDIO_SERVICE_EVENT_CLOSED,
+ AAUDIO_SERVICE_EVENT_DISCONNECTED
+} aaudio_service_event_t;
+
+struct AAudioMessageEvent {
+ aaudio_service_event_t event;
+ int32_t data1;
+ int64_t data2;
+};
+
+typedef struct AAudioServiceMessage_s {
+ enum class code : uint32_t {
+ NOTHING,
+ TIMESTAMP,
+ EVENT,
+ };
+
+ code what;
+ union {
+ AAudioMessageTimestamp timestamp;
+ AAudioMessageEvent event;
+ };
+} AAudioServiceMessage;
+
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_H
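A short sketch of how the AAudioServiceMessage above is meant to be filled in and dispatched. It assumes only the header in this hunk plus the aaudio_position_frames_t / aaudio_nanoseconds_t typedefs from AAudioDefinitions.h; the function names are illustrative.

    // Assumes the AAudioServiceDefinitions.h header above has been included.
    using aaudio::AAudioServiceMessage;

    AAudioServiceMessage makeTimestampMessage(aaudio_position_frames_t pos,
                                              aaudio_nanoseconds_t when) {
        AAudioServiceMessage msg;
        msg.what = AAudioServiceMessage::code::TIMESTAMP;
        msg.timestamp.position = pos;
        msg.timestamp.deviceOffset = 0;   // assumes client and device positions align
        msg.timestamp.timestamp = when;
        return msg;
    }

    void handleMessage(const AAudioServiceMessage& msg) {
        switch (msg.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            // position/timestamp pair, e.g. written by sendCurrentTimestamp()
            break;
        case AAudioServiceMessage::code::EVENT:
            // e.g. AAUDIO_SERVICE_EVENT_STARTED, with optional data1/data2
            break;
        default:
            break;
        }
    }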
diff --git a/services/oboeservice/OboeServiceMain.cpp b/services/oboeservice/AAudioServiceMain.cpp
similarity index 67%
rename from services/oboeservice/OboeServiceMain.cpp
rename to services/oboeservice/AAudioServiceMain.cpp
index 18bcf2b..aa89180 100644
--- a/services/oboeservice/OboeServiceMain.cpp
+++ b/services/oboeservice/AAudioServiceMain.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "OboeService"
+#define LOG_TAG "AAudioService"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -33,26 +33,26 @@
#include <cutils/ashmem.h>
#include <sys/mman.h>
-#include "OboeService.h"
-#include "IOboeAudioService.h"
-#include "OboeAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "IAAudioService.h"
+#include "AAudioService.h"
using namespace android;
-using namespace oboe;
+using namespace aaudio;
/**
- * This is used to test the OboeService as a standalone application.
- * It is not used when the OboeService is integrated with AudioFlinger.
+ * This is used to test the AAudioService as a standalone application.
+ * It is not used when the AAudioService is integrated with AudioFlinger.
*/
int main(int argc, char **argv) {
- printf("Test OboeService %s\n", argv[1]);
- ALOGD("This is the OboeAudioService");
+ printf("Test AAudioService %s\n", argv[1]);
+ ALOGD("This is the AAudioService");
- defaultServiceManager()->addService(String16("OboeAudioService"), new OboeAudioService());
+ defaultServiceManager()->addService(String16("AAudioService"), new AAudioService());
android::ProcessState::self()->startThreadPool();
- printf("OboeAudioService service is now ready\n");
+ printf("AAudioService service is now ready\n");
IPCThreadState::self()->joinThreadPool();
- printf("OboeAudioService service thread joined\n");
+ printf("AAudioService service thread joined\n");
return 0;
}
diff --git a/services/oboeservice/OboeServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
similarity index 67%
rename from services/oboeservice/OboeServiceStreamBase.cpp
rename to services/oboeservice/AAudioServiceStreamBase.cpp
index 6b7e4e5..a7938dc 100644
--- a/services/oboeservice/OboeServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -14,40 +14,43 @@
* limitations under the License.
*/
-#define LOG_TAG "OboeService"
+#define LOG_TAG "AAudioService"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
-#include "IOboeAudioService.h"
-#include "OboeService.h"
-#include "OboeServiceStreamBase.h"
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "AAudioServiceStreamBase.h"
#include "AudioEndpointParcelable.h"
using namespace android;
-using namespace oboe;
+using namespace aaudio;
/**
* Construct the AudioCommandQueues and the AudioDataQueue
* and fill in the endpoint parcelable.
*/
-OboeServiceStreamBase::OboeServiceStreamBase()
+AAudioServiceStreamBase::AAudioServiceStreamBase()
: mUpMessageQueue(nullptr)
{
// TODO could fail so move out of constructor
mUpMessageQueue = new SharedRingBuffer();
- mUpMessageQueue->allocate(sizeof(OboeServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+ mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
}
-OboeServiceStreamBase::~OboeServiceStreamBase() {
+AAudioServiceStreamBase::~AAudioServiceStreamBase() {
+ Mutex::Autolock _l(mLockUpMessageQueue);
delete mUpMessageQueue;
}
-void OboeServiceStreamBase::sendServiceEvent(oboe_service_event_t event,
+void AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
int32_t data1,
int64_t data2) {
- OboeServiceMessage command;
- command.what = OboeServiceMessage::code::EVENT;
+
+ Mutex::Autolock _l(mLockUpMessageQueue);
+ AAudioServiceMessage command;
+ command.what = AAudioServiceMessage::code::EVENT;
command.event.event = event;
command.event.data1 = data1;
command.event.data2 = data2;
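The new mLockUpMessageQueue above is taken both in the destructor and in sendServiceEvent(), so the ring buffer cannot be deleted while an event is being queued. A compact sketch of that guarded-ownership pattern, with illustrative names rather than the real classes:

    #include <mutex>

    class GuardedQueueOwner {
    public:
        ~GuardedQueueOwner() {
            std::lock_guard<std::mutex> lock(mLock);
            delete mQueue;          // same lock as send(), so no race with a writer
            mQueue = nullptr;
        }
        void send(int event) {
            std::lock_guard<std::mutex> lock(mLock);
            if (mQueue != nullptr) {
                mQueue->push(event);
            }
        }
    private:
        struct Queue { void push(int /*event*/) {} };
        std::mutex mLock;
        Queue* mQueue = new Queue();
    };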
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
new file mode 100644
index 0000000..4a59253
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+
+#include <utils/Mutex.h>
+
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "fifo/FifoBuffer.h"
+#include "SharedRingBuffer.h"
+#include "AudioEndpointParcelable.h"
+#include "AAudioThread.h"
+
+namespace aaudio {
+
+// We expect the queue to only have a few commands.
+// This should be way more than we need.
+#define QUEUE_UP_CAPACITY_COMMANDS (128)
+
+class AAudioServiceStreamBase {
+
+public:
+ AAudioServiceStreamBase();
+ virtual ~AAudioServiceStreamBase();
+
+ enum {
+ ILLEGAL_THREAD_ID = 0
+ };
+
+ /**
+ * Fill in a parcelable description of stream.
+ */
+ virtual aaudio_result_t getDescription(aaudio::AudioEndpointParcelable &parcelable) = 0;
+
+ /**
+ * Open the device.
+ */
+ virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configuration) = 0;
+
+ /**
+ * Start the flow of data.
+ */
+ virtual aaudio_result_t start() = 0;
+
+ /**
+ * Stop the flow of data so that start() can resume without loss of data.
+ */
+ virtual aaudio_result_t pause() = 0;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ */
+ virtual aaudio_result_t flush() = 0;
+
+ virtual aaudio_result_t close() = 0;
+
+ virtual void sendCurrentTimestamp() = 0;
+
+ aaudio_size_frames_t getFramesPerBurst() {
+ return mFramesPerBurst;
+ }
+
+ virtual void sendServiceEvent(aaudio_service_event_t event,
+ int32_t data1 = 0,
+ int64_t data2 = 0);
+
+ virtual void setRegisteredThread(pid_t pid) {
+ mRegisteredClientThread = pid;
+ }
+
+ virtual pid_t getRegisteredThread() {
+ return mRegisteredClientThread;
+ }
+
+protected:
+
+ pid_t mRegisteredClientThread = ILLEGAL_THREAD_ID;
+
+ SharedRingBuffer * mUpMessageQueue;
+
+ aaudio_sample_rate_t mSampleRate = 0;
+ aaudio_size_bytes_t mBytesPerFrame = 0;
+ aaudio_size_frames_t mFramesPerBurst = 0;
+ aaudio_size_frames_t mCapacityInFrames = 0;
+ aaudio_size_bytes_t mCapacityInBytes = 0;
+
+ android::Mutex mLockUpMessageQueue;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.cpp b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
new file mode 100644
index 0000000..1caeb3f
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <atomic>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamFakeHal.h"
+
+#include "FakeAudioHal.h"
+
+using namespace android;
+using namespace aaudio;
+
+// HACK values for Marlin
+#define CARD_ID 0
+#define DEVICE_ID 19
+
+/**
+ * Construct the audio message queues.
+ */
+
+AAudioServiceStreamFakeHal::AAudioServiceStreamFakeHal()
+ : AAudioServiceStreamBase()
+ , mStreamId(nullptr)
+ , mPreviousFrameCounter(0)
+ , mAAudioThread()
+{
+}
+
+AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() {
+ ALOGD("AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() call close()");
+ close();
+}
+
+aaudio_result_t AAudioServiceStreamFakeHal::open(aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) {
+ // Open stream on HAL and pass information about the ring buffer to the client.
+ mmap_buffer_info mmapInfo;
+ aaudio_result_t error;
+
+ // Open HAL
+ int bufferCapacity = request.getConfiguration().getBufferCapacity();
+ error = fake_hal_open(CARD_ID, DEVICE_ID, bufferCapacity, &mStreamId);
+ if (error < 0) {
+ ALOGE("Could not open card %d, device %d", CARD_ID, DEVICE_ID);
+ return error;
+ }
+
+ // Get information about the shared audio buffer.
+ error = fake_hal_get_mmap_info(mStreamId, &mmapInfo);
+ if (error < 0) {
+ ALOGE("fake_hal_get_mmap_info returned %d", error);
+ fake_hal_close(mStreamId);
+ mStreamId = nullptr;
+ return error;
+ }
+ mHalFileDescriptor = mmapInfo.fd;
+ mFramesPerBurst = mmapInfo.burst_size_in_frames;
+ mCapacityInFrames = mmapInfo.buffer_capacity_in_frames;
+ mCapacityInBytes = mmapInfo.buffer_capacity_in_bytes;
+ mSampleRate = mmapInfo.sample_rate;
+ mBytesPerFrame = mmapInfo.channel_count * sizeof(int16_t); // FIXME based on data format
+ ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.burst_size_in_frames = %d",
+ mmapInfo.burst_size_in_frames);
+ ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_frames = %d",
+ mmapInfo.buffer_capacity_in_frames);
+ ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_bytes = %d",
+ mmapInfo.buffer_capacity_in_bytes);
+
+ // Fill in AAudioStreamConfiguration
+ configurationOutput.setSampleRate(mSampleRate);
+ configurationOutput.setSamplesPerFrame(mmapInfo.channel_count);
+ configurationOutput.setAudioFormat(AAUDIO_FORMAT_PCM_I16);
+
+ return AAUDIO_OK;
+}
+
+/**
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::getDescription(AudioEndpointParcelable &parcelable) {
+ // Gather information on the message queue.
+ mUpMessageQueue->fillParcelable(parcelable,
+ parcelable.mUpMessageQueueParcelable);
+
+ // Gather information on the data queue.
+ // TODO refactor into a SharedRingBuffer?
+ int fdIndex = parcelable.addFileDescriptor(mHalFileDescriptor, mCapacityInBytes);
+ parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, mCapacityInBytes);
+ parcelable.mDownDataQueueParcelable.setBytesPerFrame(mBytesPerFrame);
+ parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+ parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
+ return AAUDIO_OK;
+}
+
+/**
+ * Start the flow of data.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::start() {
+ if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
+ aaudio_result_t result = fake_hal_start(mStreamId);
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
+ mState = AAUDIO_STREAM_STATE_STARTED;
+ if (result == AAUDIO_OK) {
+ mThreadEnabled.store(true);
+ result = mAAudioThread.start(this);
+ }
+ return result;
+}
+
+/**
+ * Stop the flow of data so that start() can resume without loss of data.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::pause() {
+ if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
+ sendCurrentTimestamp();
+ aaudio_result_t result = fake_hal_pause(mStreamId);
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
+ mState = AAUDIO_STREAM_STATE_PAUSED;
+ mFramesRead.reset32();
+ ALOGD("AAudioServiceStreamFakeHal::pause() sent AAUDIO_SERVICE_EVENT_PAUSED");
+ mThreadEnabled.store(false);
+ result = mAAudioThread.stop();
+ return result;
+}
+
+/**
+ * Discard any data held by the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::flush() {
+ if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
+ // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
+ ALOGD("AAudioServiceStreamFakeHal::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+ mState = AAUDIO_STREAM_STATE_FLUSHED;
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceStreamFakeHal::close() {
+ aaudio_result_t result = AAUDIO_OK;
+ if (mStreamId != nullptr) {
+ result = fake_hal_close(mStreamId);
+ mStreamId = nullptr;
+ }
+ return result;
+}
+
+void AAudioServiceStreamFakeHal::sendCurrentTimestamp() {
+ int frameCounter = 0;
+ int error = fake_hal_get_frame_counter(mStreamId, &frameCounter);
+ if (error < 0) {
+ ALOGE("AAudioServiceStreamFakeHal::sendCurrentTimestamp() error %d",
+ error);
+ } else if (frameCounter != mPreviousFrameCounter) {
+ AAudioServiceMessage command;
+ command.what = AAudioServiceMessage::code::TIMESTAMP;
+ mFramesRead.update32(frameCounter);
+ command.timestamp.position = mFramesRead.get();
+ ALOGD("AAudioServiceStreamFakeHal::sendCurrentTimestamp() HAL frames = %d, pos = %d",
+ frameCounter, (int)mFramesRead.get());
+ command.timestamp.timestamp = AudioClock::getNanoseconds();
+ mUpMessageQueue->getFifoBuffer()->write(&command, 1);
+ mPreviousFrameCounter = frameCounter;
+ }
+}
+
+// implement Runnable
+void AAudioServiceStreamFakeHal::run() {
+ TimestampScheduler timestampScheduler;
+ timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
+ timestampScheduler.start(AudioClock::getNanoseconds());
+ while(mThreadEnabled.load()) {
+ aaudio_nanoseconds_t nextTime = timestampScheduler.nextAbsoluteTime();
+ if (AudioClock::getNanoseconds() >= nextTime) {
+ sendCurrentTimestamp();
+ } else {
+ // Sleep until it is time to send the next timestamp.
+ AudioClock::sleepUntilNanoTime(nextTime);
+ }
+ }
+}
+
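The run() loop above wakes once per burst to publish a timestamp. The toy class below shows the arithmetic such a scheduler performs: compute a burst period from the frame count and sample rate, then hand out monotonically increasing wake-up times. It is not the real TimestampScheduler.

    #include <cstdint>

    using nanos_t = int64_t;
    constexpr nanos_t kNanosPerSecond = 1000000000LL;

    class ToyTimestampScheduler {
    public:
        void setBurstPeriod(int framesPerBurst, int sampleRate) {
            mPeriodNanos = (framesPerBurst * kNanosPerSecond) / sampleRate;
        }
        void start(nanos_t now) { mNextTime = now + mPeriodNanos; }
        nanos_t nextAbsoluteTime() {
            nanos_t t = mNextTime;
            mNextTime += mPeriodNanos;    // schedule the following burst
            return t;
        }
    private:
        nanos_t mPeriodNanos = 0;
        nanos_t mNextTime = 0;
    };
    // Example: 192 frames per burst at 48000 Hz gives a 4 ms timestamp period.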
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.h b/services/oboeservice/AAudioServiceStreamFakeHal.h
new file mode 100644
index 0000000..e9480fb
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamFakeHal.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
+
+#include "AAudioServiceDefinitions.h"
+#include "AAudioServiceStreamBase.h"
+#include "FakeAudioHal.h"
+#include "MonotonicCounter.h"
+#include "AudioEndpointParcelable.h"
+#include "TimestampScheduler.h"
+
+namespace aaudio {
+
+class AAudioServiceStreamFakeHal
+ : public AAudioServiceStreamBase
+ , public Runnable {
+
+public:
+ AAudioServiceStreamFakeHal();
+ virtual ~AAudioServiceStreamFakeHal();
+
+ virtual aaudio_result_t getDescription(AudioEndpointParcelable &parcelable) override;
+
+ virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) override;
+
+ /**
+ * Start the flow of data.
+ */
+ virtual aaudio_result_t start() override;
+
+ /**
+ * Stop the flow of data so that start() can resume without loss of data.
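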
+ */
+ virtual aaudio_result_t pause() override;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ */
+ virtual aaudio_result_t flush() override;
+
+ virtual aaudio_result_t close() override;
+
+ void sendCurrentTimestamp();
+
+ virtual void run() override; // to implement Runnable
+
+private:
+ fake_hal_stream_ptr mStreamId; // Move to HAL
+
+ MonotonicCounter mFramesWritten;
+ MonotonicCounter mFramesRead;
+ int mHalFileDescriptor = -1;
+ int mPreviousFrameCounter = 0; // from HAL
+
+ aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+
+ AAudioThread mAAudioThread;
+ std::atomic<bool> mThreadEnabled;
+};
+
+} // namespace aaudio
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
new file mode 100644
index 0000000..f5e5784
--- /dev/null
+++ b/services/oboeservice/AAudioThread.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "AAudioThread.h"
+
+using namespace aaudio;
+
+
+AAudioThread::AAudioThread() {
+ // mThread is a pthread_t of unknown size so we need memset.
+ memset(&mThread, 0, sizeof(mThread));
+}
+
+void AAudioThread::dispatch() {
+ if (mRunnable != nullptr) {
+ mRunnable->run();
+ } else {
+ run();
+ }
+}
+
+// This is the entry point for the new thread created by createThread().
+// It converts the 'C' function call to a C++ method call.
+static void * AAudioThread_internalThreadProc(void *arg) {
+ AAudioThread *aaudioThread = (AAudioThread *) arg;
+ aaudioThread->dispatch();
+ return nullptr;
+}
+
+aaudio_result_t AAudioThread::start(Runnable *runnable) {
+ if (mHasThread) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ mRunnable = runnable; // TODO use atomic?
+ int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
+ if (err != 0) {
+ ALOGE("AAudioThread::pthread_create() returned %d", err);
+ // TODO convert errno to aaudio_result_t
+ return AAUDIO_ERROR_INTERNAL;
+ } else {
+ mHasThread = true;
+ return AAUDIO_OK;
+ }
+}
+
+aaudio_result_t AAudioThread::stop() {
+ if (!mHasThread) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ int err = pthread_join(mThread, nullptr);
+ mHasThread = false;
+ // TODO convert errno to aaudio_result_t
+ return err ? AAUDIO_ERROR_INTERNAL : AAUDIO_OK;
+}
+
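A usage sketch for the AAudioThread / Runnable pair defined above. It assumes AAudioThread.h and AAudioDefinitions.h are included; MyTicker and its flag are illustrative. Note that stop() only joins, so the runnable has to observe a flag and return on its own, exactly as AAudioServiceStreamFakeHal does with mThreadEnabled.

    #include <atomic>

    class MyTicker : public aaudio::Runnable {
    public:
        std::atomic<bool> keepRunning{true};
        void run() override {
            while (keepRunning.load()) {
                // periodic work here, e.g. send a timestamp, then sleep
            }
        }
    };

    void example() {
        MyTicker ticker;
        aaudio::AAudioThread thread;
        if (thread.start(&ticker) == AAUDIO_OK) {
            // ... later: ask the loop to exit, then join the thread
            ticker.keepRunning.store(false);
            thread.stop();
        }
    }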
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
new file mode 100644
index 0000000..1f676dc
--- /dev/null
+++ b/services/oboeservice/AAudioThread.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_THREAD_H
+#define AAUDIO_THREAD_H
+
+#include <atomic>
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+namespace aaudio {
+
+class Runnable {
+public:
+ Runnable() {};
+ virtual ~Runnable() = default;
+
+ virtual void run() {}
+};
+
+/**
+ * Abstraction for a host thread.
+ */
+class AAudioThread
+{
+public:
+ AAudioThread();
+ AAudioThread(Runnable *runnable);
+ virtual ~AAudioThread() = default;
+
+ /**
+ * Start the thread running.
+ */
+ aaudio_result_t start(Runnable *runnable = nullptr);
+
+ /**
+ * Join the thread.
+ * The caller must somehow tell the thread to exit before calling join().
+ */
+ aaudio_result_t stop();
+
+ /**
+ * This will get called in the thread.
+ * Override this or pass a Runnable to start().
+ */
+ virtual void run() {};
+
+ void dispatch(); // called internally from 'C' thread wrapper
+
+private:
+ Runnable* mRunnable = nullptr; // TODO make atomic with memory barrier?
+ bool mHasThread = false;
+ pthread_t mThread; // initialized in constructor
+
+};
+
+} /* namespace aaudio */
+
+#endif // AAUDIO_THREAD_H
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index 07b4d76..5cd9121 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -1,48 +1,50 @@
LOCAL_PATH:= $(call my-dir)
-# Oboe Service
+# AAudio Service
include $(CLEAR_VARS)
-LOCAL_MODULE := oboeservice
+LOCAL_MODULE := aaudioservice
LOCAL_MODULE_TAGS := optional
-LIBOBOE_DIR := ../../media/liboboe
-LIBOBOE_SRC_DIR := $(LIBOBOE_DIR)/src
+LIBAAUDIO_DIR := ../../media/libaaudio
+LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/native/include \
system/core/base/include \
- $(TOP)/frameworks/native/media/liboboe/include/include \
- $(TOP)/frameworks/av/media/liboboe/include \
+ $(TOP)/frameworks/native/media/libaaudio/include/include \
+ $(TOP)/frameworks/av/media/libaaudio/include \
frameworks/native/include \
$(TOP)/external/tinyalsa/include \
- $(TOP)/frameworks/av/media/liboboe/src \
- $(TOP)/frameworks/av/media/liboboe/src/binding \
- $(TOP)/frameworks/av/media/liboboe/src/client \
- $(TOP)/frameworks/av/media/liboboe/src/core \
- $(TOP)/frameworks/av/media/liboboe/src/fifo \
- $(TOP)/frameworks/av/media/liboboe/src/utility
+ $(TOP)/frameworks/av/media/libaaudio/src \
+ $(TOP)/frameworks/av/media/libaaudio/src/binding \
+ $(TOP)/frameworks/av/media/libaaudio/src/client \
+ $(TOP)/frameworks/av/media/libaaudio/src/core \
+ $(TOP)/frameworks/av/media/libaaudio/src/fifo \
+ $(TOP)/frameworks/av/media/libaaudio/src/utility
-# TODO These could be in a liboboe_common library
+# TODO These could be in a libaaudio_common library
LOCAL_SRC_FILES += \
- $(LIBOBOE_SRC_DIR)/utility/HandleTracker.cpp \
- $(LIBOBOE_SRC_DIR)/utility/OboeUtilities.cpp \
- $(LIBOBOE_SRC_DIR)/fifo/FifoBuffer.cpp \
- $(LIBOBOE_SRC_DIR)/fifo/FifoControllerBase.cpp \
- $(LIBOBOE_SRC_DIR)/binding/SharedMemoryParcelable.cpp \
- $(LIBOBOE_SRC_DIR)/binding/SharedRegionParcelable.cpp \
- $(LIBOBOE_SRC_DIR)/binding/RingBufferParcelable.cpp \
- $(LIBOBOE_SRC_DIR)/binding/AudioEndpointParcelable.cpp \
- $(LIBOBOE_SRC_DIR)/binding/OboeStreamRequest.cpp \
- $(LIBOBOE_SRC_DIR)/binding/OboeStreamConfiguration.cpp \
- $(LIBOBOE_SRC_DIR)/binding/IOboeAudioService.cpp \
+ $(LIBAAUDIO_SRC_DIR)/utility/HandleTracker.cpp \
+ $(LIBAAUDIO_SRC_DIR)/utility/AAudioUtilities.cpp \
+ $(LIBAAUDIO_SRC_DIR)/fifo/FifoBuffer.cpp \
+ $(LIBAAUDIO_SRC_DIR)/fifo/FifoControllerBase.cpp \
+ $(LIBAAUDIO_SRC_DIR)/binding/SharedMemoryParcelable.cpp \
+ $(LIBAAUDIO_SRC_DIR)/binding/SharedRegionParcelable.cpp \
+ $(LIBAAUDIO_SRC_DIR)/binding/RingBufferParcelable.cpp \
+ $(LIBAAUDIO_SRC_DIR)/binding/AudioEndpointParcelable.cpp \
+ $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamRequest.cpp \
+ $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamConfiguration.cpp \
+ $(LIBAAUDIO_SRC_DIR)/binding/IAAudioService.cpp \
SharedRingBuffer.cpp \
FakeAudioHal.cpp \
- OboeAudioService.cpp \
- OboeServiceStreamBase.cpp \
- OboeServiceStreamFakeHal.cpp \
- OboeServiceMain.cpp
+ AAudioService.cpp \
+ AAudioServiceStreamBase.cpp \
+ AAudioServiceStreamFakeHal.cpp \
+ TimestampScheduler.cpp \
+ AAudioServiceMain.cpp \
+ AAudioThread.cpp
LOCAL_CFLAGS += -Wno-unused-parameter
LOCAL_CFLAGS += -Wall -Werror
diff --git a/services/oboeservice/FakeAudioHal.cpp b/services/oboeservice/FakeAudioHal.cpp
index 7fa2eef..34a2476 100644
--- a/services/oboeservice/FakeAudioHal.cpp
+++ b/services/oboeservice/FakeAudioHal.cpp
@@ -32,7 +32,7 @@
#include "FakeAudioHal.h"
-//using namespace oboe;
+//using namespace aaudio;
using sample_t = int16_t;
using std::cout;
@@ -94,14 +94,25 @@
#define FRAMES_PER_BURST_QUALCOMM 192
#define FRAMES_PER_BURST_NVIDIA 128
-int fake_hal_open(int card_id, int device_id, fake_hal_stream_ptr *streamPP) {
+int fake_hal_open(int card_id, int device_id,
+ int frameCapacity,
+ fake_hal_stream_ptr *streamPP) {
int framesPerBurst = FRAMES_PER_BURST_QUALCOMM; // TODO update as needed
+ int periodCountRequested = frameCapacity / framesPerBurst;
int periodCount = 32;
unsigned int offset1;
unsigned int frames1;
void *area = nullptr;
int mmapAvail = 0;
+ // Try to match requested size with a power of 2.
+ while (periodCount < periodCountRequested && periodCount < 1024) {
+ periodCount *= 2;
+ }
+ std::cout << "fake_hal_open() requested frameCapacity = " << frameCapacity << std::endl;
+ std::cout << "fake_hal_open() periodCountRequested = " << periodCountRequested << std::endl;
+ std::cout << "fake_hal_open() periodCount = " << periodCount << std::endl;
+
// Configuration for an ALSA stream.
pcm_config cfg;
memset(&cfg, 0, sizeof(cfg));
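The new capacity handling above starts at 32 periods and doubles until the requested frame capacity is covered, capping at 1024. A standalone version of that rounding with one worked example; the function name is illustrative.

    #include <cstdio>

    int choosePeriodCount(int frameCapacity, int framesPerBurst) {
        int requested = frameCapacity / framesPerBurst;
        int periodCount = 32;
        while (periodCount < requested && periodCount < 1024) {
            periodCount *= 2;   // keep the period count a power of two
        }
        return periodCount;
    }

    int main() {
        // A requested capacity of 48 bursts of 192 frames rounds up to 64 periods.
        printf("%d\n", choosePeriodCount(48 * 192, 192));   // prints 64
        return 0;
    }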
diff --git a/services/oboeservice/FakeAudioHal.h b/services/oboeservice/FakeAudioHal.h
index d6f28b2..d3aa4e8 100644
--- a/services/oboeservice/FakeAudioHal.h
+++ b/services/oboeservice/FakeAudioHal.h
@@ -21,7 +21,7 @@
#ifndef FAKE_AUDIO_HAL_H
#define FAKE_AUDIO_HAL_H
-//namespace oboe {
+//namespace aaudio {
using sample_t = int16_t;
struct mmap_buffer_info {
@@ -39,7 +39,9 @@
//extern "C"
//{
-int fake_hal_open(int card_id, int device_id, fake_hal_stream_ptr *stream_pp);
+int fake_hal_open(int card_id, int device_id,
+ int frameCapacity,
+ fake_hal_stream_ptr *stream_pp);
int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info);
@@ -53,6 +55,6 @@
//} /* "C" */
-//} /* namespace oboe */
+//} /* namespace aaudio */
#endif // FAKE_AUDIO_HAL_H
diff --git a/services/oboeservice/OboeAudioService.cpp b/services/oboeservice/OboeAudioService.cpp
deleted file mode 100644
index caddc1d..0000000
--- a/services/oboeservice/OboeAudioService.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "OboeService"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <time.h>
-#include <pthread.h>
-
-#include <oboe/OboeDefinitions.h>
-
-#include "HandleTracker.h"
-#include "IOboeAudioService.h"
-#include "OboeService.h"
-#include "OboeAudioService.h"
-#include "OboeServiceStreamFakeHal.h"
-
-using namespace android;
-using namespace oboe;
-
-typedef enum
-{
- OBOE_HANDLE_TYPE_STREAM,
- OBOE_HANDLE_TYPE_COUNT
-} oboe_service_handle_type_t;
-static_assert(OBOE_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
-
-oboe_handle_t OboeAudioService::openStream(oboe::OboeStreamRequest &request,
- oboe::OboeStreamConfiguration &configuration) {
- OboeServiceStreamBase *serviceStream = new OboeServiceStreamFakeHal();
- ALOGD("OboeAudioService::openStream(): created serviceStream = %p", serviceStream);
- oboe_result_t result = serviceStream->open(request, configuration);
- if (result < 0) {
- ALOGE("OboeAudioService::openStream(): open returned %d", result);
- return result;
- } else {
- OboeStream handle = mHandleTracker.put(OBOE_HANDLE_TYPE_STREAM, serviceStream);
- ALOGD("OboeAudioService::openStream(): handle = 0x%08X", handle);
- if (handle < 0) {
- delete serviceStream;
- }
- return handle;
- }
-}
-
-oboe_result_t OboeAudioService::closeStream(oboe_handle_t streamHandle) {
- OboeServiceStreamBase *serviceStream = (OboeServiceStreamBase *)
- mHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM,
- streamHandle);
- ALOGI("OboeAudioService.closeStream(0x%08X)", streamHandle);
- if (serviceStream != nullptr) {
- ALOGD("OboeAudioService::closeStream(): deleting serviceStream = %p", serviceStream);
- delete serviceStream;
- return OBOE_OK;
- }
- return OBOE_ERROR_INVALID_HANDLE;
-}
-
-OboeServiceStreamBase *OboeAudioService::convertHandleToServiceStream(
- oboe_handle_t streamHandle) const {
- return (OboeServiceStreamBase *) mHandleTracker.get(OBOE_HANDLE_TYPE_STREAM,
- (oboe_handle_t)streamHandle);
-}
-
-oboe_result_t OboeAudioService::getStreamDescription(
- oboe_handle_t streamHandle,
- oboe::AudioEndpointParcelable &parcelable) {
- ALOGI("OboeAudioService::getStreamDescriptor(), streamHandle = 0x%08x", streamHandle);
- OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("OboeAudioService::getStreamDescriptor(), serviceStream = %p", serviceStream);
- if (serviceStream == nullptr) {
- return OBOE_ERROR_INVALID_HANDLE;
- }
- return serviceStream->getDescription(parcelable);
-}
-
-oboe_result_t OboeAudioService::startStream(oboe_handle_t streamHandle) {
- OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("OboeAudioService::startStream(), serviceStream = %p", serviceStream);
- if (serviceStream == nullptr) {
- return OBOE_ERROR_INVALID_HANDLE;
- }
- mLatestHandle = streamHandle;
- return serviceStream->start();
-}
-
-oboe_result_t OboeAudioService::pauseStream(oboe_handle_t streamHandle) {
- OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("OboeAudioService::pauseStream(), serviceStream = %p", serviceStream);
- if (serviceStream == nullptr) {
- return OBOE_ERROR_INVALID_HANDLE;
- }
- return serviceStream->pause();
-}
-
-oboe_result_t OboeAudioService::flushStream(oboe_handle_t streamHandle) {
- OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("OboeAudioService::flushStream(), serviceStream = %p", serviceStream);
- if (serviceStream == nullptr) {
- return OBOE_ERROR_INVALID_HANDLE;
- }
- return serviceStream->flush();
-}
-
-void OboeAudioService::tickle() {
- OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(mLatestHandle);
- //ALOGI("OboeAudioService::tickle(), serviceStream = %p", serviceStream);
- if (serviceStream != nullptr) {
- serviceStream->tickle();
- }
-}
-
-oboe_result_t OboeAudioService::registerAudioThread(oboe_handle_t streamHandle,
- pid_t clientThreadId,
- oboe_nanoseconds_t periodNanoseconds) {
- OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("OboeAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
- if (serviceStream == nullptr) {
- ALOGE("OboeAudioService::registerAudioThread(), serviceStream == nullptr");
- return OBOE_ERROR_INVALID_HANDLE;
- }
- if (serviceStream->getRegisteredThread() != OboeServiceStreamBase::ILLEGAL_THREAD_ID) {
- ALOGE("OboeAudioService::registerAudioThread(), thread already registered");
- return OBOE_ERROR_INVALID_ORDER;
- }
- serviceStream->setRegisteredThread(clientThreadId);
- // Boost client thread to SCHED_FIFO
- struct sched_param sp;
- memset(&sp, 0, sizeof(sp));
- sp.sched_priority = 2; // TODO use 'requestPriority' function from frameworks/av/media/utils
- int err = sched_setscheduler(clientThreadId, SCHED_FIFO, &sp);
- if (err != 0){
- ALOGE("OboeAudioService::sched_setscheduler() failed, errno = %d, priority = %d",
- errno, sp.sched_priority);
- return OBOE_ERROR_INTERNAL;
- } else {
- return OBOE_OK;
- }
-}
-
-oboe_result_t OboeAudioService::unregisterAudioThread(oboe_handle_t streamHandle,
- pid_t clientThreadId) {
- OboeServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("OboeAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
- if (serviceStream == nullptr) {
- ALOGE("OboeAudioService::unregisterAudioThread(), serviceStream == nullptr");
- return OBOE_ERROR_INVALID_HANDLE;
- }
- if (serviceStream->getRegisteredThread() != clientThreadId) {
- ALOGE("OboeAudioService::unregisterAudioThread(), wrong thread");
- return OBOE_ERROR_ILLEGAL_ARGUMENT;
- }
- serviceStream->setRegisteredThread(0);
- return OBOE_OK;
-}
diff --git a/services/oboeservice/OboeAudioService.h b/services/oboeservice/OboeAudioService.h
deleted file mode 100644
index df3cbf8..0000000
--- a/services/oboeservice/OboeAudioService.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_OBOE_AUDIO_SERVICE_H
-#define OBOE_OBOE_AUDIO_SERVICE_H
-
-#include <time.h>
-#include <pthread.h>
-
-#include <binder/BinderService.h>
-
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-#include "HandleTracker.h"
-#include "IOboeAudioService.h"
-#include "OboeService.h"
-#include "OboeServiceStreamBase.h"
-
-using namespace android;
-namespace oboe {
-
-class OboeAudioService :
- public BinderService<OboeAudioService>,
- public BnOboeAudioService
-{
- friend class BinderService<OboeAudioService>; // for OboeAudioService()
-public:
-// TODO why does this fail? static const char* getServiceName() ANDROID_API { return "media.audio_oboe"; }
- static const char* getServiceName() { return "media.audio_oboe"; }
-
- virtual oboe_handle_t openStream(OboeStreamRequest &request,
- OboeStreamConfiguration &configuration);
-
- virtual oboe_result_t closeStream(oboe_handle_t streamHandle);
-
- virtual oboe_result_t getStreamDescription(
- oboe_handle_t streamHandle,
- AudioEndpointParcelable &parcelable);
-
- virtual oboe_result_t startStream(oboe_handle_t streamHandle);
-
- virtual oboe_result_t pauseStream(oboe_handle_t streamHandle);
-
- virtual oboe_result_t flushStream(oboe_handle_t streamHandle);
-
- virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle,
- pid_t pid, oboe_nanoseconds_t periodNanoseconds) ;
-
- virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle, pid_t pid);
-
- virtual void tickle();
-
-private:
-
- OboeServiceStreamBase *convertHandleToServiceStream(oboe_handle_t streamHandle) const;
-
- HandleTracker mHandleTracker;
- oboe_handle_t mLatestHandle = OBOE_ERROR_INVALID_HANDLE; // TODO until we have service threads
-};
-
-} /* namespace oboe */
-
-#endif //OBOE_OBOE_AUDIO_SERVICE_H
diff --git a/services/oboeservice/OboeService.h b/services/oboeservice/OboeService.h
deleted file mode 100644
index a24f525..0000000
--- a/services/oboeservice/OboeService.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_OBOE_SERVICE_H
-#define OBOE_OBOE_SERVICE_H
-
-#include <stdint.h>
-
-#include <oboe/OboeAudio.h>
-
-#include "binding/RingBufferParcelable.h"
-
-namespace oboe {
-
-// TODO move this an "include" folder for the service.
-
-struct OboeMessageTimestamp {
- oboe_position_frames_t position;
- int64_t deviceOffset; // add to client position to get device position
- oboe_nanoseconds_t timestamp;
-};
-
-typedef enum oboe_service_event_e : uint32_t {
- OBOE_SERVICE_EVENT_STARTED,
- OBOE_SERVICE_EVENT_PAUSED,
- OBOE_SERVICE_EVENT_FLUSHED,
- OBOE_SERVICE_EVENT_CLOSED,
- OBOE_SERVICE_EVENT_DISCONNECTED
-} oboe_service_event_t;
-
-struct OboeMessageEvent {
- oboe_service_event_t event;
- int32_t data1;
- int64_t data2;
-};
-
-typedef struct OboeServiceMessage_s {
- enum class code : uint32_t {
- NOTHING,
- TIMESTAMP,
- EVENT,
- };
-
- code what;
- union {
- OboeMessageTimestamp timestamp;
- OboeMessageEvent event;
- };
-} OboeServiceMessage;
-
-
-} /* namespace oboe */
-
-#endif //OBOE_OBOE_SERVICE_H
diff --git a/services/oboeservice/OboeServiceStreamBase.h b/services/oboeservice/OboeServiceStreamBase.h
deleted file mode 100644
index 736c754..0000000
--- a/services/oboeservice/OboeServiceStreamBase.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_OBOE_SERVICE_STREAM_BASE_H
-#define OBOE_OBOE_SERVICE_STREAM_BASE_H
-
-#include "IOboeAudioService.h"
-#include "OboeService.h"
-#include "AudioStream.h"
-#include "fifo/FifoBuffer.h"
-#include "SharedRingBuffer.h"
-#include "AudioEndpointParcelable.h"
-
-namespace oboe {
-
-// We expect the queue to only have a few commands.
-// This should be way more than we need.
-#define QUEUE_UP_CAPACITY_COMMANDS (128)
-
-class OboeServiceStreamBase {
-
-public:
- OboeServiceStreamBase();
- virtual ~OboeServiceStreamBase();
-
- enum {
- ILLEGAL_THREAD_ID = 0
- };
-
- /**
- * Fill in a parcelable description of stream.
- */
- virtual oboe_result_t getDescription(oboe::AudioEndpointParcelable &parcelable) = 0;
-
- /**
- * Open the device.
- */
- virtual oboe_result_t open(oboe::OboeStreamRequest &request,
- oboe::OboeStreamConfiguration &configuration) = 0;
-
- /**
- * Start the flow of data.
- */
- virtual oboe_result_t start() = 0;
-
- /**
- * Stop the flow of data such that start() can resume with loss of data.
- */
- virtual oboe_result_t pause() = 0;
-
- /**
- * Discard any data held by the underlying HAL or Service.
- */
- virtual oboe_result_t flush() = 0;
-
- virtual oboe_result_t close() = 0;
-
- virtual void tickle() = 0;
-
- virtual void sendServiceEvent(oboe_service_event_t event,
- int32_t data1 = 0,
- int64_t data2 = 0);
-
- virtual void setRegisteredThread(pid_t pid) {
- mRegisteredClientThread = pid;
- }
- virtual pid_t getRegisteredThread() {
- return mRegisteredClientThread;
- }
-
-protected:
-
- pid_t mRegisteredClientThread = ILLEGAL_THREAD_ID;
-
- SharedRingBuffer * mUpMessageQueue;
-
- oboe_sample_rate_t mSampleRate = 0;
- oboe_size_bytes_t mBytesPerFrame = 0;
- oboe_size_frames_t mFramesPerBurst = 0;
- oboe_size_frames_t mCapacityInFrames = 0;
- oboe_size_bytes_t mCapacityInBytes = 0;
-};
-
-} /* namespace oboe */
-
-#endif //OBOE_OBOE_SERVICE_STREAM_BASE_H
diff --git a/services/oboeservice/OboeServiceStreamFakeHal.cpp b/services/oboeservice/OboeServiceStreamFakeHal.cpp
deleted file mode 100644
index dbbc860..0000000
--- a/services/oboeservice/OboeServiceStreamFakeHal.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "OboeService"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include "AudioClock.h"
-#include "AudioEndpointParcelable.h"
-
-#include "OboeServiceStreamBase.h"
-#include "OboeServiceStreamFakeHal.h"
-
-#include "FakeAudioHal.h"
-
-using namespace android;
-using namespace oboe;
-
-// HACK values for Marlin
-#define CARD_ID 0
-#define DEVICE_ID 19
-
-/**
- * Construct the audio message queuues and message queues.
- */
-
-OboeServiceStreamFakeHal::OboeServiceStreamFakeHal()
- : OboeServiceStreamBase()
- , mStreamId(nullptr)
- , mPreviousFrameCounter(0)
-{
-}
-
-OboeServiceStreamFakeHal::~OboeServiceStreamFakeHal() {
- ALOGD("OboeServiceStreamFakeHal::~OboeServiceStreamFakeHal() call close()");
- close();
-}
-
-oboe_result_t OboeServiceStreamFakeHal::open(oboe::OboeStreamRequest &request,
- oboe::OboeStreamConfiguration &configuration) {
- // Open stream on HAL and pass information about the ring buffer to the client.
- mmap_buffer_info mmapInfo;
- oboe_result_t error;
-
- // Open HAL
- error = fake_hal_open(CARD_ID, DEVICE_ID, &mStreamId);
- if(error < 0) {
- ALOGE("Could not open card %d, device %d", CARD_ID, DEVICE_ID);
- return error;
- }
-
- // Get information about the shared audio buffer.
- error = fake_hal_get_mmap_info(mStreamId, &mmapInfo);
- if (error < 0) {
- ALOGE("fake_hal_get_mmap_info returned %d", error);
- fake_hal_close(mStreamId);
- mStreamId = nullptr;
- return error;
- }
- mHalFileDescriptor = mmapInfo.fd;
- mFramesPerBurst = mmapInfo.burst_size_in_frames;
- mCapacityInFrames = mmapInfo.buffer_capacity_in_frames;
- mCapacityInBytes = mmapInfo.buffer_capacity_in_bytes;
- mSampleRate = mmapInfo.sample_rate;
- mBytesPerFrame = mmapInfo.channel_count * sizeof(int16_t); // FIXME based on data format
- ALOGD("OboeServiceStreamFakeHal::open() mmapInfo.burst_size_in_frames = %d",
- mmapInfo.burst_size_in_frames);
- ALOGD("OboeServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_frames = %d",
- mmapInfo.buffer_capacity_in_frames);
- ALOGD("OboeServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_bytes = %d",
- mmapInfo.buffer_capacity_in_bytes);
-
- // Fill in OboeStreamConfiguration
- configuration.setSampleRate(mSampleRate);
- configuration.setSamplesPerFrame(mmapInfo.channel_count);
- configuration.setAudioFormat(OBOE_AUDIO_FORMAT_PCM16);
- return OBOE_OK;
-}
-
-/**
- * Get an immutable description of the in-memory queues
- * used to communicate with the underlying HAL or Service.
- */
-oboe_result_t OboeServiceStreamFakeHal::getDescription(AudioEndpointParcelable &parcelable) {
- // Gather information on the message queue.
- mUpMessageQueue->fillParcelable(parcelable,
- parcelable.mUpMessageQueueParcelable);
-
- // Gather information on the data queue.
- // TODO refactor into a SharedRingBuffer?
- int fdIndex = parcelable.addFileDescriptor(mHalFileDescriptor, mCapacityInBytes);
- parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, mCapacityInBytes);
- parcelable.mDownDataQueueParcelable.setBytesPerFrame(mBytesPerFrame);
- parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
- parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
- return OBOE_OK;
-}
-
-/**
- * Start the flow of data.
- */
-oboe_result_t OboeServiceStreamFakeHal::start() {
- if (mStreamId == nullptr) return OBOE_ERROR_NULL;
- oboe_result_t result = fake_hal_start(mStreamId);
- sendServiceEvent(OBOE_SERVICE_EVENT_STARTED);
- mState = OBOE_STREAM_STATE_STARTED;
- return result;
-}
-
-/**
- * Stop the flow of data such that start() can resume without loss of data.
- */
-oboe_result_t OboeServiceStreamFakeHal::pause() {
- if (mStreamId == nullptr) return OBOE_ERROR_NULL;
- sendCurrentTimestamp();
- oboe_result_t result = fake_hal_pause(mStreamId);
- sendServiceEvent(OBOE_SERVICE_EVENT_PAUSED);
- mState = OBOE_STREAM_STATE_PAUSED;
- mFramesRead.reset32();
- ALOGD("OboeServiceStreamFakeHal::pause() sent OBOE_SERVICE_EVENT_PAUSED");
- return result;
-}
-
-/**
- * Discard any data held by the underlying HAL or Service.
- */
-oboe_result_t OboeServiceStreamFakeHal::flush() {
- if (mStreamId == nullptr) return OBOE_ERROR_NULL;
- // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
- ALOGD("OboeServiceStreamFakeHal::pause() send OBOE_SERVICE_EVENT_FLUSHED");
- sendServiceEvent(OBOE_SERVICE_EVENT_FLUSHED);
- mState = OBOE_STREAM_STATE_FLUSHED;
- return OBOE_OK;
-}
-
-oboe_result_t OboeServiceStreamFakeHal::close() {
- oboe_result_t result = OBOE_OK;
- if (mStreamId != nullptr) {
- result = fake_hal_close(mStreamId);
- mStreamId = nullptr;
- }
- return result;
-}
-
-void OboeServiceStreamFakeHal::sendCurrentTimestamp() {
- int frameCounter = 0;
- int error = fake_hal_get_frame_counter(mStreamId, &frameCounter);
- if (error < 0) {
- ALOGE("OboeServiceStreamFakeHal::sendCurrentTimestamp() error %d",
- error);
- } else if (frameCounter != mPreviousFrameCounter) {
- OboeServiceMessage command;
- command.what = OboeServiceMessage::code::TIMESTAMP;
- mFramesRead.update32(frameCounter);
- command.timestamp.position = mFramesRead.get();
- ALOGV("OboeServiceStreamFakeHal::sendCurrentTimestamp() HAL frames = %d, pos = %d",
- frameCounter, (int)mFramesRead.get());
- command.timestamp.timestamp = AudioClock::getNanoseconds();
- mUpMessageQueue->getFifoBuffer()->write(&command, 1);
- mPreviousFrameCounter = frameCounter;
- }
-}
-
-void OboeServiceStreamFakeHal::tickle() {
- if (mStreamId != nullptr) {
- switch (mState) {
- case OBOE_STREAM_STATE_STARTING:
- case OBOE_STREAM_STATE_STARTED:
- case OBOE_STREAM_STATE_PAUSING:
- case OBOE_STREAM_STATE_STOPPING:
- sendCurrentTimestamp();
- break;
- default:
- break;
- }
- }
-}
-
diff --git a/services/oboeservice/OboeServiceStreamFakeHal.h b/services/oboeservice/OboeServiceStreamFakeHal.h
deleted file mode 100644
index b026d34..0000000
--- a/services/oboeservice/OboeServiceStreamFakeHal.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_OBOE_SERVICE_STREAM_FAKE_HAL_H
-#define OBOE_OBOE_SERVICE_STREAM_FAKE_HAL_H
-
-#include "OboeService.h"
-#include "OboeServiceStreamBase.h"
-#include "FakeAudioHal.h"
-#include "MonotonicCounter.h"
-#include "AudioEndpointParcelable.h"
-
-namespace oboe {
-
-class OboeServiceStreamFakeHal : public OboeServiceStreamBase {
-
-public:
- OboeServiceStreamFakeHal();
- virtual ~OboeServiceStreamFakeHal();
-
- virtual oboe_result_t getDescription(AudioEndpointParcelable &parcelable) override;
-
- virtual oboe_result_t open(oboe::OboeStreamRequest &request,
- oboe::OboeStreamConfiguration &configuration) override;
-
- /**
- * Start the flow of data.
- */
- virtual oboe_result_t start() override;
-
- /**
- * Stop the flow of data such that start() can resume without loss of data.
- */
- virtual oboe_result_t pause() override;
-
- /**
- * Discard any data held by the underlying HAL or Service.
- */
- virtual oboe_result_t flush() override;
-
- virtual oboe_result_t close() override;
-
- virtual void tickle() override;
-
-protected:
-
- void sendCurrentTimestamp();
-
-private:
- fake_hal_stream_ptr mStreamId; // Move to HAL
-
- MonotonicCounter mFramesWritten;
- MonotonicCounter mFramesRead;
- int mHalFileDescriptor = -1;
- int mPreviousFrameCounter = 0; // from HAL
-
- oboe_stream_state_t mState = OBOE_STREAM_STATE_UNINITIALIZED;
-};
-
-} // namespace oboe
-
-#endif //OBOE_OBOE_SERVICE_STREAM_FAKE_HAL_H
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index c3df5ce..9ac8fdf 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -14,20 +14,20 @@
* limitations under the License.
*/
-#define LOG_TAG "OboeService"
+#define LOG_TAG "AAudioService"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
-//#include "OboeServiceStreamBase.h"
-//#include "OboeServiceStreamFakeHal.h"
+//#include "AAudioServiceStreamBase.h"
+//#include "AAudioServiceStreamFakeHal.h"
#include "SharedRingBuffer.h"
using namespace android;
-using namespace oboe;
+using namespace aaudio;
SharedRingBuffer::~SharedRingBuffer()
{
@@ -39,23 +39,23 @@
}
}
-oboe_result_t SharedRingBuffer::allocate(fifo_frames_t bytesPerFrame,
+aaudio_result_t SharedRingBuffer::allocate(fifo_frames_t bytesPerFrame,
fifo_frames_t capacityInFrames) {
mCapacityInFrames = capacityInFrames;
// Create shared memory large enough to hold the data and the read and write counters.
mDataMemorySizeInBytes = bytesPerFrame * capacityInFrames;
mSharedMemorySizeInBytes = mDataMemorySizeInBytes + (2 * (sizeof(fifo_counter_t)));
- mFileDescriptor = ashmem_create_region("OboeSharedRingBuffer", mSharedMemorySizeInBytes);
+ mFileDescriptor = ashmem_create_region("AAudioSharedRingBuffer", mSharedMemorySizeInBytes);
if (mFileDescriptor < 0) {
ALOGE("SharedRingBuffer::allocate() ashmem_create_region() failed %d", errno);
- return OBOE_ERROR_INTERNAL;
+ return AAUDIO_ERROR_INTERNAL;
}
int err = ashmem_set_prot_region(mFileDescriptor, PROT_READ|PROT_WRITE); // TODO error handling?
if (err < 0) {
ALOGE("SharedRingBuffer::allocate() ashmem_set_prot_region() failed %d", errno);
close(mFileDescriptor);
- return OBOE_ERROR_INTERNAL; // TODO convert errno to a better OBOE_ERROR;
+ return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
}
// Map the fd to memory addresses.
@@ -66,7 +66,7 @@
if (mSharedMemory == MAP_FAILED) {
ALOGE("SharedRingBuffer::allocate() mmap() failed %d", errno);
close(mFileDescriptor);
- return OBOE_ERROR_INTERNAL; // TODO convert errno to a better OBOE_ERROR;
+ return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
}
// Get addresses for our counters and data from the shared memory.
@@ -78,7 +78,7 @@
mFifoBuffer = new(std::nothrow) FifoBuffer(bytesPerFrame, capacityInFrames,
readCounterAddress, writeCounterAddress, dataAddress);
- return (mFifoBuffer == nullptr) ? OBOE_ERROR_NO_MEMORY : OBOE_OK;
+ return (mFifoBuffer == nullptr) ? AAUDIO_ERROR_NO_MEMORY : AAUDIO_OK;
}
void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
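For reference, SharedRingBuffer::allocate() follows the usual ashmem pattern: one shared region sized for the audio data plus the two fifo counters, protected read/write and then mapped. A minimal sketch of that pattern, independent of the AAudio types (SharedRegion, allocateSharedRegion, and kCounterBytes are illustrative names, not from this change):

// Sketch only: allocate one ashmem region holding two counters plus a data
// buffer, then map it read/write. Error handling is reduced to early returns.
#include <cutils/ashmem.h>   // ashmem_create_region(), ashmem_set_prot_region()
#include <sys/mman.h>        // mmap(), PROT_READ, PROT_WRITE, MAP_SHARED
#include <unistd.h>          // close()
#include <cstddef>
#include <cstdint>

struct SharedRegion {
    int      fd = -1;
    uint8_t *base = nullptr;
    size_t   totalBytes = 0;
};

static bool allocateSharedRegion(size_t dataBytes, SharedRegion *out) {
    const size_t kCounterBytes = 2 * sizeof(int64_t);   // read + write counters
    out->totalBytes = dataBytes + kCounterBytes;
    out->fd = ashmem_create_region("ExampleRingBuffer", out->totalBytes);
    if (out->fd < 0) return false;
    if (ashmem_set_prot_region(out->fd, PROT_READ | PROT_WRITE) < 0) {
        close(out->fd);
        return false;
    }
    void *addr = mmap(nullptr, out->totalBytes, PROT_READ | PROT_WRITE,
                      MAP_SHARED, out->fd, 0);
    if (addr == MAP_FAILED) {
        close(out->fd);
        return false;
    }
    // One possible layout, matching the offsets in SharedRingBuffer.h:
    // counters at the start of the region, data following them.
    out->base = static_cast<uint8_t *>(addr);
    return true;
}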
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index 3cc1c2d..75f138b 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef OBOE_SHARED_RINGBUFFER_H
-#define OBOE_SHARED_RINGBUFFER_H
+#ifndef AAUDIO_SHARED_RINGBUFFER_H
+#define AAUDIO_SHARED_RINGBUFFER_H
#include <stdint.h>
#include <cutils/ashmem.h>
@@ -25,7 +25,7 @@
#include "RingBufferParcelable.h"
#include "AudioEndpointParcelable.h"
-namespace oboe {
+namespace aaudio {
// Determine the placement of the counters and data in shared memory.
#define SHARED_RINGBUFFER_READ_OFFSET 0
@@ -41,7 +41,7 @@
virtual ~SharedRingBuffer();
- oboe_result_t allocate(fifo_frames_t bytesPerFrame, fifo_frames_t capacityInFrames);
+ aaudio_result_t allocate(fifo_frames_t bytesPerFrame, fifo_frames_t capacityInFrames);
void fillParcelable(AudioEndpointParcelable &endpointParcelable,
RingBufferParcelable &ringBufferParcelable);
@@ -59,6 +59,6 @@
fifo_frames_t mCapacityInFrames = 0;
};
-} /* namespace oboe */
+} /* namespace aaudio */
-#endif //OBOE_SHARED_RINGBUFFER_H
+#endif //AAUDIO_SHARED_RINGBUFFER_H
diff --git a/services/oboeservice/TimestampScheduler.cpp b/services/oboeservice/TimestampScheduler.cpp
new file mode 100644
index 0000000..5875909
--- /dev/null
+++ b/services/oboeservice/TimestampScheduler.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// for random()
+#include <stdlib.h>
+
+#include "TimestampScheduler.h"
+
+using namespace aaudio;
+
+void TimestampScheduler::start(aaudio_nanoseconds_t startTime) {
+ mStartTime = startTime;
+ mLastTime = startTime;
+}
+
+aaudio_nanoseconds_t TimestampScheduler::nextAbsoluteTime() {
+ int64_t periodsElapsed = (mLastTime - mStartTime) / mBurstPeriod;
+ // This is an arbitrary schedule that could probably be improved.
+ // It starts out sending a timestamp on every period because we want to
+ // get an accurate picture when the stream starts. Then it slows down
+ // to the occasional timestamps needed to detect a slow drift.
+ int64_t minPeriodsToDelay = (periodsElapsed < 10) ? 1 :
+ (periodsElapsed < 100) ? 3 :
+ (periodsElapsed < 1000) ? 10 : 50;
+ aaudio_nanoseconds_t sleepTime = minPeriodsToDelay * mBurstPeriod;
+ // Generate a random rectangular distribution one burst wide so that we get
+ // an uncorrelated sampling of the MMAP pointer.
+ sleepTime += (aaudio_nanoseconds_t)(random() * mBurstPeriod / RAND_MAX);
+ mLastTime += sleepTime;
+ return mLastTime;
+}
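The comment in nextAbsoluteTime() describes a back-off: a timestamp every burst at first, then progressively fewer wakeups once the stream has been running for a while. The following standalone snippet is an illustration only (it mirrors the ternary chain above rather than calling the class) showing the minimum delay, in bursts, before the random jitter is added:

// Illustration only, not part of the patch.
#include <cstdint>
#include <cstdio>

static int64_t minPeriodsToDelay(int64_t periodsElapsed) {
    return (periodsElapsed < 10)   ? 1 :
           (periodsElapsed < 100)  ? 3 :
           (periodsElapsed < 1000) ? 10 : 50;
}

int main() {
    const int64_t samples[] = {0, 5, 50, 500, 5000};
    for (int64_t elapsed : samples) {
        // e.g. with a 5 ms burst period, a 50-burst delay means waking
        // roughly every 250 ms once the stream is well established.
        printf("bursts elapsed=%5lld -> delay >= %2lld bursts\n",
               (long long) elapsed, (long long) minPeriodsToDelay(elapsed));
    }
    return 0;
}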
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
new file mode 100644
index 0000000..efc9c5f
--- /dev/null
+++ b/services/oboeservice/TimestampScheduler.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_TIMESTAMP_SCHEDULER_H
+#define AAUDIO_TIMESTAMP_SCHEDULER_H
+
+//#include <stdlib.h> // random()
+
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "AudioStream.h"
+#include "fifo/FifoBuffer.h"
+#include "SharedRingBuffer.h"
+#include "AudioEndpointParcelable.h"
+
+namespace aaudio {
+
+/**
+ * Schedule wakeup time for monitoring the position
+ * of an MMAP/NOIRQ buffer.
+ *
+ * Note that this object is not thread safe. Only call it from a single thread.
+ */
+class TimestampScheduler
+{
+public:
+ TimestampScheduler() = default;
+ virtual ~TimestampScheduler() = default;
+
+ /**
+ * Start the schedule at the given time.
+ */
+ void start(aaudio_nanoseconds_t startTime);
+
+ /**
+ * Calculate the next time that the read position should be
+ * measured.
+ */
+ aaudio_nanoseconds_t nextAbsoluteTime();
+
+ void setBurstPeriod(aaudio_nanoseconds_t burstPeriod) {
+ mBurstPeriod = burstPeriod;
+ }
+
+ void setBurstPeriod(aaudio_size_frames_t framesPerBurst,
+ aaudio_sample_rate_t sampleRate) {
+ mBurstPeriod = AAUDIO_NANOS_PER_SECOND * framesPerBurst / sampleRate;
+ }
+
+ aaudio_nanoseconds_t getBurstPeriod() {
+ return mBurstPeriod;
+ }
+
+private:
+ // Start with an arbitrary default so we do not divide by zero.
+ aaudio_nanoseconds_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
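+ // mStartTime and mLastTime are set by start(); call start() before nextAbsoluteTime().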
+ aaudio_nanoseconds_t mStartTime;
+ aaudio_nanoseconds_t mLastTime;
+};
+
+} /* namespace aaudio */
+
+#endif /* AAUDIO_TIMESTAMP_SCHEDULER_H */
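A sketch of how a service-side timestamp thread might drive this class. Only setBurstPeriod(), start(), and nextAbsoluteTime() come from the header above; the four helper functions are hypothetical placeholders, not real APIs from this change:

// Usage sketch only, assuming this runs inside the service where
// TimestampScheduler.h and the aaudio typedefs are available.
#include "TimestampScheduler.h"

aaudio_nanoseconds_t nowNanos();                    // placeholder clock source
bool streamIsRunning();                             // placeholder stream state check
void sleepUntilNanos(aaudio_nanoseconds_t when);    // placeholder absolute-time sleep
void sendCurrentTimestamp();                        // placeholder: read and post the MMAP position

void runTimestampLoop(aaudio_size_frames_t framesPerBurst,
                      aaudio_sample_rate_t sampleRate) {
    aaudio::TimestampScheduler scheduler;
    scheduler.setBurstPeriod(framesPerBurst, sampleRate); // e.g. 240 frames @ 48000 Hz ~= 5 ms
    scheduler.start(nowNanos());
    while (streamIsRunning()) {
        sleepUntilNanos(scheduler.nextAbsoluteTime());
        sendCurrentTimestamp();
    }
}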
diff --git a/services/radio/RadioService.cpp b/services/radio/RadioService.cpp
index a73ed8f..f7a73c3 100644
--- a/services/radio/RadioService.cpp
+++ b/services/radio/RadioService.cpp
@@ -68,10 +68,11 @@
radio_properties_t properties;
properties.handle =
(radio_handle_t)android_atomic_inc(&mNextUniqueId);
-
- ALOGI("loaded default module %s, handle %d", properties.product, properties.handle);
-
convertProperties(&properties, &halProperties);
+
+ ALOGI("loaded default module %s, ver %s, handle %d", properties.product,
+ properties.version, properties.handle);
+
sp<Module> module = new Module(dev, properties);
mModules.add(properties.handle, module);
}
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 54f9b95..78845b7 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -923,7 +923,10 @@
void SoundTriggerHwService::ModuleClient::onFirstRef()
{
- IInterface::asBinder(mClient)->linkToDeath(this);
+ sp<IBinder> binder = IInterface::asBinder(mClient);
+ if (binder != 0) {
+ binder->linkToDeath(this);
+ }
}
SoundTriggerHwService::ModuleClient::~ModuleClient()