Merge "mpeg2ts: report estimated duration only when stabilized"
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
index 77b9a33..7c37955 100644
--- a/camera/ndk/NdkCaptureRequest.cpp
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -51,8 +51,13 @@
ACaptureRequest* req, const ACameraOutputTarget* target) {
ATRACE_CALL();
if (req == nullptr || req->targets == nullptr || target == nullptr) {
+ void* req_targets;
+ if (req != nullptr)
+ req_targets = req->targets;
+ else
+ req_targets = nullptr;
ALOGE("%s: Error: invalid input: req %p, req-targets %p, target %p",
- __FUNCTION__, req, req->targets, target);
+ __FUNCTION__, req, req_targets, target);
return ACAMERA_ERROR_INVALID_PARAMETER;
}
auto pair = req->targets->mOutputs.insert(*target);
@@ -67,8 +72,13 @@
ACaptureRequest* req, const ACameraOutputTarget* target) {
ATRACE_CALL();
if (req == nullptr || req->targets == nullptr || target == nullptr) {
+ void* req_targets;
+ if (req != nullptr)
+ req_targets = req->targets;
+ else
+ req_targets = nullptr;
ALOGE("%s: Error: invalid input: req %p, req-targets %p, target %p",
- __FUNCTION__, req, req->targets, target);
+ __FUNCTION__, req, req_targets, target);
return ACAMERA_ERROR_INVALID_PARAMETER;
}
req->targets->mOutputs.erase(*target);
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 40726a3..b28d509 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -129,7 +129,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libgui libcutils libui
+ libmedia libgui libcutils
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -153,7 +153,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libgui libcutils libui
+ libmedia libaudioclient libgui libcutils
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -186,7 +186,6 @@
libmedia \
libgui \
libcutils \
- libui \
libRScpp \
LOCAL_C_INCLUDES:= \
@@ -218,7 +217,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libgui libcutils libui libc
+ libcutils libc
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index 26135d7..3108a67 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -32,7 +32,6 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
@@ -401,8 +400,6 @@
ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
sp<ALooper> looper = new ALooper;
looper->start();
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index 410dd69..f219e69 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -30,7 +30,6 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/NuMediaExtractor.h>
#include <media/stagefright/RenderScriptWrapper.h>
@@ -738,8 +737,6 @@
ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
android::sp<ALooper> looper = new ALooper;
looper->start();
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 0a3bdf3..4a83a4a 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -29,7 +29,6 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaMuxer.h>
@@ -319,9 +318,6 @@
}
ProcessState::self()->startThreadPool();
- // Make sure setDataSource() works.
- DataSource::RegisterDefaultSniffers();
-
sp<ALooper> looper = new ALooper;
looper->start();
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 9aa0156..94c2e96 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -170,8 +170,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
#if 1
if (argc != 3) {
fprintf(stderr, "usage: %s <filename> <input_color_format>\n", argv[0]);
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
index 8fe1dd4..12bbfd1 100644
--- a/cmds/stagefright/sf2.cpp
+++ b/cmds/stagefright/sf2.cpp
@@ -59,6 +59,114 @@
oldhandler(signum);
}
+namespace {
+
+enum {
+ kWhatFillThisBuffer = 'fill',
+ kWhatDrainThisBuffer = 'drai',
+ kWhatEOS = 'eos ',
+ kWhatStopCompleted = 'scom',
+ kWhatReleaseCompleted = 'rcom',
+ kWhatFlushCompleted = 'fcom',
+ kWhatError = 'erro',
+};
+
+class Sf2Callback : public CodecBase::Callback {
+public:
+ explicit Sf2Callback(const sp<AMessage> &notify);
+ ~Sf2Callback();
+
+ virtual void fillThisBuffer(IOMX::buffer_id bufferId, const sp<MediaCodecBuffer> &buffer,
+ const sp<AMessage> &reply) override;
+ virtual void drainThisBuffer(IOMX::buffer_id bufferId, const sp<MediaCodecBuffer> &buffer,
+ int32_t flags, const sp<AMessage> &reply) override;
+ virtual void onEos(status_t err) override;
+ virtual void onStopCompleted() override;
+ virtual void onReleaseCompleted() override;
+ virtual void onFlushCompleted() override;
+ virtual void onError(status_t err, enum ActionCode actionCode) override;
+ // Events below are not handled; thus ignore.
+ virtual void onComponentAllocated(const char *) override {}
+ virtual void onComponentConfigured(const sp<AMessage> &, const sp<AMessage> &) override {}
+ virtual void onInputSurfaceCreated(
+ const sp<AMessage> &,
+ const sp<AMessage> &,
+ const sp<BufferProducerWrapper> &) override {}
+ virtual void onInputSurfaceCreationFailed(status_t) override {}
+ virtual void onInputSurfaceAccepted(const sp<AMessage> &, const sp<AMessage> &) override {}
+ virtual void onInputSurfaceDeclined(status_t) override {}
+ virtual void onSignaledInputEOS(status_t) override {}
+ virtual void onBuffersAllocated(int32_t, const sp<CodecBase::PortDescription> &) override {}
+ virtual void onOutputFramesRendered(const std::list<FrameRenderTracker::Info> &) override {}
+private:
+ const sp<AMessage> mNotify;
+};
+
+Sf2Callback::Sf2Callback(const sp<AMessage> &notify) : mNotify(notify) {}
+
+Sf2Callback::~Sf2Callback() {}
+
+void Sf2Callback::fillThisBuffer(
+ IOMX::buffer_id bufferId,
+ const sp<MediaCodecBuffer> &buffer,
+ const sp<AMessage> &reply) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatFillThisBuffer);
+ notify->setInt32("buffer-id", bufferId);
+ notify->setObject("buffer", buffer);
+ notify->setMessage("reply", reply);
+ notify->post();
+}
+
+void Sf2Callback::drainThisBuffer(
+ IOMX::buffer_id bufferId,
+ const sp<MediaCodecBuffer> &buffer,
+ int32_t flags,
+ const sp<AMessage> &reply) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatDrainThisBuffer);
+ notify->setInt32("buffer-id", bufferId);
+ notify->setObject("buffer", buffer);
+ notify->setInt32("flags", flags);
+ notify->setMessage("reply", reply);
+ notify->post();
+}
+
+void Sf2Callback::onEos(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatEOS);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void Sf2Callback::onStopCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatStopCompleted);
+ notify->post();
+}
+
+void Sf2Callback::onReleaseCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatReleaseCompleted);
+ notify->post();
+}
+
+void Sf2Callback::onFlushCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatFlushCompleted);
+ notify->post();
+}
+
+void Sf2Callback::onError(status_t err, enum ActionCode actionCode) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatError);
+ notify->setInt32("err", err);
+ notify->setInt32("actionCode", actionCode);
+ notify->post();
+}
+
+} // namespace
+
struct Controller : public AHandler {
Controller(const char *uri, bool decodeAudio,
const sp<Surface> &surface, bool renderToSurface)
@@ -148,8 +256,8 @@
mDecodeLooper->registerHandler(mCodec);
- mCodec->setNotificationMessage(
- new AMessage(kWhatCodecNotify, this));
+ mCodec->setCallback(
+ std::make_shared<Sf2Callback>(new AMessage(kWhatCodecNotify, this)));
sp<AMessage> format = makeFormat(mSource->getFormat());
@@ -210,27 +318,28 @@
int32_t what;
CHECK(msg->findInt32("what", &what));
- if (what == CodecBase::kWhatFillThisBuffer) {
+ if (what == kWhatFillThisBuffer) {
onFillThisBuffer(msg);
- } else if (what == CodecBase::kWhatDrainThisBuffer) {
+ } else if (what == kWhatDrainThisBuffer) {
if ((mNumOutputBuffersReceived++ % 16) == 0) {
printf(".");
fflush(stdout);
}
onDrainThisBuffer(msg);
- } else if (what == CodecBase::kWhatEOS
- || what == CodecBase::kWhatError) {
- printf((what == CodecBase::kWhatEOS) ? "$\n" : "E\n");
+ } else if (what == kWhatEOS
+ || what == kWhatError) {
+ printf((what == kWhatEOS) ? "$\n" : "E\n");
printStatistics();
(new AMessage(kWhatStop, this))->post();
- } else if (what == CodecBase::kWhatFlushCompleted) {
+ } else if (what == kWhatFlushCompleted) {
mSeekState = SEEK_FLUSH_COMPLETED;
mCodec->signalResume();
(new AMessage(kWhatSeek, this))->post(5000000ll);
- } else if (what == CodecBase::kWhatShutdownCompleted) {
+ } else if (what == kWhatStopCompleted ||
+ what == kWhatReleaseCompleted) {
mDecodeLooper->unregisterHandler(mCodec->id());
if (mDecodeLooper != looper()) {
@@ -619,8 +728,6 @@
return 1;
}
- DataSource::RegisterDefaultSniffers();
-
sp<ALooper> looper = new ALooper;
looper->setName("sf2");
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 2bb35cb..5e3a859 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -965,8 +965,6 @@
}
}
- DataSource::RegisterDefaultSniffers();
-
status_t err = OK;
for (int k = 0; k < argc && err == OK; ++k) {
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 16ff39d..0cba8b9 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -301,8 +301,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
if (argc != 2) {
fprintf(stderr, "Usage: %s filename\n", argv[0]);
return 1;
diff --git a/drm/libmediadrm/Android.mk b/drm/libmediadrm/Android.mk
index 270f291..3f0e663 100644
--- a/drm/libmediadrm/Android.mk
+++ b/drm/libmediadrm/Android.mk
@@ -7,19 +7,23 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- Crypto.cpp \
- Drm.cpp \
- DrmSessionManager.cpp \
- SharedLibrary.cpp
+ Crypto.cpp \
+ Drm.cpp \
+ DrmSessionManager.cpp \
+ ICrypto.cpp \
+ IDrm.cpp \
+ IDrmClient.cpp \
+ IMediaDrmService.cpp \
+ SharedLibrary.cpp
LOCAL_SHARED_LIBRARIES := \
- libbinder \
- libcutils \
- libdl \
- liblog \
- libmedia \
- libstagefright \
- libutils
+ libbinder \
+ libcutils \
+ libdl \
+ liblog \
+ libmediautils \
+ libstagefright_foundation \
+ libutils
LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
diff --git a/media/libmedia/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
similarity index 100%
rename from media/libmedia/ICrypto.cpp
rename to drm/libmediadrm/ICrypto.cpp
diff --git a/media/libmedia/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
similarity index 100%
rename from media/libmedia/IDrm.cpp
rename to drm/libmediadrm/IDrm.cpp
diff --git a/media/libmedia/IDrmClient.cpp b/drm/libmediadrm/IDrmClient.cpp
similarity index 100%
rename from media/libmedia/IDrmClient.cpp
rename to drm/libmediadrm/IDrmClient.cpp
diff --git a/media/libmedia/IMediaDrmService.cpp b/drm/libmediadrm/IMediaDrmService.cpp
similarity index 100%
rename from media/libmedia/IMediaDrmService.cpp
rename to drm/libmediadrm/IMediaDrmService.cpp
diff --git a/include/drm/drm_framework_common.h b/include/drm/drm_framework_common.h
index 0750406..d75f71c 100644
--- a/include/drm/drm_framework_common.h
+++ b/include/drm/drm_framework_common.h
@@ -234,10 +234,6 @@
* POSIX based Decrypt API set for container based DRM
*/
static const int CONTAINER_BASED = 0x02;
- /**
- * Decrypt API for Widevine streams
- */
- static const int WV_BASED = 0x3;
};
/**
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
index ea03a90..9719efa 100644
--- a/include/media/AudioParameter.h
+++ b/include/media/AudioParameter.h
@@ -75,24 +75,28 @@
static const char * const valueListSeparator;
- String8 toString();
+ String8 toString() const { return toStringImpl(true); }
+ String8 keysToString() const { return toStringImpl(false); }
status_t add(const String8& key, const String8& value);
status_t addInt(const String8& key, const int value);
+ status_t addKey(const String8& key);
status_t addFloat(const String8& key, const float value);
status_t remove(const String8& key);
- status_t get(const String8& key, String8& value);
- status_t getInt(const String8& key, int& value);
- status_t getFloat(const String8& key, float& value);
- status_t getAt(size_t index, String8& key, String8& value);
+ status_t get(const String8& key, String8& value) const;
+ status_t getInt(const String8& key, int& value) const;
+ status_t getFloat(const String8& key, float& value) const;
+ status_t getAt(size_t index, String8& key, String8& value) const;
- size_t size() { return mParameters.size(); }
+ size_t size() const { return mParameters.size(); }
private:
String8 mKeyValuePairs;
KeyedVector <String8, String8> mParameters;
+
+ String8 toStringImpl(bool useValues) const;
};
}; // namespace android
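Note on the AudioParameter changes above: toString() is now const and split from the new
keysToString(), and addKey() records a key that has no value (useful for building query strings).
A minimal usage sketch, assuming the class's existing default constructor; the parameter keys
("screen_state", "routing") are only illustrative:

    #include <media/AudioParameter.h>
    #include <utils/String8.h>

    using namespace android;

    static String8 buildKeyValuePairs() {
        AudioParameter param;
        param.add(String8("screen_state"), String8("on"));  // string value
        param.addInt(String8("routing"), 2);                 // integer value
        return param.toString();   // keys with values, e.g. "screen_state=on;routing=2"
    }

    static String8 buildKeyQuery() {
        AudioParameter query;
        query.addKey(String8("routing"));                    // key only, no value
        return query.keysToString();   // keys without the "=value" parts
    }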
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 63076e9..f7eb397 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -180,7 +180,7 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
- int uid = -1,
+ uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL);
@@ -218,7 +218,7 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
- int uid = -1,
+ uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL);
@@ -642,7 +642,7 @@
sp<DeathNotifier> mDeathNotifier;
uint32_t mSequence; // incremented for each new IAudioRecord attempt
- int mClientUid;
+ uid_t mClientUid;
pid_t mClientPid;
audio_attributes_t mAttributes;
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 399154c..7c5686a 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -233,7 +233,7 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
- int uid = -1,
+ uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
@@ -263,7 +263,7 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
- int uid = -1,
+ uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
@@ -309,7 +309,7 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
- int uid = -1,
+ uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
@@ -1130,7 +1130,7 @@
sp<DeathNotifier> mDeathNotifier;
uint32_t mSequence; // incremented for each new IAudioTrack attempt
- int mClientUid;
+ uid_t mClientUid;
pid_t mClientPid;
sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
diff --git a/include/media/BufferingSettings.h b/include/media/BufferingSettings.h
new file mode 100644
index 0000000..7dd9d40
--- /dev/null
+++ b/include/media/BufferingSettings.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BUFFERING_SETTINGS_H
+#define ANDROID_BUFFERING_SETTINGS_H
+
+#include <binder/Parcelable.h>
+
+namespace android {
+
+enum BufferingMode : int {
+ // Do not support buffering.
+ BUFFERING_MODE_NONE = 0,
+ // Support only time based buffering.
+ BUFFERING_MODE_TIME_ONLY = 1,
+ // Support only size based buffering.
+ BUFFERING_MODE_SIZE_ONLY = 2,
+ // Support both time and size based buffering, time based calculation precedes size based.
+ // Size based calculation will be used only when time information is not available for
+ // the stream.
+ BUFFERING_MODE_TIME_THEN_SIZE = 3,
+ // Number of modes.
+ BUFFERING_MODE_COUNT = 4,
+};
+
+struct BufferingSettings : public Parcelable {
+ static const int kNoWatermark = -1;
+
+ static bool IsValidBufferingMode(int mode);
+
+ BufferingMode mInitialBufferingMode; // for prepare
+ BufferingMode mRebufferingMode; // for playback
+
+ int mInitialWatermarkMs; // time based
+ int mInitialWatermarkKB; // size based
+
+ // When cached data is below this mark, playback will be paused for buffering
+ // until data reaches |mRebufferingWatermarkHighMs| or end of stream.
+ int mRebufferingWatermarkLowMs;
+ // When cached data is above this mark, buffering will be paused.
+ int mRebufferingWatermarkHighMs;
+
+ // When cached data is below this mark, playback will be paused for buffering
+ // until data reaches |mRebufferingWatermarkHighKB| or end of stream.
+ int mRebufferingWatermarkLowKB;
+ // When cached data is above this mark, buffering will be paused.
+ int mRebufferingWatermarkHighKB;
+
+ BufferingSettings();
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+};
+
+} // namespace android
+
+// ---------------------------------------------------------------------------
+
+#endif // ANDROID_BUFFERING_SETTINGS_H
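Note on BufferingSettings above: a sketch of filling in the new struct before passing it to
setBufferingSettings(); the watermark numbers are arbitrary illustrative values, not defaults:

    #include <media/BufferingSettings.h>

    using namespace android;

    static BufferingSettings makeExampleSettings() {
        BufferingSettings s;
        s.mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;      // used while preparing
        s.mRebufferingMode = BUFFERING_MODE_TIME_THEN_SIZE;      // used during playback
        s.mInitialWatermarkMs = 2000;                            // time-based prepare watermark
        s.mInitialWatermarkKB = BufferingSettings::kNoWatermark; // size-based watermark unused
        s.mRebufferingWatermarkLowMs = 1000;    // pause playback and rebuffer below this
        s.mRebufferingWatermarkHighMs = 5000;   // stop buffering above this
        s.mRebufferingWatermarkLowKB = 512;
        s.mRebufferingWatermarkHighKB = 2048;
        return s;
    }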
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index 0fd8933..f642373 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -23,6 +23,8 @@
#include <utils/KeyedVector.h>
#include <system/audio.h>
+#include <media/IMediaSource.h>
+
// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
// global, and not in android::
struct sockaddr_in;
@@ -37,6 +39,9 @@
struct IMediaHTTPService;
struct AudioPlaybackRate;
struct AVSyncSettings;
+struct BufferingSettings;
+
+typedef IMediaSource::ReadOptions::SeekMode MediaPlayerSeekMode;
class IMediaPlayer: public IInterface
{
@@ -55,6 +60,9 @@
virtual status_t setDataSource(const sp<IDataSource>& source) = 0;
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) = 0;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
virtual status_t prepareAsync() = 0;
virtual status_t start() = 0;
virtual status_t stop() = 0;
@@ -65,7 +73,9 @@
virtual status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) = 0;
virtual status_t getSyncSettings(AVSyncSettings* sync /* nonnull */,
float* videoFps /* nonnull */) = 0;
- virtual status_t seekTo(int msec) = 0;
+ virtual status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) = 0;
virtual status_t getCurrentPosition(int* msec) = 0;
virtual status_t getDuration(int* msec) = 0;
virtual status_t reset() = 0;
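Note on the IMediaPlayer changes above: seekTo() now takes a MediaPlayerSeekMode (an alias of
IMediaSource::ReadOptions::SeekMode) defaulting to SEEK_PREVIOUS_SYNC, so existing one-argument
callers keep their behavior. A minimal sketch; "player" stands for an already connected
sp<IMediaPlayer>, and SEEK_CLOSEST_SYNC is one of the ReadOptions seek modes:

    #include <media/IMediaPlayer.h>

    using namespace android;

    static status_t seekExamples(const sp<IMediaPlayer> &player) {
        // Unchanged call sites: mode defaults to MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC.
        status_t err = player->seekTo(30000 /* msec */);
        if (err != OK) {
            return err;
        }
        // Explicit mode: seek to the sync sample closest to the target time.
        return player->seekTo(30000 /* msec */, MediaPlayerSeekMode::SEEK_CLOSEST_SYNC);
    }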
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 839945c..ec1d4b6 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -52,6 +52,20 @@
kFenceTimeoutMs = 1000
};
+ enum PortMode {
+ kPortModePresetStart = 0,
+ kPortModePresetByteBuffer,
+ kPortModePresetANWBuffer,
+ kPortModePresetSecureBuffer,
+ kPortModePresetEnd,
+
+ kPortModeDynamicStart = 100,
+ kPortModeDynamicANWBuffer, // uses metadata mode kMetadataBufferTypeANWBuffer
+ // or kMetadataBufferTypeGrallocSource
+ kPortModeDynamicNativeHandle, // uses metadata mode kMetadataBufferTypeNativeHandleSource
+ kPortModeDynamicEnd,
+ };
+
struct ComponentInfo {
String8 mName;
List<String8> mRoles;
@@ -90,10 +104,8 @@
virtual status_t setConfig(
OMX_INDEXTYPE index, const void *params, size_t size) = 0;
- // This will set *type to previous metadata buffer type on OMX error (not on binder error), and
- // new metadata buffer type on success.
- virtual status_t storeMetaDataInBuffers(
- OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type = NULL) = 0;
+ virtual status_t setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) = 0;
virtual status_t prepareForAdaptivePlayback(
OMX_U32 portIndex, OMX_BOOL enable,
@@ -103,9 +115,6 @@
OMX_U32 portIndex, OMX_BOOL tunneled,
OMX_U32 audioHwSync, native_handle_t **sidebandHandle) = 0;
- virtual status_t enableNativeBuffers(
- OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) = 0;
-
virtual status_t getGraphicBufferUsage(
OMX_U32 port_index, OMX_U32* usage) = 0;
@@ -241,23 +250,6 @@
uint32_t flags = 0);
};
-struct CodecProfileLevel {
- OMX_U32 mProfile;
- OMX_U32 mLevel;
-};
-
-inline static const char *asString(MetadataBufferType i, const char *def = "??") {
- using namespace android;
- switch (i) {
- case kMetadataBufferTypeCameraSource: return "CameraSource";
- case kMetadataBufferTypeGrallocSource: return "GrallocSource";
- case kMetadataBufferTypeANWBuffer: return "ANWBuffer";
- case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
- case kMetadataBufferTypeInvalid: return "Invalid";
- default: return def;
- }
-}
-
} // namespace android
#endif // ANDROID_IOMX_H_
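Note on the IOMX changes above: storeMetaDataInBuffers() and enableNativeBuffers() are folded
into a single setPortMode() call taking the new PortMode enum. A sketch of choosing a mode per
port for a video decoder; the branch logic is an assumption about typical secure/surface setups,
not something the header mandates, and "omx" stands for a bound sp<IOMX>:

    #include <media/IOMX.h>

    using namespace android;

    static status_t setDecoderPortModes(
            const sp<IOMX> &omx, bool secureInput, bool haveSurface) {
        status_t err = omx->setPortMode(
                0 /* input port */,
                secureInput ? IOMX::kPortModePresetSecureBuffer
                            : IOMX::kPortModePresetByteBuffer);
        if (err != OK) {
            return err;
        }
        // Dynamic ANW mode corresponds to metadata mode kMetadataBufferTypeANWBuffer.
        return omx->setPortMode(
                1 /* output port */,
                haveSurface ? IOMX::kPortModeDynamicANWBuffer
                            : IOMX::kPortModePresetByteBuffer);
    }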
diff --git a/include/media/MediaCodecBuffer.h b/include/media/MediaCodecBuffer.h
index 05aaa14..501c00b 100644
--- a/include/media/MediaCodecBuffer.h
+++ b/include/media/MediaCodecBuffer.h
@@ -58,13 +58,13 @@
sp<AMessage> meta();
sp<AMessage> format();
- virtual sp<MediaCodecBuffer> clone(const sp<AMessage> &format);
+ void setFormat(const sp<AMessage> &format);
private:
MediaCodecBuffer() = delete;
const sp<AMessage> mMeta;
- const sp<AMessage> mFormat;
+ sp<AMessage> mFormat;
const sp<ABuffer> mBuffer;
MediaBufferBase *mMediaBufferBase;
};
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
index 5f2a32d..0682413 100644
--- a/include/media/MediaDefs.h
+++ b/include/media/MediaDefs.h
@@ -59,8 +59,6 @@
extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
-extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
-
extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
extern const char *MEDIA_MIMETYPE_TEXT_VTT;
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 4977efd..0e815cb 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -30,6 +30,7 @@
#include <media/AudioSystem.h>
#include <media/AudioTimestamp.h>
#include <media/AVSyncSettings.h>
+#include <media/BufferingSettings.h>
#include <media/Metadata.h>
// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
@@ -174,6 +175,15 @@
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ *buffering = BufferingSettings();
+ return OK;
+ }
+ virtual status_t setBufferingSettings(const BufferingSettings& /* buffering */) {
+ return OK;
+ }
+
virtual status_t prepare() = 0;
virtual status_t prepareAsync() = 0;
virtual status_t start() = 0;
@@ -205,7 +215,8 @@
*videoFps = -1.f;
return OK;
}
- virtual status_t seekTo(int msec) = 0;
+ virtual status_t seekTo(
+ int msec, MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) = 0;
virtual status_t getCurrentPosition(int *msec) = 0;
virtual status_t getDuration(int *msec) = 0;
virtual status_t reset() = 0;
diff --git a/include/media/OMXBuffer.h b/include/media/OMXBuffer.h
index 0322b73..89b709c 100644
--- a/include/media/OMXBuffer.h
+++ b/include/media/OMXBuffer.h
@@ -44,7 +44,7 @@
OMXBuffer(const sp<MediaCodecBuffer> &codecBuffer);
// Constructs a buffer of type kBufferTypeSharedMem.
- OMXBuffer(const sp<IMemory> &mem, size_t allottedSize = 0);
+ OMXBuffer(const sp<IMemory> &mem);
// Constructs a buffer of type kBufferTypeANWBuffer.
OMXBuffer(const sp<GraphicBuffer> &gbuf);
@@ -78,7 +78,6 @@
// kBufferTypeSharedMem
sp<IMemory> mMem;
- OMX_U32 mAllottedSize;
// kBufferTypeANWBuffer
sp<GraphicBuffer> mGraphicBuffer;
diff --git a/include/media/TypeConverter.h b/include/media/TypeConverter.h
new file mode 100644
index 0000000..ffe4c1f
--- /dev/null
+++ b/include/media/TypeConverter.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TYPE_CONVERTER_H_
+#define ANDROID_TYPE_CONVERTER_H_
+
+#include <string>
+#include <string.h>
+
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <utils/Vector.h>
+#include <utils/SortedVector.h>
+
+#include "convert.h"
+#include "AudioParameter.h"
+
+namespace android {
+
+struct SampleRateTraits
+{
+ typedef uint32_t Type;
+ typedef SortedVector<Type> Collection;
+};
+struct DeviceTraits
+{
+ typedef audio_devices_t Type;
+ typedef Vector<Type> Collection;
+};
+struct OutputDeviceTraits : public DeviceTraits {};
+struct InputDeviceTraits : public DeviceTraits {};
+struct OutputFlagTraits
+{
+ typedef audio_output_flags_t Type;
+ typedef Vector<Type> Collection;
+};
+struct InputFlagTraits
+{
+ typedef audio_input_flags_t Type;
+ typedef Vector<Type> Collection;
+};
+struct FormatTraits
+{
+ typedef audio_format_t Type;
+ typedef Vector<Type> Collection;
+};
+struct ChannelTraits
+{
+ typedef audio_channel_mask_t Type;
+ typedef SortedVector<Type> Collection;
+};
+struct OutputChannelTraits : public ChannelTraits {};
+struct InputChannelTraits : public ChannelTraits {};
+struct ChannelIndexTraits : public ChannelTraits {};
+struct GainModeTraits
+{
+ typedef audio_gain_mode_t Type;
+ typedef Vector<Type> Collection;
+};
+struct StreamTraits
+{
+ typedef audio_stream_type_t Type;
+ typedef Vector<Type> Collection;
+};
+struct AudioModeTraits
+{
+ typedef audio_mode_t Type;
+ typedef Vector<Type> Collection;
+};
+template <typename T>
+struct DefaultTraits
+{
+ typedef T Type;
+ typedef Vector<Type> Collection;
+};
+
+template <class Traits>
+static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
+ const char *del = AudioParameter::valueListSeparator)
+{
+ char *literal = strdup(str.c_str());
+ for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+ typename Traits::Type value;
+ if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
+ collection.add(value);
+ }
+ }
+ free(literal);
+}
+
+template <class Traits>
+class TypeConverter
+{
+public:
+ static bool toString(const typename Traits::Type &value, std::string &str);
+
+ static bool fromString(const std::string &str, typename Traits::Type &result);
+
+ static void collectionFromString(const std::string &str,
+ typename Traits::Collection &collection,
+ const char *del = AudioParameter::valueListSeparator);
+
+ static uint32_t maskFromString(
+ const std::string &str, const char *del = AudioParameter::valueListSeparator);
+
+ static void maskToString(
+ uint32_t mask, std::string &str, const char *del = AudioParameter::valueListSeparator);
+
+protected:
+ struct Table {
+ const char *literal;
+ typename Traits::Type value;
+ };
+
+ static const Table mTable[];
+};
+
+template <class Traits>
+inline bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
+{
+ for (size_t i = 0; mTable[i].literal; i++) {
+ if (mTable[i].value == value) {
+ str = mTable[i].literal;
+ return true;
+ }
+ }
+ char result[64];
+ snprintf(result, sizeof(result), "Unknown enum value %d", value);
+ str = result;
+ return false;
+}
+
+template <class Traits>
+inline bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
+{
+ for (size_t i = 0; mTable[i].literal; i++) {
+ if (strcmp(mTable[i].literal, str.c_str()) == 0) {
+ ALOGV("stringToEnum() found %s", mTable[i].literal);
+ result = mTable[i].value;
+ return true;
+ }
+ }
+ return false;
+}
+
+template <class Traits>
+inline void TypeConverter<Traits>::collectionFromString(const std::string &str,
+ typename Traits::Collection &collection,
+ const char *del)
+{
+ char *literal = strdup(str.c_str());
+
+ for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+ typename Traits::Type value;
+ if (fromString(cstr, value)) {
+ collection.add(value);
+ }
+ }
+ free(literal);
+}
+
+template <class Traits>
+inline uint32_t TypeConverter<Traits>::maskFromString(const std::string &str, const char *del)
+{
+ char *literal = strdup(str.c_str());
+ uint32_t value = 0;
+ for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+ typename Traits::Type type;
+ if (fromString(cstr, type)) {
+ value |= static_cast<uint32_t>(type);
+ }
+ }
+ free(literal);
+ return value;
+}
+
+template <class Traits>
+inline void TypeConverter<Traits>::maskToString(uint32_t mask, std::string &str, const char *del)
+{
+ if (mask != 0) {
+ bool first_flag = true;
+ for (size_t i = 0; mTable[i].literal; i++) {
+ if (mTable[i].value != 0 && (mask & mTable[i].value) == mTable[i].value) {
+ if (!first_flag) str += del;
+ first_flag = false;
+ str += mTable[i].literal;
+ }
+ }
+ } else {
+ toString(static_cast<typename Traits::Type>(0), str);
+ }
+}
+
+typedef TypeConverter<OutputDeviceTraits> OutputDeviceConverter;
+typedef TypeConverter<InputDeviceTraits> InputDeviceConverter;
+typedef TypeConverter<OutputFlagTraits> OutputFlagConverter;
+typedef TypeConverter<InputFlagTraits> InputFlagConverter;
+typedef TypeConverter<FormatTraits> FormatConverter;
+typedef TypeConverter<OutputChannelTraits> OutputChannelConverter;
+typedef TypeConverter<InputChannelTraits> InputChannelConverter;
+typedef TypeConverter<ChannelIndexTraits> ChannelIndexConverter;
+typedef TypeConverter<GainModeTraits> GainModeConverter;
+typedef TypeConverter<StreamTraits> StreamTypeConverter;
+typedef TypeConverter<AudioModeTraits> AudioModeConverter;
+
+bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);
+
+bool deviceToString(audio_devices_t device, std::string& literalDevice);
+
+SampleRateTraits::Collection samplingRatesFromString(
+ const std::string &samplingRates, const char *del = AudioParameter::valueListSeparator);
+
+FormatTraits::Collection formatsFromString(
+ const std::string &formats, const char *del = AudioParameter::valueListSeparator);
+
+audio_format_t formatFromString(
+ const std::string &literalFormat, audio_format_t defaultFormat = AUDIO_FORMAT_DEFAULT);
+
+audio_channel_mask_t channelMaskFromString(const std::string &literalChannels);
+
+ChannelTraits::Collection channelMasksFromString(
+ const std::string &channels, const char *del = AudioParameter::valueListSeparator);
+
+InputChannelTraits::Collection inputChannelMasksFromString(
+ const std::string &inChannels, const char *del = AudioParameter::valueListSeparator);
+
+OutputChannelTraits::Collection outputChannelMasksFromString(
+ const std::string &outChannels, const char *del = AudioParameter::valueListSeparator);
+
+}; // namespace android
+
+#endif /*ANDROID_TYPE_CONVERTER_H_*/
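Note on TypeConverter.h above: a usage sketch of the string/enum round trip. The string literals
assume the usual system/audio enum spellings; the actual lookup tables live in the accompanying
TypeConverter.cpp, so treat the exact names as assumptions:

    #define LOG_TAG "TypeConverterDemo"
    #include <media/TypeConverter.h>

    using namespace android;

    static void typeConverterDemo() {
        std::string name;
        if (FormatConverter::toString(AUDIO_FORMAT_PCM_16_BIT, name)) {
            ALOGV("format name: %s", name.c_str());
        }

        audio_devices_t device;
        if (deviceFromString("AUDIO_DEVICE_OUT_SPEAKER", device)) {
            ALOGV("device value: %#x", device);
        }

        // Falls back to AUDIO_FORMAT_DEFAULT when the literal is unknown.
        audio_format_t fmt = formatFromString("AUDIO_FORMAT_AAC");
        ALOGV("parsed format: %#x", fmt);
    }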
diff --git a/services/audiopolicy/utilities/convert/convert.h b/include/media/convert.h
similarity index 100%
rename from services/audiopolicy/utilities/convert/convert.h
rename to include/media/convert.h
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 389ec01..be34d02 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -233,7 +233,9 @@
float* videoFps /* nonnull */);
status_t getVideoWidth(int *w);
status_t getVideoHeight(int *h);
- status_t seekTo(int msec);
+ status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
status_t getCurrentPosition(int *msec);
status_t getDuration(int *msec);
status_t reset();
@@ -257,7 +259,7 @@
private:
void clear_l();
- status_t seekTo_l(int msec);
+ status_t seekTo_l(int msec, MediaPlayerSeekMode mode);
status_t prepareAsync_l();
status_t getDuration_l(int *msec);
status_t attachNewPlayer(const sp<IMediaPlayer>& player);
@@ -274,7 +276,9 @@
void* mCookie;
media_player_states mCurrentState;
int mCurrentPosition;
+ MediaPlayerSeekMode mCurrentSeekMode;
int mSeekPosition;
+ MediaPlayerSeekMode mSeekMode;
bool mPrepareSync;
status_t mPrepareStatus;
audio_stream_type_t mStreamType;
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 13ceeb6..8fc2809 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -277,9 +277,7 @@
bool mChannelMaskPresent;
int32_t mChannelMask;
unsigned mDequeueCounter;
- MetadataBufferType mInputMetadataType;
- MetadataBufferType mOutputMetadataType;
- bool mLegacyAdaptiveExperiment;
+ IOMX::PortMode mPortMode[2];
int32_t mMetadataBuffersToSubmit;
size_t mNumUndequeuedBuffers;
sp<DataConverter> mConverter[2];
@@ -303,6 +301,7 @@
status_t freeBuffer(OMX_U32 portIndex, size_t i);
status_t handleSetSurface(const sp<Surface> &surface);
+ status_t setPortMode(int32_t portIndex, IOMX::PortMode mode);
status_t setupNativeWindowSizeFormatAndUsage(
ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */,
bool reconnect);
@@ -319,11 +318,11 @@
BufferInfo *dequeueBufferFromNativeWindow();
inline bool storingMetadataInDecodedBuffers() {
- return mOutputMetadataType >= 0 && !mIsEncoder;
+ return (mPortMode[kPortIndexOutput] == IOMX::kPortModeDynamicANWBuffer) && !mIsEncoder;
}
- inline bool usingMetadataOnEncoderOutput() {
- return mOutputMetadataType >= 0 && mIsEncoder;
+ inline bool usingSecureBufferOnEncoderOutput() {
+ return (mPortMode[kPortIndexOutput] == IOMX::kPortModePresetSecureBuffer) && mIsEncoder;
}
BufferInfo *findBufferByID(
@@ -494,8 +493,6 @@
status_t setupErrorCorrectionParameters();
- status_t initNativeWindow();
-
// Returns true iff all buffers on the given port have status
// OWNED_BY_US or OWNED_BY_NATIVE_WINDOW.
bool allYourBuffersAreBelongToUs(OMX_U32 portIndex);
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 2ec89a4..f20c2cd 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -89,6 +89,8 @@
int64_t mPrevSampleTimeUs;
int64_t mInitialReadTimeUs;
int64_t mNumFramesReceived;
+ int64_t mNumFramesSkipped;
+ int64_t mNumFramesLost;
int64_t mNumClientOwnedBuffers;
List<MediaBuffer * > mBuffersReceived;
diff --git a/include/media/stagefright/CodecBase.h b/include/media/stagefright/CodecBase.h
index d8c43a4..25b8bf8 100644
--- a/include/media/stagefright/CodecBase.h
+++ b/include/media/stagefright/CodecBase.h
@@ -18,12 +18,15 @@
#define CODEC_BASE_H_
+#include <memory>
+
#include <stdint.h>
#define STRINGIFY_ENUMS
#include <media/IOMX.h>
#include <media/MediaCodecInfo.h>
+#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/hardware/HardwareAPI.h>
@@ -34,32 +37,159 @@
namespace android {
+class BufferProducerWrapper;
class MediaCodecBuffer;
struct PersistentSurface;
+struct RenderedFrameInfo;
class Surface;
struct CodecBase : public AHandler, /* static */ ColorUtils {
- enum {
- kWhatFillThisBuffer = 'fill',
- kWhatDrainThisBuffer = 'drai',
- kWhatEOS = 'eos ',
- kWhatShutdownCompleted = 'scom',
- kWhatFlushCompleted = 'fcom',
- kWhatError = 'erro',
- kWhatComponentAllocated = 'cAll',
- kWhatComponentConfigured = 'cCon',
- kWhatInputSurfaceCreated = 'isfc',
- kWhatInputSurfaceAccepted = 'isfa',
- kWhatSignaledInputEOS = 'seos',
- kWhatBuffersAllocated = 'allc',
- kWhatOutputFramesRendered = 'outR',
+ struct PortDescription;
+
+ /**
+ * This interface defines events firing from CodecBase back to MediaCodec.
+ * All methods must not block.
+ */
+ class Callback {
+ public:
+ virtual ~Callback() = default;
+
+ /**
+ * Request MediaCodec to fill the specified input buffer.
+ *
+ * @param bufferId ID of the buffer, assigned by underlying component.
+ * @param buffer a buffer to be filled.
+ * @param reply a message to post once MediaCodec has filled the
+ * buffer.
+ */
+ virtual void fillThisBuffer(
+ IOMX::buffer_id bufferId,
+ const sp<MediaCodecBuffer> &buffer,
+ const sp<AMessage> &reply) = 0;
+ /**
+ * Request MediaCodec to drain the specified output buffer.
+ *
+ * @param bufferId ID of the buffer, assigned by underlying component.
+ * @param buffer a buffer to be drained.
+ * @param flags flags associated with this buffer (e.g. EOS).
+ * @param reply a message to post once MediaCodec has drained the
+ * buffer.
+ */
+ virtual void drainThisBuffer(
+ IOMX::buffer_id bufferId,
+ const sp<MediaCodecBuffer> &buffer,
+ int32_t flags,
+ const sp<AMessage> &reply) = 0;
+ /**
+ * Notify MediaCodec that an output EOS has been seen.
+ *
+ * @param err the underlying cause of the EOS. If the value is neither
+ * OK nor ERROR_END_OF_STREAM, the EOS is declared
+ * prematurely for that error.
+ */
+ virtual void onEos(status_t err) = 0;
+ /**
+ * Notify MediaCodec that stop operation is complete.
+ */
+ virtual void onStopCompleted() = 0;
+ /**
+ * Notify MediaCodec that release operation is complete.
+ */
+ virtual void onReleaseCompleted() = 0;
+ /**
+ * Notify MediaCodec that flush operation is complete.
+ */
+ virtual void onFlushCompleted() = 0;
+ /**
+ * Notify MediaCodec that an error has occurred.
+ *
+ * @param err an error code for the occurred error.
+ * @param actionCode an action code for severity of the error.
+ */
+ virtual void onError(status_t err, enum ActionCode actionCode) = 0;
+ /**
+ * Notify MediaCodec that the underlying component is allocated.
+ *
+ * @param componentName the unique name of the component specified in
+ * MediaCodecList.
+ */
+ virtual void onComponentAllocated(const char *componentName) = 0;
+ /**
+ * Notify MediaCodec that the underlying component is configured.
+ *
+ * @param inputFormat an input format at configure time.
+ * @param outputFormat an output format at configure time.
+ */
+ virtual void onComponentConfigured(
+ const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) = 0;
+ /**
+ * Notify MediaCodec that the input surface is created.
+ *
+ * @param inputFormat an input format at surface creation. Formats
+ * could change from the previous state as a result
+ * of creating a surface.
+ * @param outputFormat an output format at surface creation.
+ * @param inputSurface the created surface.
+ */
+ virtual void onInputSurfaceCreated(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat,
+ const sp<BufferProducerWrapper> &inputSurface) = 0;
+ /**
+ * Notify MediaCodec that the input surface creation is failed.
+ *
+ * @param err an error code of the cause.
+ */
+ virtual void onInputSurfaceCreationFailed(status_t err) = 0;
+ /**
+ * Notify MediaCodec that the component accepted the provided input
+ * surface.
+ *
+ * @param inputFormat an input format at surface assignment. Formats
+ * could change from the previous state as a result
+ * of assigning a surface.
+ * @param outputFormat an output format at surface assignment.
+ */
+ virtual void onInputSurfaceAccepted(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat) = 0;
+ /**
+ * Notify MediaCodec that the component declined the provided input
+ * surface.
+ *
+ * @param err an error code of the cause.
+ */
+ virtual void onInputSurfaceDeclined(status_t err) = 0;
+ /**
+ * Notify MediaCodec that the requested input EOS has been sent to the input
+ * surface.
+ *
+ * @param err an error code returned from the surface. If there is no
+ * input surface, the value is INVALID_OPERATION.
+ */
+ virtual void onSignaledInputEOS(status_t err) = 0;
+ /**
+ * Notify MediaCodec with the allocated buffers.
+ *
+ * @param portIndex zero for input port, one for output port.
+ * @param portDesc a PortDescription object containing allocated
+ * buffers.
+ */
+ virtual void onBuffersAllocated(int32_t portIndex, const sp<PortDescription> &portDesc) = 0;
+ /**
+ * Notify MediaCodec that output frames are rendered with information on
+ * those frames.
+ *
+ * @param done a list of rendered frames.
+ */
+ virtual void onOutputFramesRendered(const std::list<RenderedFrameInfo> &done) = 0;
};
enum {
kMaxCodecBufferSize = 8192 * 4096 * 4, // 8K RGBA
};
- virtual void setNotificationMessage(const sp<AMessage> &msg) = 0;
+ void setCallback(std::shared_ptr<Callback> &&callback);
virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
virtual void initiateConfigureComponent(const sp<AMessage> &msg) = 0;
@@ -106,6 +236,8 @@
CodecBase();
virtual ~CodecBase();
+ std::shared_ptr<Callback> mCallback;
+
private:
DISALLOW_EVIL_CONSTRUCTORS(CodecBase);
};
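Note on the CodecBase changes above: the kWhat* notification constants are gone; an
implementation now reports events through the protected mCallback installed via setCallback()
(the Sf2Callback adapter in sf2.cpp above shows the client side). A minimal sketch of the
implementation side, written as a free helper for illustration:

    #include <media/stagefright/CodecBase.h>
    #include <media/stagefright/MediaErrors.h>

    using namespace android;

    // Reports a terminal event through the Callback interface declared above.
    static void reportTerminalEvent(
            const std::shared_ptr<CodecBase::Callback> &cb, status_t err) {
        if (err == OK || err == ERROR_END_OF_STREAM) {
            cb->onEos(err);                       // regular (or premature) end of stream
        } else {
            cb->onError(err, ACTION_CODE_FATAL);  // ActionCode comes from MediaErrors.h
        }
    }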
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 0254545..3479f76 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -102,17 +102,6 @@
////////////////////////////////////////////////////////////////////////////
- bool sniff(String8 *mimeType, float *confidence, sp<AMessage> *meta);
-
- // The sniffer can optionally fill in "meta" with an AMessage containing
- // a dictionary of values that helps the corresponding extractor initialize
- // its state without duplicating effort already exerted by the sniffer.
- typedef bool (*SnifferFunc)(
- const sp<DataSource> &source, String8 *mimeType,
- float *confidence, sp<AMessage> *meta);
-
- static void RegisterDefaultSniffers();
-
// for DRM
virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL) {
return NULL;
@@ -131,12 +120,6 @@
virtual ~DataSource() {}
private:
- static Mutex gSnifferMutex;
- static List<SnifferFunc> gSniffers;
- static bool gSniffersRegistered;
-
- static void RegisterSniffer_l(SnifferFunc func);
-
DataSource(const DataSource &);
DataSource &operator=(const DataSource &);
};
diff --git a/include/media/stagefright/FrameRenderTracker.h b/include/media/stagefright/FrameRenderTracker.h
index 8396657..6c572b8 100644
--- a/include/media/stagefright/FrameRenderTracker.h
+++ b/include/media/stagefright/FrameRenderTracker.h
@@ -32,58 +32,61 @@
class Fence;
class GraphicBuffer;
+// Tracks the render information about a frame. Frames go through several states while
+// the render information is tracked:
+//
+// 1. queued frame: mMediaTime and mGraphicBuffer are set for the frame. mFence is the
+// queue fence (read fence). mIndex is negative, and mRenderTimeNs is invalid.
+// Key characteristics: mFence is not NULL and mIndex is negative.
+//
+// 2. dequeued frame: mFence is updated with the dequeue fence (write fence). mIndex is set.
+// Key characteristics: mFence is not NULL and mIndex is non-negative. mRenderTime is still
+// invalid.
+//
+// 3. rendered frame or frame: mFence is cleared, mRenderTimeNs is set.
+// Key characteristics: mFence is NULL.
+//
+struct RenderedFrameInfo {
+ // set by client during onFrameQueued or onFrameRendered
+ int64_t getMediaTimeUs() const { return mMediaTimeUs; }
+
+ // -1 if frame is not yet rendered
+ nsecs_t getRenderTimeNs() const { return mRenderTimeNs; }
+
+ // set by client during updateRenderInfoForDequeuedBuffer; -1 otherwise
+ ssize_t getIndex() const { return mIndex; }
+
+ // creates information for a queued frame
+ RenderedFrameInfo(int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer,
+ const sp<Fence> &fence)
+ : mMediaTimeUs(mediaTimeUs),
+ mRenderTimeNs(-1),
+ mIndex(-1),
+ mGraphicBuffer(graphicBuffer),
+ mFence(fence) {
+ }
+
+ // creates information for a frame rendered on a tunneled surface
+ RenderedFrameInfo(int64_t mediaTimeUs, nsecs_t renderTimeNs)
+ : mMediaTimeUs(mediaTimeUs),
+ mRenderTimeNs(renderTimeNs),
+ mIndex(-1),
+ mGraphicBuffer(NULL),
+ mFence(NULL) {
+ }
+
+private:
+ int64_t mMediaTimeUs;
+ nsecs_t mRenderTimeNs;
+ ssize_t mIndex; // to be used by client
+ sp<GraphicBuffer> mGraphicBuffer;
+ sp<Fence> mFence;
+
+ friend class FrameRenderTracker;
+};
+
struct FrameRenderTracker {
- // Tracks the render information about a frame. Frames go through several states while
- // the render information is tracked:
- //
- // 1. queued frame: mMediaTime and mGraphicBuffer are set for the frame. mFence is the
- // queue fence (read fence). mIndex is negative, and mRenderTimeNs is invalid.
- // Key characteristics: mFence is not NULL and mIndex is negative.
- //
- // 2. dequeued frame: mFence is updated with the dequeue fence (write fence). mIndex is set.
- // Key characteristics: mFence is not NULL and mIndex is non-negative. mRenderTime is still
- // invalid.
- //
- // 3. rendered frame or frame: mFence is cleared, mRenderTimeNs is set.
- // Key characteristics: mFence is NULL.
- //
- struct Info {
- // set by client during onFrameQueued or onFrameRendered
- int64_t getMediaTimeUs() const { return mMediaTimeUs; }
-
- // -1 if frame is not yet rendered
- nsecs_t getRenderTimeNs() const { return mRenderTimeNs; }
-
- // set by client during updateRenderInfoForDequeuedBuffer; -1 otherwise
- ssize_t getIndex() const { return mIndex; }
-
- // creates information for a queued frame
- Info(int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer, const sp<Fence> &fence)
- : mMediaTimeUs(mediaTimeUs),
- mRenderTimeNs(-1),
- mIndex(-1),
- mGraphicBuffer(graphicBuffer),
- mFence(fence) {
- }
-
- // creates information for a frame rendered on a tunneled surface
- Info(int64_t mediaTimeUs, nsecs_t renderTimeNs)
- : mMediaTimeUs(mediaTimeUs),
- mRenderTimeNs(renderTimeNs),
- mIndex(-1),
- mGraphicBuffer(NULL),
- mFence(NULL) {
- }
-
- private:
- int64_t mMediaTimeUs;
- nsecs_t mRenderTimeNs;
- ssize_t mIndex; // to be used by client
- sp<GraphicBuffer> mGraphicBuffer;
- sp<Fence> mFence;
-
- friend class FrameRenderTracker;
- };
+ typedef RenderedFrameInfo Info;
FrameRenderTracker();
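Note on the FrameRenderTracker changes above: the Info struct is hoisted out as RenderedFrameInfo
so that CodecBase::Callback::onOutputFramesRendered() can name it; FrameRenderTracker::Info
remains as a typedef. A small sketch of the two constructor forms:

    #include <media/stagefright/FrameRenderTracker.h>
    #include <ui/Fence.h>
    #include <ui/GraphicBuffer.h>

    using namespace android;

    // Queued-frame form: fence is the queue (read) fence; index and render time are not yet set.
    static RenderedFrameInfo makeQueuedInfo(
            int64_t mediaTimeUs, const sp<GraphicBuffer> &buffer, const sp<Fence> &fence) {
        return RenderedFrameInfo(mediaTimeUs, buffer, fence);
    }

    // Tunneled-surface form: the frame is already rendered, so only times are recorded.
    static FrameRenderTracker::Info makeTunneledInfo(int64_t mediaTimeUs, nsecs_t renderTimeNs) {
        return FrameRenderTracker::Info(mediaTimeUs, renderTimeNs);
    }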
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 8f0eaa7..d7fe23c 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -36,9 +36,8 @@
MPEG4Writer(int fd);
// Limitations
- // 1. No more than 2 tracks can be added
- // 2. Only video or audio source can be added
- // 3. No more than one video and/or one audio source can be added.
+ // No more than one video and/or one audio source can be added, but
+ // multiple metadata sources can be added.
virtual status_t addSource(const sp<IMediaSource> &source);
// Returns INVALID_OPERATION if there is no source or track.
@@ -98,6 +97,8 @@
int64_t mStartTimestampUs;
int mLatitudex10000;
int mLongitudex10000;
+ bool mHasAudioTrack;
+ bool mHasVideoTrack;
bool mAreGeoTagsAvailable;
int32_t mStartTimeOffsetMs;
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 2c31a0d..b0243ec 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -64,14 +64,15 @@
};
static const pid_t kNoPid = -1;
+ static const uid_t kNoUid = -1;
static sp<MediaCodec> CreateByType(
const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err = NULL,
- pid_t pid = kNoPid);
+ pid_t pid = kNoPid, uid_t uid = kNoUid);
static sp<MediaCodec> CreateByComponentName(
const sp<ALooper> &looper, const AString &name, status_t *err = NULL,
- pid_t pid = kNoPid);
+ pid_t pid = kNoPid, uid_t uid = kNoUid);
static sp<PersistentSurface> CreatePersistentInputSurface();
@@ -150,8 +151,6 @@
status_t getOutputFormat(sp<AMessage> *format) const;
status_t getInputFormat(sp<AMessage> *format) const;
- status_t getWidevineLegacyBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const;
-
status_t getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const;
status_t getOutputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const;
@@ -286,6 +285,7 @@
};
State mState;
+ uid_t mUid;
bool mReleasedByResourceManager;
sp<ALooper> mLooper;
sp<ALooper> mCodecLooper;
@@ -345,7 +345,7 @@
bool mHaveInputSurface;
bool mHavePendingInputBuffers;
- MediaCodec(const sp<ALooper> &looper, pid_t pid);
+ MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid);
static sp<CodecBase> GetCodecBase(const AString &name, bool nameIsType = false);
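Note on the MediaCodec changes above: CreateByType() and CreateByComponentName() now also take
the client uid (kNoUid by default) so codec resources can be attributed to the real caller. A
minimal sketch; "video/avc" is just an example mime type:

    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/ALooper.h>

    using namespace android;

    static sp<MediaCodec> createAttributedDecoder(
            const sp<ALooper> &looper, pid_t clientPid, uid_t clientUid) {
        status_t err = OK;
        sp<MediaCodec> codec = MediaCodec::CreateByType(
                looper, "video/avc", false /* encoder */, &err, clientPid, clientUid);
        // On failure codec is NULL and err holds the reason; passing
        // MediaCodec::kNoPid / MediaCodec::kNoUid preserves the old behavior.
        return codec;
    }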
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 6bf8c9e..e5ee72e 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -80,6 +80,24 @@
private:
bool mIsDrm;
+ typedef bool (*SnifferFunc)(
+ const sp<DataSource> &source, String8 *mimeType,
+ float *confidence, sp<AMessage> *meta);
+
+ static Mutex gSnifferMutex;
+ static List<SnifferFunc> gSniffers;
+ static bool gSniffersRegistered;
+
+ // The sniffer can optionally fill in "meta" with an AMessage containing
+ // a dictionary of values that helps the corresponding extractor initialize
+ // its state without duplicating effort already exerted by the sniffer.
+ static void RegisterSniffer_l(SnifferFunc func);
+
+ static bool sniff(const sp<DataSource> &source,
+ String8 *mimeType, float *confidence, sp<AMessage> *meta);
+
+ static void RegisterDefaultSniffers();
+
MediaExtractor(const MediaExtractor &);
MediaExtractor &operator=(const MediaExtractor &);
};
diff --git a/include/media/stagefright/MediaFilter.h b/include/media/stagefright/MediaFilter.h
index 0e39431..a0e580b 100644
--- a/include/media/stagefright/MediaFilter.h
+++ b/include/media/stagefright/MediaFilter.h
@@ -28,8 +28,6 @@
struct MediaFilter : public CodecBase {
MediaFilter();
- virtual void setNotificationMessage(const sp<AMessage> &msg);
-
virtual void initiateAllocateComponent(const sp<AMessage> &msg);
virtual void initiateConfigureComponent(const sp<AMessage> &msg);
virtual void initiateCreateInputSurface();
@@ -120,7 +118,6 @@
int32_t mColorFormatIn, mColorFormatOut;
size_t mMaxInputSize, mMaxOutputSize;
int32_t mGeneration;
- sp<AMessage> mNotify;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index 03e2185..a8aca5a 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -108,7 +108,6 @@
sp<DataSource> mDataSource;
sp<IMediaExtractor> mImpl;
- bool mIsWidevineExtractor;
Vector<TrackInfo> mSelectedTracks;
int64_t mTotalBitrate; // in bits/sec
diff --git a/include/media/stagefright/foundation/Flagged.h b/include/media/stagefright/foundation/Flagged.h
new file mode 100644
index 0000000..bf0afbf
--- /dev/null
+++ b/include/media/stagefright/foundation/Flagged.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+#define STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+
+#include <media/stagefright/foundation/TypeTraits.h>
+
+namespace android {
+
+/**
+ * Flagged<T, Flag> is basically a specialized std::pair<Flag, T> that automatically optimizes out
+ * the flag if the wrapped type T is already flagged and we can combine the outer and inner flags.
+ *
+ * Flags can be queried/manipulated via flags() and setFlags(Flags). The wrapped value can be
+ * accessed via get(). This template is meant to be inherited by other utility/wrapper classes
+ * that need to store integral information along with the value.
+ *
+ * Users must specify the used bits (MASK) in the flags. Flag getters and setters will enforce this
+ * mask. _Flagged_helper::minMask<Flag> is provided to easily calculate a mask for a max value.
+ *
+ * E.g. adding a safe flag can be achieved like this:
+ *
+ *
+ * enum SafeFlags : uint32_t {
+ * kUnsafe,
+ * kSafe,
+ * kSafeMask = _Flagged_helper::minMask(kSafe),
+ * };
+ * typedef Flagged<int32_t, SafeFlags, kSafeMask> safeInt32;
+ *
+ * safeInt32 a;
+ * a.setFlags(kSafe);
+ * a.get() = 15;
+ * EXPECT_EQ(a.flags(), kSafe);
+ * EXPECT_EQ(a.get(), 15);
+ *
+ *
+ * Flagged also supports lazy or calculated wrapping of already flagged types. Lazy wrapping is
+ * provided automatically (flags are automatically shared if possible, e.g. mask is shifted
+ * automatically to not overlap with used bits of the wrapped type's flags, falling back to the
+ * unshared version of the template.):
+ *
+ * enum OriginFlags : uint32_t {
+ * kUnknown,
+ * kConst,
+ * kCalculated,
+ * kComponent,
+ * kApplication,
+ * kFile,
+ * kBinder,
+ * kOriginMask = _Flagged_helper::minMask(kBinder),
+ * };
+ * typedef Flagged<safeInt32, OriginFlags, kOriginMask>
+ * trackedSafeInt32;
+ *
+ * static_assert(sizeof(trackedSafeInt32) == sizeof(safeInt32), "");
+ *
+ * trackedSafeInt32 b(kConst, kSafe, 1);
+ * EXPECT_EQ(b.flags(), kConst);
+ * EXPECT_EQ(b.get().flags(), kSafe);
+ * EXPECT_EQ(b.get().get(), 1);
+ * b.setFlags(kCalculated);
+ * b.get().setFlags(overflow ? kUnsafe : kSafe);
+ *
+ * One can also choose to share some flag-bits with the wrapped class:
+ *
+ * enum ValidatedFlags : uint32_t {
+ * kUnsafeV = kUnsafe,
+ * kSafeV = kSafe,
+ * kValidated = kSafe | 2,
+ * kSharedMaskV = kSafeMask,
+ * kValidatedMask = _Flagged_helper::minMask(kValidated),
+ * };
+ * typedef Flagged<safeInt32, ValidatedFlags, kValidatedMask, kSharedMaskV> validatedInt32;
+ *
+ * validatedInt32 v(kUnsafeV, kSafe, 10);
+ * EXPECT_EQ(v.flags(), kUnsafeV);
+ * EXPECT_EQ(v.get().flags(), kUnsafe); // !kUnsafeV overrides kSafe
+ * EXPECT_EQ(v.get().get(), 10);
+ * v.setFlags(kValidated);
+ * EXPECT_EQ(v.flags(), kValidated);
+ * EXPECT_EQ(v.get().flags(), kSafe);
+ * v.get().setFlags(kUnsafe);
+ * EXPECT_EQ(v.flags(), 2); // NOTE: sharing masks with enums allows strange situations to occur
+ */
+
+/**
+ * Helper class for Flagged support. Encapsulates common utilities used by all
+ * templated classes.
+ */
+struct _Flagged_helper {
+ /**
+ * Calculates the value with a given number of top-most bits set.
+ *
+ * This method may be called with a signed flag.
+ *
+ * \param num number of bits to set. This must be between 0 and the number of bits in Flag.
+ *
+ * \return the value where only the given number of top-most bits are set.
+ */
+ template<typename Flag>
+ static constexpr Flag topBits(int num) {
+ return Flag(num > 0 ?
+ ~((Flag(1) << (sizeof(Flag) * 8 - is_signed_integral<Flag>::value - num)) - 1) :
+ 0);
+ }
+
+ /**
+ * Calculates the minimum mask required to cover a value. Used with the maximum enum value for
+ * an unsigned flag.
+ *
+ * \param maxValue maximum value to cover
+ * \param shift DO NOT USE. Used internally.
+ *
+ * \return mask that can be used that covers the maximum value.
+ */
+ template<typename Flag>
+ static constexpr Flag minMask(Flag maxValue, int shift=sizeof(Flag) * 4) {
+ static_assert(is_unsigned_integral<Flag>::value,
+ "this method only makes sense for unsigned flags");
+ return shift ? minMask<Flag>(Flag(maxValue | (maxValue >> shift)), shift >> 1) : maxValue;
+ }
+
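+ // For illustration, minMask<uint32_t>(5u) == 7; for the SafeFlags/OriginFlags enums in the
+ // example at the top of this file, minMask(kSafe) == 1 and minMask(kBinder) == 7.
+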
+ /**
+ * Returns a value left-shifted by an argument as a potential constexpr.
+ *
+ * This method works around the C-language limitation that a left shift of a negative value,
+ * even by 0, cannot be used in a constexpr.
+ *
+ * \param value value to shift
+ * \param shift amount of shift
+ * \returns the shifted value as an integral type
+ */
+ template<typename Flag, typename IntFlag = typename underlying_integral_type<Flag>::type>
+ static constexpr IntFlag lshift(Flag value, int shift) {
+ return shift ? value << shift : value;
+ }
+
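+ // For illustration, lshift(0x3, 4) == 0x30, while lshift(value, 0) returns value without
+ // evaluating any shift expression.
+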
+private:
+
+ /**
+ * Determines whether mask can be combined with base-mask for a given left shift.
+ *
+ * \param mask desired mask
+ * \param baseMask mask used by T or 0 if T is not flagged by Flag
+ * \param sharedMask desired shared mask (if this is non-0, this must be mask & baseMask)
+ * \param shift desired left shift to be used for mask
+ * \param baseShift left shift used by T or 0 if T is not flagged by Flag
+ * \param effectiveMask effective mask used by T or 0 if T is not flagged by Flag
+ *
+ * \return bool whether mask can be combined with baseMask using the desired values.
+ */
+ template<typename Flag, typename IntFlag=typename underlying_integral_type<Flag>::type>
+ static constexpr bool canCombine(
+ Flag mask, IntFlag baseMask, Flag sharedMask, int shift,
+ int baseShift, IntFlag effectiveMask) {
+ return
+ // verify that shift is valid and mask can be shifted
+ shift >= 0 && (mask & topBits<Flag>(shift)) == 0 &&
+
+ // verify that base mask is part of effective mask (sanity check on arguments)
+ (baseMask & ~(effectiveMask >> baseShift)) == 0 &&
+
+ // if sharing masks, shift must be the base's shift.
+ // verify that shared mask is the overlap of base mask and mask
+ (sharedMask ?
+ ((sharedMask ^ (baseMask & mask)) == 0 &&
+ shift == baseShift) :
+
+
+ // otherwise, verify that there is no overlap between mask and base's effective mask
+ (mask & (effectiveMask >> shift)) == 0);
+ }
+
+
+ /**
+ * Calculates the minimum (left) shift required to combine a mask with the mask of an
+ * underlying type (T, also flagged by Flag).
+ *
+ * \param mask desired mask
+ * \param baseMask mask used by T or 0 if T is not flagged by Flag
+ * \param sharedMask desired shared mask (if this is non-0, this must be mask & baseMask)
+ * \param baseShift left shift used by T
+ * \param effectiveMask effective mask used by T
+ *
+ * \return a non-negative minimum left shift value if mask can be combined with baseMask,
+ * or -1 if the masks cannot be combined. -2 if the input is invalid.
+ */
+ template<typename Flag,
+ typename IntFlag = typename underlying_integral_type<Flag>::type>
+ static constexpr int getShift(
+ Flag mask, IntFlag baseMask, Flag sharedMask, int baseShift, IntFlag effectiveMask) {
+ return
+ // baseMask must be part of the effective mask
+ (baseMask & ~(effectiveMask >> baseShift)) ? -2 :
+
+ // if sharing masks, shift must be base's shift. verify that shared mask is part of
+ // base mask and mask, and that desired mask still fits with base's shift value
+ sharedMask ?
+ (canCombine(mask, baseMask, sharedMask, baseShift /* shift */,
+ baseShift, effectiveMask) ? baseShift : -1) :
+
+ // otherwise, see if 0-shift works
+ ((mask & effectiveMask) == 0) ? 0 :
+
+ // otherwise, verify that mask can be shifted up
+ ((mask & topBits<Flag>(1)) || (mask < 0)) ? -1 :
+
+ incShift(getShift(Flag(mask << 1), baseMask /* unused */, sharedMask /* 0 */,
+ baseShift /* unused */, effectiveMask));
+ }
+
+ /**
+ * Helper method that increments a non-negative (shift) value.
+ *
+ * This method is used to make it easier to create a constexpr for getShift.
+ *
+ * \param shift (shift) value to increment
+ *
+ * \return original shift if it was negative; otherwise, the shift incremented by one.
+ */
+ static constexpr int incShift(int shift) {
+ return shift + (shift >= 0);
+ }
+
+#ifdef FRIEND_TEST
+ FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+public:
+ /**
+ * Base class for all Flagged<T, Flag> classes.
+ *
+ * \note flagged types do not have a member variable for the mask used by the type. As such,
+ * they should not be cast to this base class.
+ *
+ * \todo can we replace this base class check with a static member check to remove possibility
+ * of cast?
+ */
+ template<typename Flag>
+ struct base {};
+
+ /**
+ * Type support utility that retrieves the mask of a class (T) if it is a type flagged by
+ * Flag (e.g. Flagged<T, Flag>).
+ *
+ * \note This retrieves 0 if T is a flagged class that is not flagged by Flag or an equivalent
+ * underlying type.
+ *
+ * Generic implementation for a non-flagged class.
+ */
+ template<
+ typename T, typename Flag,
+ bool=std::is_base_of<base<typename underlying_integral_type<Flag>::type>, T>::value>
+ struct mask_of {
+ using IntFlag = typename underlying_integral_type<Flag>::type;
+ static constexpr IntFlag value = Flag(0); ///< mask of a potentially flagged class
+ static constexpr int shift = 0;     ///< left shift of flags in a potentially flagged class
+ static constexpr IntFlag effective_value = IntFlag(0); ///< effective mask of flagged class
+ };
+
+ /**
+ * Type support utility that calculates the minimum (left) shift required to combine a mask
+ * with the mask of an underlying type T also flagged by Flag.
+ *
+ * \note if T is not flagged, not flagged by Flag, or the masks cannot be combined due to
+ * incorrect sharing or the flags not having enough bits, the minimum is -1.
+ *
+ * \param MASK desired mask
+ * \param SHARED_MASK desired shared mask (if this is non-0, T must be a type flagged by
+ * Flag with a mask that has exactly these bits common with MASK)
+ */
+ template<typename T, typename Flag, Flag MASK, Flag SHARED_MASK>
+ struct min_shift {
+ /// minimum (left) shift required, or -1 if masks cannot be combined
+ static constexpr int value =
+ getShift(MASK, mask_of<T, Flag>::value, SHARED_MASK,
+ mask_of<T, Flag>::shift, mask_of<T, Flag>::effective_value);
+ };
+
+ /**
+ * Type support utility that calculates whether the flags of T can be combined with MASK.
+ *
+ * \param MASK desired mask
+ * \param SHARED_MASK desired shared mask (if this is non-0, T MUST be a type flagged by
+ * Flag with a mask that has exactly these bits common with MASK)
+ */
+ template<
+ typename T, typename Flag, Flag MASK,
+ Flag SHARED_MASK=Flag(0),
+ int SHIFT=min_shift<T, Flag, MASK, SHARED_MASK>::value>
+ struct can_combine {
+ using IntFlag = typename underlying_integral_type<Flag>::type;
+ /// true if this mask can be combined with T's existing flag. false otherwise.
+ static constexpr bool value =
+ std::is_base_of<base<IntFlag>, T>::value
+ && canCombine(MASK, mask_of<T, Flag>::value, SHARED_MASK, SHIFT,
+ mask_of<T, Flag>::shift, mask_of<T, Flag>::effective_value);
+ };
+};
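+
+// For illustration, with the safeInt32 and OriginFlags types from the example at the top of this
+// file: mask_of<safeInt32, OriginFlags>::value is kSafeMask with a shift of 0, so
+// _Flagged_helper::min_shift<safeInt32, OriginFlags, kOriginMask, OriginFlags(0)>::value is 1 and
+// _Flagged_helper::can_combine<safeInt32, OriginFlags, kOriginMask>::value is true: the origin
+// bits can be stored left-shifted by one, next to the safe bit, in a single flag word.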
+
+/**
+ * Template specialization for the case when T is flagged by Flag or a compatible type.
+ */
+template<typename T, typename Flag>
+struct _Flagged_helper::mask_of<T, Flag, true> {
+ using IntType = typename underlying_integral_type<Flag>::type;
+ static constexpr IntType value = T::sFlagMask;
+ static constexpr int shift = T::sFlagShift;
+ static constexpr IntType effective_value = T::sEffectiveMask;
+};
+
+/**
+ * Main Flagged template that adds flags to an object of another type (in essence, creates a pair)
+ *
+ * Flag must be an integral type (enums are allowed).
+ *
+ * \note We could make SHARED_MASK be a boolean as it must be either 0 or MASK & base's mask, but we
+ * want it to be spelled out for safety.
+ *
+ * \param T type of object wrapped
+ * \param Flag type of flag
+ * \param MASK mask for the bits used in flag (before any shift)
+ * \param SHARED_MASK optional mask to be shared with T (if this is not zero, SHIFT must be 0, and
+ *        it must be equal to MASK & T's mask)
+ * \param SHIFT optional left shift for MASK to combine with T's mask (or -1, if masks should not
+ * be combined.)
+ */
+template<
+ typename T, typename Flag, Flag MASK, Flag SHARED_MASK=(Flag)0,
+ int SHIFT=_Flagged_helper::min_shift<T, Flag, MASK, SHARED_MASK>::value,
+ typename IntFlag=typename underlying_integral_type<Flag>::type,
+ bool=_Flagged_helper::can_combine<T, IntFlag, MASK, SHARED_MASK, SHIFT>::value>
+class Flagged : public _Flagged_helper::base<IntFlag> {
+ static_assert(SHARED_MASK == 0,
+ "shared mask can only be used with common flag types "
+ "and must be part of mask and mask of base type");
+ static_assert((_Flagged_helper::topBits<Flag>(SHIFT) & MASK) == 0, "SHIFT overflows MASK");
+
+ static constexpr Flag sFlagMask = MASK; ///< the mask
+ static constexpr int sFlagShift = SHIFT > 0 ? SHIFT : 0; ///< the left shift applied to flags
+
+ friend struct _Flagged_helper;
+#ifdef FRIEND_TEST
+ static constexpr bool sFlagCombined = false;
+ FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+ T mValue; ///< wrapped value
+ IntFlag mFlags; ///< flags
+
+protected:
+ /// The effective combined mask used by this class and any wrapped classes if the flags are
+ /// combined.
+ static constexpr IntFlag sEffectiveMask = _Flagged_helper::lshift(MASK, sFlagShift);
+
+ /**
+ * Helper method used by subsequent flagged wrappers to query flags. Returns the
+ * flags for a particular mask and left shift.
+ *
+ * \param mask bitmask to use
+ * \param shift left shift to use
+ *
+ * \return the requested flags
+ */
+ inline constexpr IntFlag getFlagsHelper(IntFlag mask, int shift) const {
+ return (mFlags >> shift) & mask;
+ }
+
+ /**
+ * Helper method used by subsequent flagged wrappers to apply combined flags. Sets the flags
+ * in the bitmask using a particular left shift.
+ *
+ * \param mask bitmask to use
+ * \param shift left shift to use
+ * \param flags flags to update (any flags within the bitmask are updated to their value in this
+ * argument)
+ */
+ inline void setFlagsHelper(IntFlag mask, int shift, IntFlag flags) {
+ mFlags = Flag((mFlags & ~(mask << shift)) | ((flags & mask) << shift));
+ }
+
+public:
+ /**
+ * Wrapper around the wrapped type's constructor. It takes the flags as its first
+ * argument and passes the rest of the arguments to the constructor of the wrapped value (T).
+ *
+ * \param flags initial flags
+ */
+ template<typename ...Args>
+ constexpr Flagged(Flag flags, Args... args)
+ : mValue(std::forward<Args>(args)...),
+ mFlags(Flag(_Flagged_helper::lshift(flags & sFlagMask, sFlagShift))) { }
+
+ /** Gets the wrapped value as const. */
+ inline constexpr const T &get() const { return mValue; }
+
+ /** Gets the wrapped value. */
+ inline T &get() { return mValue; }
+
+ /** Gets the flags. */
+ constexpr Flag flags() const {
+ return Flag(getFlagsHelper(sFlagMask, sFlagShift));
+ }
+
+ /** Sets the flags. */
+ void setFlags(Flag flags) {
+ setFlagsHelper(sFlagMask, sFlagShift, flags);
+ }
+};
+
+/*
+ * TRICKY: we cannot implement the specialization as:
+ *
+ * class Flagged : base<Flag> {
+ * T value;
+ * };
+ *
+ * Because T also inherits from base<Flag> and this runs into a compiler bug where
+ * sizeof(Flagged) > sizeof(T).
+ *
+ * Instead, we must inherit directly from the wrapped class
+ *
+ */
+#if 0
+template<
+ typename T, typename Flag, Flag MASK, Flag SHARED_MASK, int SHIFT>
+class Flagged<T, Flag, MASK, SHARED_MASK, SHIFT, true> : public _Flagged_helper::base<Flag> {
+private:
+ T mValue;
+};
+#else
+/**
+ * Specialization for the case when T is derived from Flagged<U, Flag> and flags can be combined.
+ */
+template<
+ typename T, typename Flag, Flag MASK, Flag SHARED_MASK, int SHIFT, typename IntFlag>
+class Flagged<T, Flag, MASK, SHARED_MASK, SHIFT, IntFlag, true> : private T {
+ static_assert(is_integral_or_enum<Flag>::value, "flag must be integer or enum");
+
+ static_assert(SHARED_MASK == 0 || SHIFT == 0, "cannot overlap masks when using SHIFT");
+ static_assert((SHARED_MASK & ~MASK) == 0, "shared mask must be part of the mask");
+ static_assert((SHARED_MASK & ~T::sEffectiveMask) == 0,
+ "shared mask must be part of the base mask");
+ static_assert(SHARED_MASK == 0 || (~SHARED_MASK & (MASK & T::sEffectiveMask)) == 0,
+ "mask and base mask can only overlap in shared mask");
+
+ static constexpr Flag sFlagMask = MASK; ///< the mask
+ static constexpr int sFlagShift = SHIFT; ///< the left shift applied to the flags
+
+#ifdef FRIEND_TEST
+ const static bool sFlagCombined = true;
+ FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+protected:
+ /// The effective combined mask used by this class and any wrapped classes if the flags are
+ /// combined.
+ static constexpr IntFlag sEffectiveMask = Flag((MASK << SHIFT) | T::sEffectiveMask);
+ friend struct _Flagged_helper;
+
+public:
+ /**
+ * Wrapper around the base class constructor. It takes the flags as its first
+ * argument and passes the rest of the arguments to the base class (T) constructor.
+ *
+ * \param flags initial flags
+ */
+ template<typename ...Args>
+ constexpr Flagged(Flag flags, Args... args)
+ : T(std::forward<Args>(args)...) {
+ // we construct the base class first and apply the flags afterwards as
+ // base class may not have a constructor that takes flags even if it is derived from
+ // Flagged<U, Flag>
+ setFlags(flags);
+ }
+
+ /** Gets the wrapped value as const. */
+ inline constexpr const T &get() const { return *this; }
+
+ /** Gets the wrapped value. */
+ inline T &get() { return *this; }
+
+ /** Gets the flags. */
+ constexpr Flag flags() const {
+ return Flag(this->getFlagsHelper(sFlagMask, sFlagShift));
+ }
+
+ /** Sets the flags. */
+ void setFlags(Flag flags) {
+ this->setFlagsHelper(sFlagMask, sFlagShift, flags);
+ }
+};
+#endif
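+
+// For illustration, trackedSafeInt32 from the example at the top of this file selects the
+// combined specialization above: flags() reads the origin bits at shift 1 and get().flags() reads
+// the safe bit at shift 0, both from the single flag word stored by the innermost Flagged, which
+// is why sizeof(trackedSafeInt32) == sizeof(safeInt32).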
+
+} // namespace android
+
+#endif // STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+
diff --git a/include/media/stagefright/foundation/TypeTraits.h b/include/media/stagefright/foundation/TypeTraits.h
new file mode 100644
index 0000000..2eaec35
--- /dev/null
+++ b/include/media/stagefright/foundation/TypeTraits.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+#define STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+
+#include <type_traits>
+
+namespace android {
+
+/**
+ * std::is_signed, is_unsigned and is_integral do not consider enums even though the standard
+ * considers them integral. Create modified versions of these here. Also create a wrapper around
+ * std::underlying_type that does not require checking if the type is an enum.
+ */
+
+/**
+ * Type support utility class to check if a type is an integral type or an enum.
+ */
+template<typename T>
+struct is_integral_or_enum
+ : std::integral_constant<bool, std::is_integral<T>::value || std::is_enum<T>::value> { };
+
+/**
+ * Type support utility class to get the underlying std::is_integral supported type for a type.
+ * This returns the underlying type for enums, and the same type for types covered by
+ * std::is_integral.
+ *
+ * This is also used as a conditional to return an alternate type if the template param is not
+ * an integral or enum type (as in underlying_integral_type<T, TypeIfNotEnumOrIntegral>::type).
+ */
+template<typename T,
+ typename U=typename std::enable_if<is_integral_or_enum<T>::value>::type,
+ bool=std::is_enum<T>::value,
+ bool=std::is_integral<T>::value>
+struct underlying_integral_type {
+ static_assert(!std::is_enum<T>::value, "T should not be enum here");
+ static_assert(!std::is_integral<T>::value, "T should not be integral here");
+ typedef U type;
+};
+
+/** Specialization for enums. */
+template<typename T, typename U>
+struct underlying_integral_type<T, U, true, false> {
+ static_assert(std::is_enum<T>::value, "T should be enum here");
+ static_assert(!std::is_integral<T>::value, "T should not be integral here");
+ typedef typename std::underlying_type<T>::type type;
+};
+
+/** Specialization for non-enum std-integral types. */
+template<typename T, typename U>
+struct underlying_integral_type<T, U, false, true> {
+ static_assert(!std::is_enum<T>::value, "T should not be enum here");
+ static_assert(std::is_integral<T>::value, "T should be integral here");
+ typedef T type;
+};
+
+/**
+ * Type support utility class to check if the underlying integral type is signed.
+ */
+template<typename T>
+struct is_signed_integral
+ : std::integral_constant<bool, std::is_signed<
+ typename underlying_integral_type<T, unsigned>::type>::value> { };
+
+/**
+ * Type support utility class to check if the underlying integral type is unsigned.
+ */
+template<typename T>
+struct is_unsigned_integral
+ : std::integral_constant<bool, std::is_unsigned<
+ typename underlying_integral_type<T, signed>::type>::value> {
+};
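+
+// For illustration, given a hypothetical `enum ExampleFlags : uint8_t { kNone, kSome };`:
+//   is_integral_or_enum<ExampleFlags>::value == true
+//   underlying_integral_type<ExampleFlags>::type is uint8_t
+//   is_unsigned_integral<ExampleFlags>::value == true
+// whereas is_signed_integral<int16_t>::value == true, since int16_t is a signed integral type.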
+
+} // namespace android
+
+#endif // STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 91cc902..c620e7c 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -11,7 +11,6 @@
libbinder \
libcutils \
liblog \
- libmedia \
libmedialogservice \
libradioservice \
libsoundtriggerservice \
diff --git a/media/libaudioclient/Android.mk b/media/libaudioclient/Android.mk
new file mode 100644
index 0000000..348ab50
--- /dev/null
+++ b/media/libaudioclient/Android.mk
@@ -0,0 +1,50 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES += \
+ AudioEffect.cpp \
+ AudioPolicy.cpp \
+ AudioRecord.cpp \
+ AudioSystem.cpp \
+ AudioTrack.cpp \
+ AudioTrackShared.cpp \
+ IAudioFlinger.cpp \
+ IAudioFlingerClient.cpp \
+ IAudioPolicyService.cpp \
+ IAudioPolicyServiceClient.cpp \
+ IAudioRecord.cpp \
+ IAudioTrack.cpp \
+ IEffect.cpp \
+ IEffectClient.cpp \
+ ToneGenerator.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ liblog libcutils libutils libbinder \
+ libdl libaudioutils \
+
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder
+
+# for memory heap analysis
+LOCAL_STATIC_LIBRARIES := libc_malloc_debug_backtrace libc_logging
+
+LOCAL_MODULE:= libaudioclient
+
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_C_INCLUDES := \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/av/include/media/ \
+ $(TOP)/frameworks/av/media/libstagefright \
+ $(TOP)/frameworks/av/media/libmedia/aidl \
+ $(call include-path-for, audio-utils)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ frameworks/av/include/media \
+ frameworks/av/media/libmedia/aidl
+
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/media/libmedia/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
similarity index 100%
rename from media/libmedia/AudioEffect.cpp
rename to media/libaudioclient/AudioEffect.cpp
diff --git a/media/libmedia/AudioPolicy.cpp b/media/libaudioclient/AudioPolicy.cpp
similarity index 100%
rename from media/libmedia/AudioPolicy.cpp
rename to media/libaudioclient/AudioPolicy.cpp
diff --git a/media/libmedia/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
similarity index 99%
rename from media/libmedia/AudioRecord.cpp
rename to media/libaudioclient/AudioRecord.cpp
index ff5903d..778540c 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -85,7 +85,7 @@
audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes)
: mActive(false),
@@ -143,7 +143,7 @@
audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes)
{
@@ -236,7 +236,7 @@
int callingpid = IPCThreadState::self()->getCallingPid();
int mypid = getpid();
- if (uid == -1 || (callingpid != mypid)) {
+ if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
@@ -1274,6 +1274,9 @@
return true;
}
}
+ if (exitPending()) {
+ return false;
+ }
nsecs_t ns = mReceiver.processAudioBuffer();
switch (ns) {
case 0:
diff --git a/media/libmedia/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
similarity index 100%
rename from media/libmedia/AudioSystem.cpp
rename to media/libaudioclient/AudioSystem.cpp
diff --git a/media/libmedia/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
similarity index 99%
rename from media/libmedia/AudioTrack.cpp
rename to media/libaudioclient/AudioTrack.cpp
index 6a1e31e..3c7e8b7 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -204,7 +204,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
@@ -235,7 +235,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
@@ -296,7 +296,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
@@ -490,7 +490,7 @@
}
int callingpid = IPCThreadState::self()->getCallingPid();
int mypid = getpid();
- if (uid == -1 || (callingpid != mypid)) {
+ if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
similarity index 100%
rename from media/libmedia/AudioTrackShared.cpp
rename to media/libaudioclient/AudioTrackShared.cpp
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
similarity index 100%
rename from media/libmedia/IAudioFlinger.cpp
rename to media/libaudioclient/IAudioFlinger.cpp
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libaudioclient/IAudioFlingerClient.cpp
similarity index 100%
rename from media/libmedia/IAudioFlingerClient.cpp
rename to media/libaudioclient/IAudioFlingerClient.cpp
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
similarity index 100%
rename from media/libmedia/IAudioPolicyService.cpp
rename to media/libaudioclient/IAudioPolicyService.cpp
diff --git a/media/libmedia/IAudioPolicyServiceClient.cpp b/media/libaudioclient/IAudioPolicyServiceClient.cpp
similarity index 100%
rename from media/libmedia/IAudioPolicyServiceClient.cpp
rename to media/libaudioclient/IAudioPolicyServiceClient.cpp
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libaudioclient/IAudioRecord.cpp
similarity index 100%
rename from media/libmedia/IAudioRecord.cpp
rename to media/libaudioclient/IAudioRecord.cpp
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libaudioclient/IAudioTrack.cpp
similarity index 100%
rename from media/libmedia/IAudioTrack.cpp
rename to media/libaudioclient/IAudioTrack.cpp
diff --git a/media/libmedia/IEffect.cpp b/media/libaudioclient/IEffect.cpp
similarity index 93%
rename from media/libmedia/IEffect.cpp
rename to media/libaudioclient/IEffect.cpp
index 115ca75..ce72dae 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libaudioclient/IEffect.cpp
@@ -25,6 +25,9 @@
namespace android {
+// Maximum command/reply size expected
+#define EFFECT_PARAM_SIZE_MAX 65536
+
enum {
ENABLE = IBinder::FIRST_CALL_TRANSACTION,
DISABLE,
@@ -156,6 +159,10 @@
uint32_t cmdSize = data.readInt32();
char *cmd = NULL;
if (cmdSize) {
+ if (cmdSize > EFFECT_PARAM_SIZE_MAX) {
+ reply->writeInt32(NO_MEMORY);
+ return NO_ERROR;
+ }
cmd = (char *)calloc(cmdSize, 1);
if (cmd == NULL) {
reply->writeInt32(NO_MEMORY);
@@ -167,6 +174,11 @@
uint32_t replySz = replySize;
char *resp = NULL;
if (replySize) {
+ if (replySize > EFFECT_PARAM_SIZE_MAX) {
+ free(cmd);
+ reply->writeInt32(NO_MEMORY);
+ return NO_ERROR;
+ }
resp = (char *)calloc(replySize, 1);
if (resp == NULL) {
free(cmd);
diff --git a/media/libmedia/IEffectClient.cpp b/media/libaudioclient/IEffectClient.cpp
similarity index 100%
rename from media/libmedia/IEffectClient.cpp
rename to media/libaudioclient/IEffectClient.cpp
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
similarity index 100%
rename from media/libmedia/ToneGenerator.cpp
rename to media/libaudioclient/ToneGenerator.cpp
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 8aed146..af8cb50 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -340,8 +340,10 @@
}
delete pContext;
}
- *pHandle = (effect_handle_t)NULL;
+ if (pHandle != NULL)
+ *pHandle = (effect_handle_t)NULL;
} else {
+ if (pHandle != NULL)
*pHandle = (effect_handle_t)pContext;
}
ALOGV("\tEffectCreate end..\n\n");
@@ -2349,8 +2351,12 @@
case EQ_PARAM_BAND_LEVEL:
param2 = *pParamTemp;
- if (param2 >= FIVEBAND_NUMBANDS) {
+ if (param2 < 0 || param2 >= FIVEBAND_NUMBANDS) {
status = -EINVAL;
+ if (param2 < 0) {
+ android_errorWriteLog(0x534e4554, "32438598");
+ ALOGW("\tERROR Equalizer_getParameter() EQ_PARAM_BAND_LEVEL band %d", param2);
+ }
break;
}
*(int16_t *)pValue = (int16_t)EqualizerGetBandLevel(pContext, param2);
@@ -2360,8 +2366,12 @@
case EQ_PARAM_CENTER_FREQ:
param2 = *pParamTemp;
- if (param2 >= FIVEBAND_NUMBANDS) {
+ if (param2 < 0 || param2 >= FIVEBAND_NUMBANDS) {
status = -EINVAL;
+ if (param2 < 0) {
+ android_errorWriteLog(0x534e4554, "32436341");
+ ALOGW("\tERROR Equalizer_getParameter() EQ_PARAM_CENTER_FREQ band %d", param2);
+ }
break;
}
*(int32_t *)pValue = EqualizerGetCentreFrequency(pContext, param2);
@@ -2371,8 +2381,12 @@
case EQ_PARAM_BAND_FREQ_RANGE:
param2 = *pParamTemp;
- if (param2 >= FIVEBAND_NUMBANDS) {
+ if (param2 < 0 || param2 >= FIVEBAND_NUMBANDS) {
status = -EINVAL;
+ if (param2 < 0) {
+ android_errorWriteLog(0x534e4554, "32247948");
+ ALOGW("\tERROR Equalizer_getParameter() EQ_PARAM_BAND_FREQ_RANGE band %d", param2);
+ }
break;
}
EqualizerGetBandFreqRange(pContext, param2, (uint32_t *)pValue, ((uint32_t *)pValue + 1));
@@ -2399,9 +2413,13 @@
case EQ_PARAM_GET_PRESET_NAME:
param2 = *pParamTemp;
- if (param2 >= EqualizerGetNumPresets()) {
- //if (param2 >= 20) { // AGO FIX
+ if ((param2 < 0 && param2 != PRESET_CUSTOM) || param2 >= EqualizerGetNumPresets()) {
status = -EINVAL;
+ if (param2 < 0) {
+ android_errorWriteLog(0x534e4554, "32448258");
+ ALOGE("\tERROR Equalizer_getParameter() EQ_PARAM_GET_PRESET_NAME preset %d",
+ param2);
+ }
break;
}
name = (char *)pValue;
@@ -2471,8 +2489,12 @@
band = *pParamTemp;
level = (int32_t)(*(int16_t *)pValue);
//ALOGV("\tEqualizer_setParameter() EQ_PARAM_BAND_LEVEL band %d, level %d", band, level);
- if (band >= FIVEBAND_NUMBANDS) {
+ if (band < 0 || band >= FIVEBAND_NUMBANDS) {
status = -EINVAL;
+ if (band < 0) {
+ android_errorWriteLog(0x534e4554, "32095626");
+ ALOGE("\tERROR Equalizer_setParameter() EQ_PARAM_BAND_LEVEL band %d", band);
+ }
break;
}
EqualizerSetBandLevel(pContext, band, level);
@@ -2649,8 +2671,8 @@
case VOLUME_PARAM_ENABLESTEREOPOSITION:
positionEnabled = *(uint32_t *)pValue;
- status = VolumeEnableStereoPosition(pContext, positionEnabled);
- status = VolumeSetStereoPosition(pContext, pContext->pBundledContext->positionSaved);
+ (void) VolumeEnableStereoPosition(pContext, positionEnabled);
+ (void) VolumeSetStereoPosition(pContext, pContext->pBundledContext->positionSaved);
//ALOGV("\tVolume_setParameter() VOLUME_PARAM_ENABLESTEREOPOSITION called");
break;
@@ -3083,10 +3105,6 @@
//ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_GET_PARAM start");
effect_param_t *p = (effect_param_t *)pCmdData;
- if (SIZE_MAX - sizeof(effect_param_t) < (size_t)p->psize) {
- android_errorWriteLog(0x534e4554, "26347509");
- return -EINVAL;
- }
if (pCmdData == NULL || cmdSize < sizeof(effect_param_t) ||
cmdSize < (sizeof(effect_param_t) + p->psize) ||
pReplyData == NULL || replySize == NULL ||
@@ -3094,13 +3112,32 @@
ALOGV("\tLVM_ERROR : EFFECT_CMD_GET_PARAM: ERROR");
return -EINVAL;
}
+ if (EFFECT_PARAM_SIZE_MAX - sizeof(effect_param_t) < (size_t)p->psize) {
+ android_errorWriteLog(0x534e4554, "26347509");
+ ALOGV("\tLVM_ERROR : EFFECT_CMD_GET_PARAM: psize too big");
+ return -EINVAL;
+ }
+ uint32_t paddedParamSize = ((p->psize + sizeof(int32_t) - 1) / sizeof(int32_t)) *
+ sizeof(int32_t);
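+ // e.g. p->psize == 6 is rounded up to paddedParamSize == 8, so the value area that follows
+ // the parameter data stays 4-byte aligned.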
+ if ((EFFECT_PARAM_SIZE_MAX - sizeof(effect_param_t) < paddedParamSize) ||
+ (EFFECT_PARAM_SIZE_MAX - sizeof(effect_param_t) - paddedParamSize <
+ p->vsize)) {
+ ALOGV("\tLVM_ERROR : EFFECT_CMD_GET_PARAM: padded_psize or vsize too big");
+ return -EINVAL;
+ }
+ uint32_t expectedReplySize = sizeof(effect_param_t) + paddedParamSize + p->vsize;
+ if (*replySize < expectedReplySize) {
+ ALOGV("\tLVM_ERROR : EFFECT_CMD_GET_PARAM: min. replySize %u, got %u bytes",
+ expectedReplySize, *replySize);
+ android_errorWriteLog(0x534e4554, "32705438");
+ return -EINVAL;
+ }
memcpy(pReplyData, pCmdData, sizeof(effect_param_t) + p->psize);
p = (effect_param_t *)pReplyData;
- int voffset = ((p->psize - 1) / sizeof(int32_t) + 1) * sizeof(int32_t);
-
+ uint32_t voffset = paddedParamSize;
if(pContext->EffectType == LVM_BASS_BOOST){
p->status = android::BassBoost_getParameter(pContext,
p->data,
diff --git a/media/libeffects/proxy/EffectProxy.cpp b/media/libeffects/proxy/EffectProxy.cpp
index 7decaf6..14ded6a 100644
--- a/media/libeffects/proxy/EffectProxy.cpp
+++ b/media/libeffects/proxy/EffectProxy.cpp
@@ -236,6 +236,11 @@
// pCmdData points to a memory holding effect_offload_param_t structure
if (cmdCode == EFFECT_CMD_OFFLOAD) {
ALOGV("Effect_command() cmdCode = EFFECT_CMD_OFFLOAD");
+ if (replySize == NULL || *replySize < sizeof(int)) {
+ ALOGV("effectsOffload: Effect_command: CMD_OFFLOAD has no reply");
+ android_errorWriteLog(0x534e4554, "32448121");
+ return FAILED_TRANSACTION;
+ }
if (cmdSize == 0 || pCmdData == NULL) {
ALOGV("effectsOffload: Effect_command: CMD_OFFLOAD has no data");
*(int*)pReplyData = FAILED_TRANSACTION;
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 4c71907..1d9801f 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -59,6 +59,8 @@
#define DISCARD_MEASUREMENTS_TIME_MS 2000 // discard measurements older than this number of ms
+#define MAX_LATENCY_MS 3000 // 3 seconds of latency for audio pipeline
+
// maximum number of buffers for which we keep track of the measurements
#define MEASUREMENT_WINDOW_MAX_SIZE_IN_BUFFERS 25 // note: buffer index is stored in uint8_t
@@ -519,18 +521,29 @@
break;
}
switch (*(uint32_t *)p->data) {
- case VISUALIZER_PARAM_CAPTURE_SIZE:
- pContext->mCaptureSize = *((uint32_t *)p->data + 1);
- ALOGV("set mCaptureSize = %" PRIu32, pContext->mCaptureSize);
- break;
+ case VISUALIZER_PARAM_CAPTURE_SIZE: {
+ const uint32_t captureSize = *((uint32_t *)p->data + 1);
+ if (captureSize > VISUALIZER_CAPTURE_SIZE_MAX) {
+ android_errorWriteLog(0x534e4554, "31781965");
+ *(int32_t *)pReplyData = -EINVAL;
+ ALOGW("set mCaptureSize = %u > %u", captureSize, VISUALIZER_CAPTURE_SIZE_MAX);
+ } else {
+ pContext->mCaptureSize = captureSize;
+ ALOGV("set mCaptureSize = %u", captureSize);
+ }
+ } break;
case VISUALIZER_PARAM_SCALING_MODE:
pContext->mScalingMode = *((uint32_t *)p->data + 1);
ALOGV("set mScalingMode = %" PRIu32, pContext->mScalingMode);
break;
- case VISUALIZER_PARAM_LATENCY:
- pContext->mLatency = *((uint32_t *)p->data + 1);
- ALOGV("set mLatency = %" PRIu32, pContext->mLatency);
- break;
+ case VISUALIZER_PARAM_LATENCY: {
+ uint32_t latency = *((uint32_t *)p->data + 1);
+ if (latency > MAX_LATENCY_MS) {
+ latency = MAX_LATENCY_MS; // clamp latency b/31781965
+ }
+ pContext->mLatency = latency;
+ ALOGV("set mLatency = %u", latency);
+ } break;
case VISUALIZER_PARAM_MEASUREMENT_MODE:
pContext->mMeasurementMode = *((uint32_t *)p->data + 1);
ALOGV("set mMeasurementMode = %" PRIu32, pContext->mMeasurementMode);
@@ -569,10 +582,18 @@
if (latencyMs < 0) {
latencyMs = 0;
}
- const uint32_t deltaSmpl =
- pContext->mConfig.inputCfg.samplingRate * latencyMs / 1000;
- int32_t capturePoint = pContext->mCaptureIdx - captureSize - deltaSmpl;
+ uint32_t deltaSmpl = captureSize
+ + pContext->mConfig.inputCfg.samplingRate * latencyMs / 1000;
+ // a large sample rate, latency, or capture size could cause overflow.
+ // do not offset more than the size of buffer.
+ if (deltaSmpl > CAPTURE_BUF_SIZE) {
+ android_errorWriteLog(0x534e4554, "31781965");
+ deltaSmpl = CAPTURE_BUF_SIZE;
+ }
+
+ int32_t capturePoint = pContext->mCaptureIdx - deltaSmpl;
+ // a negative capturePoint means we wrap the buffer.
if (capturePoint < 0) {
uint32_t size = -capturePoint;
if (size > captureSize) {
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 7fde4b2..6e28ba9 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -1,6 +1,6 @@
cc_library_static {
name: "libmedia_helper",
- srcs: ["AudioParameter.cpp"],
+ srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
cflags: [
"-Werror",
"-Wno-error=deprecated-declarations",
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index ba31cfa..f4d8bc0 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -10,26 +10,14 @@
aidl/android/IOMXBufferSource.aidl
LOCAL_SRC_FILES += \
- AudioTrack.cpp \
- AudioTrackShared.cpp \
- IAudioFlinger.cpp \
- IAudioFlingerClient.cpp \
- IAudioTrack.cpp \
- IAudioRecord.cpp \
- ICrypto.cpp \
IDataSource.cpp \
- IDrm.cpp \
- IDrmClient.cpp \
IHDCP.cpp \
- AudioRecord.cpp \
- AudioSystem.cpp \
+ BufferingSettings.cpp \
mediaplayer.cpp \
IMediaCodecList.cpp \
IMediaCodecService.cpp \
- IMediaDrmService.cpp \
IMediaHTTPConnection.cpp \
IMediaHTTPService.cpp \
- IMediaLogService.cpp \
IMediaExtractor.cpp \
IMediaExtractorService.cpp \
IMediaPlayerService.cpp \
@@ -53,11 +41,8 @@
mediametadataretriever.cpp \
MidiDeviceInfo.cpp \
MidiIoWrapper.cpp \
- ToneGenerator.cpp \
JetPlayer.cpp \
IOMX.cpp \
- IAudioPolicyService.cpp \
- IAudioPolicyServiceClient.cpp \
MediaScanner.cpp \
MediaScannerClient.cpp \
CharacterEncodingDetector.cpp \
@@ -66,18 +51,13 @@
MediaResource.cpp \
MediaResourcePolicy.cpp \
OMXBuffer.cpp \
- IEffect.cpp \
- IEffectClient.cpp \
- AudioEffect.cpp \
Visualizer.cpp \
- MemoryLeakTrackUtil.cpp \
StringArray.cpp \
- AudioPolicy.cpp
LOCAL_SHARED_LIBRARIES := \
libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
libcamera_client libstagefright_foundation \
- libgui libdl libaudioutils
+ libgui libdl libaudioutils libaudioclient
LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index f263903..d244a0a 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -83,15 +83,17 @@
mParameters.clear();
}
-String8 AudioParameter::toString()
+String8 AudioParameter::toStringImpl(bool useValues) const
{
String8 str = String8("");
size_t size = mParameters.size();
for (size_t i = 0; i < size; i++) {
str += mParameters.keyAt(i);
- str += "=";
- str += mParameters.valueAt(i);
+ if (useValues) {
+ str += "=";
+ str += mParameters.valueAt(i);
+ }
if (i < (size - 1)) str += ";";
}
return str;
@@ -108,6 +110,11 @@
}
}
+status_t AudioParameter::addKey(const String8& key)
+{
+ return add(key, String8());
+}
+
status_t AudioParameter::addInt(const String8& key, const int value)
{
char str[12];
@@ -140,7 +147,7 @@
}
}
-status_t AudioParameter::get(const String8& key, String8& value)
+status_t AudioParameter::get(const String8& key, String8& value) const
{
if (mParameters.indexOfKey(key) >= 0) {
value = mParameters.valueFor(key);
@@ -150,7 +157,7 @@
}
}
-status_t AudioParameter::getInt(const String8& key, int& value)
+status_t AudioParameter::getInt(const String8& key, int& value) const
{
String8 str8;
status_t result = get(key, str8);
@@ -166,7 +173,7 @@
return result;
}
-status_t AudioParameter::getFloat(const String8& key, float& value)
+status_t AudioParameter::getFloat(const String8& key, float& value) const
{
String8 str8;
status_t result = get(key, str8);
@@ -182,7 +189,7 @@
return result;
}
-status_t AudioParameter::getAt(size_t index, String8& key, String8& value)
+status_t AudioParameter::getAt(size_t index, String8& key, String8& value) const
{
if (mParameters.size() > index) {
key = mParameters.keyAt(index);
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
new file mode 100644
index 0000000..6dc4a53
--- /dev/null
+++ b/media/libmedia/BufferingSettings.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BufferingSettings"
+//#define LOG_NDEBUG 0
+
+#include <binder/Parcel.h>
+
+#include <media/BufferingSettings.h>
+
+namespace android {
+
+// static
+bool BufferingSettings::IsValidBufferingMode(int mode) {
+ return (mode >= BUFFERING_MODE_NONE && mode < BUFFERING_MODE_COUNT);
+}
+
+BufferingSettings::BufferingSettings()
+ : mInitialBufferingMode(BUFFERING_MODE_NONE),
+ mRebufferingMode(BUFFERING_MODE_NONE),
+ mInitialWatermarkMs(kNoWatermark),
+ mInitialWatermarkKB(kNoWatermark),
+ mRebufferingWatermarkLowMs(kNoWatermark),
+ mRebufferingWatermarkHighMs(kNoWatermark),
+ mRebufferingWatermarkLowKB(kNoWatermark),
+ mRebufferingWatermarkHighKB(kNoWatermark) { }
+
+status_t BufferingSettings::readFromParcel(const Parcel* parcel) {
+ if (parcel == nullptr) {
+ return BAD_VALUE;
+ }
+ mInitialBufferingMode = (BufferingMode)parcel->readInt32();
+ mRebufferingMode = (BufferingMode)parcel->readInt32();
+ mInitialWatermarkMs = parcel->readInt32();
+ mInitialWatermarkKB = parcel->readInt32();
+ mRebufferingWatermarkLowMs = parcel->readInt32();
+ mRebufferingWatermarkHighMs = parcel->readInt32();
+ mRebufferingWatermarkLowKB = parcel->readInt32();
+ mRebufferingWatermarkHighKB = parcel->readInt32();
+
+ return OK;
+}
+
+status_t BufferingSettings::writeToParcel(Parcel* parcel) const {
+ if (parcel == nullptr) {
+ return BAD_VALUE;
+ }
+ parcel->writeInt32(mInitialBufferingMode);
+ parcel->writeInt32(mRebufferingMode);
+ parcel->writeInt32(mInitialWatermarkMs);
+ parcel->writeInt32(mInitialWatermarkKB);
+ parcel->writeInt32(mRebufferingWatermarkLowMs);
+ parcel->writeInt32(mRebufferingWatermarkHighMs);
+ parcel->writeInt32(mRebufferingWatermarkLowKB);
+ parcel->writeInt32(mRebufferingWatermarkHighKB);
+
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index f8345e4..9ffde4e 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -23,6 +23,7 @@
#include <media/AudioResamplerPublic.h>
#include <media/AVSyncSettings.h>
+#include <media/BufferingSettings.h>
#include <media/IDataSource.h>
#include <media/IMediaHTTPService.h>
@@ -40,6 +41,8 @@
SET_DATA_SOURCE_FD,
SET_DATA_SOURCE_STREAM,
SET_DATA_SOURCE_CALLBACK,
+ SET_BUFFERING_SETTINGS,
+ GET_DEFAULT_BUFFERING_SETTINGS,
PREPARE_ASYNC,
START,
STOP,
@@ -148,6 +151,30 @@
return reply.readInt32();
}
+ status_t setBufferingSettings(const BufferingSettings& buffering)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ buffering.writeToParcel(&data);
+ remote()->transact(SET_BUFFERING_SETTINGS, data, &reply);
+ return reply.readInt32();
+ }
+
+ status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+ {
+ if (buffering == nullptr) {
+ return BAD_VALUE;
+ }
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ remote()->transact(GET_DEFAULT_BUFFERING_SETTINGS, data, &reply);
+ status_t err = reply.readInt32();
+ if (err == OK) {
+ err = buffering->readFromParcel(&reply);
+ }
+ return err;
+ }
+
status_t prepareAsync()
{
Parcel data, reply;
@@ -246,11 +273,12 @@
return reply.readInt32();
}
- status_t seekTo(int msec)
+ status_t seekTo(int msec, MediaPlayerSeekMode mode)
{
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
data.writeInt32(msec);
+ data.writeInt32(mode);
remote()->transact(SEEK_TO, data, &reply);
return reply.readInt32();
}
@@ -496,6 +524,23 @@
reply->writeInt32(setVideoSurfaceTexture(bufferProducer));
return NO_ERROR;
} break;
+ case SET_BUFFERING_SETTINGS: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ BufferingSettings buffering;
+ buffering.readFromParcel(&data);
+ reply->writeInt32(setBufferingSettings(buffering));
+ return NO_ERROR;
+ } break;
+ case GET_DEFAULT_BUFFERING_SETTINGS: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ BufferingSettings buffering;
+ status_t err = getDefaultBufferingSettings(&buffering);
+ reply->writeInt32(err);
+ if (err == OK) {
+ buffering.writeToParcel(reply);
+ }
+ return NO_ERROR;
+ } break;
case PREPARE_ASYNC: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
reply->writeInt32(prepareAsync());
@@ -573,7 +618,9 @@
} break;
case SEEK_TO: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
- reply->writeInt32(seekTo(data.readInt32()));
+ int msec = data.readInt32();
+ MediaPlayerSeekMode mode = (MediaPlayerSeekMode)data.readInt32();
+ reply->writeInt32(seekTo(msec, mode));
return NO_ERROR;
} break;
case GET_CURRENT_POSITION: {
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index 1a6d6b8..3d466b1 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -37,19 +37,18 @@
CONNECT = IBinder::FIRST_CALL_TRANSACTION,
LIST_NODES,
ALLOCATE_NODE,
+ CREATE_INPUT_SURFACE,
FREE_NODE,
SEND_COMMAND,
GET_PARAMETER,
SET_PARAMETER,
GET_CONFIG,
SET_CONFIG,
- ENABLE_NATIVE_BUFFERS,
- USE_BUFFER,
- CREATE_INPUT_SURFACE,
+ SET_PORT_MODE,
SET_INPUT_SURFACE,
- STORE_META_DATA_IN_BUFFERS,
PREPARE_FOR_ADAPTIVE_PLAYBACK,
ALLOC_SECURE_BUFFER,
+ USE_BUFFER,
FREE_BUFFER,
FILL_BUFFER,
EMPTY_BUFFER,
@@ -225,17 +224,15 @@
return reply.readInt32();
}
- virtual status_t enableNativeBuffers(
- OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
+ virtual status_t setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) {
Parcel data, reply;
data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(port_index);
- data.writeInt32((uint32_t)graphic);
- data.writeInt32((uint32_t)enable);
- remote()->transact(ENABLE_NATIVE_BUFFERS, data, &reply);
+ data.writeInt32(mode);
+ remote()->transact(SET_PORT_MODE, data, &reply);
- status_t err = reply.readInt32();
- return err;
+ return reply.readInt32();
}
virtual status_t getGraphicBufferUsage(
@@ -294,25 +291,6 @@
return err;
}
- virtual status_t storeMetaDataInBuffers(
- OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
- data.writeInt32(port_index);
- data.writeInt32((int32_t)enable);
- data.writeInt32(type == NULL ? kMetadataBufferTypeANWBuffer : *type);
-
- remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
-
- // read type even storeMetaDataInBuffers failed
- int negotiatedType = reply.readInt32();
- if (type != NULL) {
- *type = (MetadataBufferType)negotiatedType;
- }
-
- return reply.readInt32();
- }
-
virtual status_t prepareForAdaptivePlayback(
OMX_U32 port_index, OMX_BOOL enable,
OMX_U32 max_width, OMX_U32 max_height) {
@@ -612,7 +590,7 @@
params = mmap(NULL, allocSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1 /* fd */, 0 /* offset */);
}
- if (params != MAP_FAILED) {
+ if (params != MAP_FAILED && params != NULL) {
err = data.read(params, size);
if (err != OK) {
android_errorWriteLog(0x534e4554, "26914474");
@@ -670,16 +648,12 @@
return NO_ERROR;
}
- case ENABLE_NATIVE_BUFFERS:
+ case SET_PORT_MODE:
{
CHECK_OMX_INTERFACE(IOMXNode, data, reply);
-
OMX_U32 port_index = data.readInt32();
- OMX_BOOL graphic = (OMX_BOOL)data.readInt32();
- OMX_BOOL enable = (OMX_BOOL)data.readInt32();
-
- status_t err = enableNativeBuffers(port_index, graphic, enable);
- reply->writeInt32(err);
+ IOMX::PortMode mode = (IOMX::PortMode) data.readInt32();
+ reply->writeInt32(setPortMode(port_index, mode));
return NO_ERROR;
}
@@ -734,22 +708,6 @@
return NO_ERROR;
}
- case STORE_META_DATA_IN_BUFFERS:
- {
- CHECK_OMX_INTERFACE(IOMXNode, data, reply);
-
- OMX_U32 port_index = data.readInt32();
- OMX_BOOL enable = (OMX_BOOL)data.readInt32();
-
- MetadataBufferType type = (MetadataBufferType)data.readInt32();
- status_t err = storeMetaDataInBuffers(port_index, enable, &type);
-
- reply->writeInt32(type);
- reply->writeInt32(err);
-
- return NO_ERROR;
- }
-
case PREPARE_FOR_ADAPTIVE_PLAYBACK:
{
CHECK_OMX_INTERFACE(IOMXNode, data, reply);
diff --git a/media/libmedia/MediaCodecBuffer.cpp b/media/libmedia/MediaCodecBuffer.cpp
index 2af31d0..59d6164 100644
--- a/media/libmedia/MediaCodecBuffer.cpp
+++ b/media/libmedia/MediaCodecBuffer.cpp
@@ -80,8 +80,9 @@
return mFormat;
}
-sp<MediaCodecBuffer> MediaCodecBuffer::clone(const sp<AMessage> &format) {
- return new MediaCodecBuffer(format, mBuffer);
+void MediaCodecBuffer::setFormat(const sp<AMessage> &format) {
+ mMeta->clear();
+ mFormat = format;
}
} // namespace android
diff --git a/media/libmedia/MediaDefs.cpp b/media/libmedia/MediaDefs.cpp
index a2110c9..2ae71f7 100644
--- a/media/libmedia/MediaDefs.cpp
+++ b/media/libmedia/MediaDefs.cpp
@@ -57,8 +57,6 @@
const char *MEDIA_MIMETYPE_CONTAINER_AVI = "video/avi";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS = "video/mp2p";
-const char *MEDIA_MIMETYPE_CONTAINER_WVM = "video/wvm";
-
const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
diff --git a/media/libmedia/OMXBuffer.cpp b/media/libmedia/OMXBuffer.cpp
index 0931872..2834853 100644
--- a/media/libmedia/OMXBuffer.cpp
+++ b/media/libmedia/OMXBuffer.cpp
@@ -38,10 +38,9 @@
mRangeLength(codecBuffer != NULL ? codecBuffer->size() : 0) {
}
-OMXBuffer::OMXBuffer(const sp<IMemory> &mem, size_t allottedSize)
+OMXBuffer::OMXBuffer(const sp<IMemory> &mem)
: mBufferType(kBufferTypeSharedMem),
- mMem(mem),
- mAllottedSize(allottedSize ? : mem->size()) {
+ mMem(mem) {
}
OMXBuffer::OMXBuffer(const sp<GraphicBuffer> &gbuf)
@@ -68,11 +67,7 @@
case kBufferTypeSharedMem:
{
- status_t err = parcel->writeStrongBinder(IInterface::asBinder(mMem));
- if (err != NO_ERROR) {
- return err;
- }
- return parcel->writeUint32(mAllottedSize);
+ return parcel->writeStrongBinder(IInterface::asBinder(mMem));
}
case kBufferTypeANWBuffer:
@@ -103,10 +98,7 @@
case kBufferTypeSharedMem:
{
- sp<IMemory> params = interface_cast<IMemory>(parcel->readStrongBinder());
-
- mMem = params;
- mAllottedSize = parcel->readUint32();
+ mMem = interface_cast<IMemory>(parcel->readStrongBinder());
break;
}
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
new file mode 100644
index 0000000..54d1fc1
--- /dev/null
+++ b/media/libmedia/TypeConverter.cpp
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/TypeConverter.h>
+
+namespace android {
+
+#define MAKE_STRING_FROM_ENUM(string) { #string, string }
+#define TERMINATOR { .literal = nullptr }
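+
+// e.g. MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_NONE) expands to
+// { "AUDIO_DEVICE_NONE", AUDIO_DEVICE_NONE }, pairing each enumerator with its literal name;
+// TERMINATOR marks the end of each table below.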
+
+template <>
+const OutputDeviceConverter::Table OutputDeviceConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPDIF),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DEFAULT),
+ // STUB must be after DEFAULT, so the latter is picked up by toString first.
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
+ TERMINATOR
+};
+
+template <>
+const InputDeviceConverter::Table InputDeviceConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_COMMUNICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_USB),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_SPDIF),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DEFAULT),
+ // STUB must be after DEFAULT, so the latter is picked up by toString first.
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
+ TERMINATOR
+};
+
+
+template <>
+const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_FAST),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_TTS),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO),
+ TERMINATOR
+};
+
+
+template <>
+const InputFlagConverter::Table InputFlagConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_FAST),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
+ TERMINATOR
+};
+
+
+template <>
+const FormatConverter::Table FormatConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_16_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_32_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_FLOAT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP3),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_NB),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_WB),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_MAIN),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SSR),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LTP),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V1),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ERLC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_OPUS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC3),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
+ TERMINATOR
+};
+
+
+template <>
+const OutputChannelConverter::Table OutputChannelConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+ TERMINATOR
+};
+
+
+template <>
+const InputChannelConverter::Table InputChannelConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+ TERMINATOR
+};
+
+template <>
+const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
+ {"AUDIO_CHANNEL_INDEX_MASK_1", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_1)},
+ {"AUDIO_CHANNEL_INDEX_MASK_2", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_2)},
+ {"AUDIO_CHANNEL_INDEX_MASK_3", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_3)},
+ {"AUDIO_CHANNEL_INDEX_MASK_4", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_4)},
+ {"AUDIO_CHANNEL_INDEX_MASK_5", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_5)},
+ {"AUDIO_CHANNEL_INDEX_MASK_6", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_6)},
+ {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
+ {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
+ TERMINATOR
+};
+
+
+template <>
+const GainModeConverter::Table GainModeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_JOINT),
+ MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_CHANNELS),
+ MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
+ TERMINATOR
+};
+
+
+template <>
+const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_MUSIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ALARM),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_NOTIFICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_BLUETOOTH_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ENFORCED_AUDIBLE),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
+ TERMINATOR
+};
+
+template<>
+const AudioModeConverter::Table AudioModeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_INVALID),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_CURRENT),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_NORMAL),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_RINGTONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
+ TERMINATOR
+};
+
+template class TypeConverter<OutputDeviceTraits>;
+template class TypeConverter<InputDeviceTraits>;
+template class TypeConverter<OutputFlagTraits>;
+template class TypeConverter<InputFlagTraits>;
+template class TypeConverter<FormatTraits>;
+template class TypeConverter<OutputChannelTraits>;
+template class TypeConverter<InputChannelTraits>;
+template class TypeConverter<ChannelIndexTraits>;
+template class TypeConverter<GainModeTraits>;
+template class TypeConverter<StreamTraits>;
+template class TypeConverter<AudioModeTraits>;
+
+bool deviceFromString(const std::string& literalDevice, audio_devices_t& device) {
+ return InputDeviceConverter::fromString(literalDevice, device) ||
+ OutputDeviceConverter::fromString(literalDevice, device);
+}
+
+bool deviceToString(audio_devices_t device, std::string& literalDevice) {
+ if (device & AUDIO_DEVICE_BIT_IN) {
+ return InputDeviceConverter::toString(device, literalDevice);
+ } else {
+ return OutputDeviceConverter::toString(device, literalDevice);
+ }
+}
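+// A minimal usage sketch, assuming the named enums appear in the device
+// tables above: deviceToString() dispatches on AUDIO_DEVICE_BIT_IN, while
+// deviceFromString() accepts a literal from either table.
+//
+//   std::string literal;
+//   if (deviceToString(AUDIO_DEVICE_OUT_SPEAKER, literal)) {
+//       // AUDIO_DEVICE_BIT_IN is clear, so the output table produced the name.
+//   }
+//   audio_devices_t device;
+//   if (deviceFromString(literal, device)) {
+//       // Round-trips back to AUDIO_DEVICE_OUT_SPEAKER.
+//   }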
+
+SampleRateTraits::Collection samplingRatesFromString(
+ const std::string &samplingRates, const char *del)
+{
+ SampleRateTraits::Collection samplingRateCollection;
+ collectionFromString<SampleRateTraits>(samplingRates, samplingRateCollection, del);
+ return samplingRateCollection;
+}
+
+FormatTraits::Collection formatsFromString(
+ const std::string &formats, const char *del)
+{
+ FormatTraits::Collection formatCollection;
+ FormatConverter::collectionFromString(formats, formatCollection, del);
+ return formatCollection;
+}
+
+audio_format_t formatFromString(const std::string &literalFormat, audio_format_t defaultFormat)
+{
+ audio_format_t format;
+ if (literalFormat.empty()) {
+ return defaultFormat;
+ }
+ // Fall back to the default when the literal is not recognized, rather than
+ // returning an uninitialized value.
+ if (!FormatConverter::fromString(literalFormat, format)) {
+ return defaultFormat;
+ }
+ return format;
+}
+
+audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
+{
+ audio_channel_mask_t channels;
+ // Accept either an output or an input channel literal; fail only when
+ // neither converter recognizes the string.
+ if (!OutputChannelConverter::fromString(literalChannels, channels) &&
+ !InputChannelConverter::fromString(literalChannels, channels)) {
+ return AUDIO_CHANNEL_INVALID;
+ }
+ return channels;
+}
+
+ChannelTraits::Collection channelMasksFromString(
+ const std::string &channels, const char *del)
+{
+ ChannelTraits::Collection channelMaskCollection;
+ OutputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+ InputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+ ChannelIndexConverter::collectionFromString(channels, channelMaskCollection, del);
+ return channelMaskCollection;
+}
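+// A minimal usage sketch: the literal list may mix output, input, and index
+// mask names (all taken from the tables above), split on the caller-supplied
+// delimiter.
+//
+//   ChannelTraits::Collection masks =
+//           channelMasksFromString("AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_IN_MONO", "|");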
+
+InputChannelTraits::Collection inputChannelMasksFromString(
+ const std::string &inChannels, const char *del)
+{
+ InputChannelTraits::Collection inputChannelMaskCollection;
+ InputChannelConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+ ChannelIndexConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+ return inputChannelMaskCollection;
+}
+
+OutputChannelTraits::Collection outputChannelMasksFromString(
+ const std::string &outChannels, const char *del)
+{
+ OutputChannelTraits::Collection outputChannelMaskCollection;
+ OutputChannelConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+ ChannelIndexConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+ return outputChannelMaskCollection;
+}
+
+}; // namespace android
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index fbe749c..699172b 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -55,7 +55,9 @@
mStreamType = AUDIO_STREAM_MUSIC;
mAudioAttributesParcel = NULL;
mCurrentPosition = -1;
+ mCurrentSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mSeekPosition = -1;
+ mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mCurrentState = MEDIA_PLAYER_IDLE;
mPrepareSync = false;
mPrepareStatus = NO_ERROR;
@@ -100,7 +102,9 @@
void MediaPlayer::clear_l()
{
mCurrentPosition = -1;
+ mCurrentSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mSeekPosition = -1;
+ mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mVideoWidth = mVideoHeight = 0;
mRetransmitEndpointValid = false;
}
@@ -508,9 +512,9 @@
return getDuration_l(msec);
}
-status_t MediaPlayer::seekTo_l(int msec)
+status_t MediaPlayer::seekTo_l(int msec, MediaPlayerSeekMode mode)
{
- ALOGV("seekTo %d", msec);
+ ALOGV("seekTo (%d, %d)", msec, mode);
if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED |
MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
if ( msec < 0 ) {
@@ -537,12 +541,14 @@
// cache duration
mCurrentPosition = msec;
+ mCurrentSeekMode = mode;
if (mSeekPosition < 0) {
mSeekPosition = msec;
- return mPlayer->seekTo(msec);
+ mSeekMode = mode;
+ return mPlayer->seekTo(msec, mode);
}
else {
- ALOGV("Seek in progress - queue up seekTo[%d]", msec);
+ ALOGV("Seek in progress - queue up seekTo[%d, %d]", msec, mode);
return NO_ERROR;
}
}
@@ -551,11 +557,11 @@
return INVALID_OPERATION;
}
-status_t MediaPlayer::seekTo(int msec)
+status_t MediaPlayer::seekTo(int msec, MediaPlayerSeekMode mode)
{
mLockThreadId = getThreadId();
Mutex::Autolock _l(mLock);
- status_t result = seekTo_l(msec);
+ status_t result = seekTo_l(msec, mode);
mLockThreadId = 0;
return result;
@@ -869,14 +875,16 @@
break;
case MEDIA_SEEK_COMPLETE:
ALOGV("Received seek complete");
- if (mSeekPosition != mCurrentPosition) {
- ALOGV("Executing queued seekTo(%d)", mSeekPosition);
+ if (mSeekPosition != mCurrentPosition || (mSeekMode != mCurrentSeekMode)) {
+ ALOGV("Executing queued seekTo(%d, %d)", mCurrentPosition, mCurrentSeekMode);
mSeekPosition = -1;
- seekTo_l(mCurrentPosition);
+ mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
+ seekTo_l(mCurrentPosition, mCurrentSeekMode);
}
else {
ALOGV("All seeks complete - return to regularly scheduled program");
mCurrentPosition = mSeekPosition = -1;
+ mCurrentSeekMode = mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
}
break;
case MEDIA_BUFFERING_UPDATE:
diff --git a/media/libmediaplayerservice/ActivityManager.cpp b/media/libmediaplayerservice/ActivityManager.cpp
index 60a209f..0e6cf7b 100644
--- a/media/libmediaplayerservice/ActivityManager.cpp
+++ b/media/libmediaplayerservice/ActivityManager.cpp
@@ -24,7 +24,7 @@
namespace android {
-const uint32_t OPEN_CONTENT_URI_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION + 4;
+const uint32_t OPEN_CONTENT_URI_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION;
// Perform ContentProvider.openFile() on the given URI, returning
// the resulting native file descriptor. Returns < 0 on error.
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 97e7404..1786e6b 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -26,6 +26,7 @@
libdl \
libgui \
libmedia \
+ libaudioclient \
libmediautils \
libmemunreachable \
libstagefright \
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 605c710..0a9f791 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -26,7 +26,6 @@
#include <media/stagefright/foundation/ADebug.h>
#include <utils/Errors.h>
#include <utils/misc.h>
-#include <../libstagefright/include/WVMExtractor.h>
#include "MediaPlayerFactory.h"
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index f619e1d..3ad461c 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -972,6 +972,42 @@
return OK;
}
+status_t MediaPlayerService::Client::setBufferingSettings(
+ const BufferingSettings& buffering)
+{
+ ALOGV("[%d] setBufferingSettings(%d, %d, %d, %d, %d, %d, %d, %d)",
+ mConnId, buffering.mInitialBufferingMode, buffering.mRebufferingMode,
+ buffering.mInitialWatermarkMs, buffering.mInitialWatermarkKB,
+ buffering.mRebufferingWatermarkLowMs,
+ buffering.mRebufferingWatermarkHighMs,
+ buffering.mRebufferingWatermarkLowKB,
+ buffering.mRebufferingWatermarkHighKB);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == 0) return UNKNOWN_ERROR;
+ return p->setBufferingSettings(buffering);
+}
+
+status_t MediaPlayerService::Client::getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */)
+{
+ sp<MediaPlayerBase> p = getPlayer();
+ // TODO: create mPlayer on demand.
+ if (p == 0) return UNKNOWN_ERROR;
+ status_t ret = p->getDefaultBufferingSettings(buffering);
+ if (ret == NO_ERROR) {
+ ALOGV("[%d] getDefaultBufferingSettings(%d, %d, %d, %d, %d, %d, %d, %d)",
+ mConnId, buffering->mInitialBufferingMode, buffering->mRebufferingMode,
+ buffering->mInitialWatermarkMs, buffering->mInitialWatermarkKB,
+ buffering->mRebufferingWatermarkLowMs,
+ buffering->mRebufferingWatermarkHighMs,
+ buffering->mRebufferingWatermarkLowKB,
+ buffering->mRebufferingWatermarkHighKB);
+ } else {
+ ALOGV("[%d] getDefaultBufferingSettings returned %d", mConnId, ret);
+ }
+ return ret;
+}
+
status_t MediaPlayerService::Client::prepareAsync()
{
ALOGV("[%d] prepareAsync", mConnId);
@@ -1121,12 +1157,12 @@
return OK;
}
-status_t MediaPlayerService::Client::seekTo(int msec)
+status_t MediaPlayerService::Client::seekTo(int msec, MediaPlayerSeekMode mode)
{
- ALOGV("[%d] seekTo(%d)", mConnId, msec);
+ ALOGV("[%d] seekTo(%d, %d)", mConnId, msec, mode);
sp<MediaPlayerBase> p = getPlayer();
if (p == 0) return UNKNOWN_ERROR;
- return p->seekTo(msec);
+ return p->seekTo(msec, mode);
}
status_t MediaPlayerService::Client::reset()
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 601b046..8a6ada0 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -288,6 +288,9 @@
virtual void disconnect();
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer);
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
virtual status_t prepareAsync();
virtual status_t start();
virtual status_t stop();
@@ -298,7 +301,9 @@
virtual status_t setSyncSettings(const AVSyncSettings& rate, float videoFpsHint);
virtual status_t getSyncSettings(AVSyncSettings* rate /* nonnull */,
float* videoFps /* nonnull */);
- virtual status_t seekTo(int msec);
+ virtual status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
virtual status_t getCurrentPosition(int* msec);
virtual status_t getDuration(int* msec);
virtual status_t reset();
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 94ceae4..49c221b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -369,9 +369,13 @@
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.camera"));
- mCameraDeathListener = new ServiceDeathNotifier(binder, listener,
- MediaPlayerService::CAMERA_PROCESS_DEATH);
- binder->linkToDeath(mCameraDeathListener);
+
+ // If the device does not have a camera, do not create a death listener for it.
+ if (binder != NULL) {
+ mCameraDeathListener = new ServiceDeathNotifier(binder, listener,
+ MediaPlayerService::CAMERA_PROCESS_DEATH);
+ binder->linkToDeath(mCameraDeathListener);
+ }
binder = sm->getService(String16("media.codec"));
mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
diff --git a/media/libmediaplayerservice/TestPlayerStub.h b/media/libmediaplayerservice/TestPlayerStub.h
index 55bf2c8..11fddf6 100644
--- a/media/libmediaplayerservice/TestPlayerStub.h
+++ b/media/libmediaplayerservice/TestPlayerStub.h
@@ -87,7 +87,11 @@
virtual status_t stop() {return mPlayer->stop();}
virtual status_t pause() {return mPlayer->pause();}
virtual bool isPlaying() {return mPlayer->isPlaying();}
- virtual status_t seekTo(int msec) {return mPlayer->seekTo(msec);}
+ virtual status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) {
+ return mPlayer->seekTo(msec, mode);
+ }
virtual status_t getCurrentPosition(int *p) {
return mPlayer->getCurrentPosition(p);
}
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 4554472..4956fa0 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -35,7 +35,6 @@
#include <media/stagefright/Utils.h>
#include "../../libstagefright/include/DRMExtractor.h"
#include "../../libstagefright/include/NuCachedSource2.h"
-#include "../../libstagefright/include/WVMExtractor.h"
#include "../../libstagefright/include/HTTPBase.h"
namespace android {
@@ -59,7 +58,6 @@
mFetchTimedTextDataGeneration(0),
mDurationUs(-1ll),
mAudioIsVorbis(false),
- mIsWidevine(false),
mIsSecure(false),
mIsStreaming(false),
mUIDValid(uidValid),
@@ -70,7 +68,6 @@
mPendingReadBufferTypes(0) {
mBufferingMonitor = new BufferingMonitor(notify);
resetDataSource();
- DataSource::RegisterDefaultSniffers();
}
void NuPlayer::GenericSource::resetDataSource() {
@@ -141,44 +138,9 @@
status_t NuPlayer::GenericSource::initFromDataSource() {
sp<IMediaExtractor> extractor;
- String8 mimeType;
- float confidence;
- sp<AMessage> dummy;
- bool isWidevineStreaming = false;
-
CHECK(mDataSource != NULL);
- if (mIsWidevine) {
- isWidevineStreaming = SniffWVM(
- mDataSource, &mimeType, &confidence, &dummy);
- if (!isWidevineStreaming ||
- strcasecmp(
- mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
- ALOGE("unsupported widevine mime: %s", mimeType.string());
- return UNKNOWN_ERROR;
- }
- } else if (mIsStreaming) {
- if (!mDataSource->sniff(&mimeType, &confidence, &dummy)) {
- return UNKNOWN_ERROR;
- }
- isWidevineStreaming = !strcasecmp(
- mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM);
- }
-
- if (isWidevineStreaming) {
- // we don't want cached source for widevine streaming.
- mCachedSource.clear();
- mDataSource = mHttpSource;
- mWVMExtractor = new WVMExtractor(mDataSource);
- mWVMExtractor->setAdaptiveStreamingMode(true);
- if (mUIDValid) {
- mWVMExtractor->setUID(mUID);
- }
- extractor = mWVMExtractor;
- } else {
- extractor = MediaExtractor::Create(mDataSource,
- mimeType.isEmpty() ? NULL : mimeType.string());
- }
+ extractor = MediaExtractor::Create(mDataSource, NULL);
if (extractor == NULL) {
return UNKNOWN_ERROR;
@@ -194,17 +156,6 @@
if (mFileMeta->findInt64(kKeyDuration, &duration)) {
mDurationUs = duration;
}
-
- if (!mIsWidevine) {
- // Check mime to see if we actually have a widevine source.
- // If the data source is not URL-type (eg. file source), we
- // won't be able to tell until now.
- const char *fileMime;
- if (mFileMeta->findCString(kKeyMIMEType, &fileMime)
- && !strncasecmp(fileMime, "video/wvm", 9)) {
- mIsWidevine = true;
- }
- }
}
int32_t totalBitrate = 0;
@@ -296,6 +247,9 @@
// Widevine sources might re-initialize crypto when starting, if we delay
// this to start(), all data buffered during prepare would be wasted.
// (We don't actually start reading until start().)
+ //
+ // TODO: this logic may no longer be relevant after the removal of widevine
+ // support
if (mAudioTrack.mSource != NULL && mAudioTrack.mSource->start() != OK) {
ALOGE("failed to start audio track!");
return UNKNOWN_ERROR;
@@ -378,11 +332,8 @@
if (!mUri.empty()) {
const char* uri = mUri.c_str();
String8 contentType;
- mIsWidevine = !strncasecmp(uri, "widevine://", 11);
- if (!strncasecmp("http://", uri, 7)
- || !strncasecmp("https://", uri, 8)
- || mIsWidevine) {
+ if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
mHttpSource = DataSource::CreateMediaHTTP(mHTTPService);
if (mHttpSource == NULL) {
ALOGE("Failed to create http source!");
@@ -395,8 +346,6 @@
mHTTPService, uri, &mUriHeaders, &contentType,
static_cast<HTTPBase *>(mHttpSource.get()));
} else {
- mIsWidevine = false;
-
mDataSource = new FileSource(mFd, mOffset, mLength);
mFd = -1;
}
@@ -412,13 +361,9 @@
mCachedSource = static_cast<NuCachedSource2 *>(mDataSource.get());
}
- // For widevine or other cached streaming cases, we need to wait for
- // enough buffering before reporting prepared.
- // Note that even when URL doesn't start with widevine://, mIsWidevine
- // could still be set to true later, if the streaming or file source
- // is sniffed to be widevine. We don't want to buffer for file source
- // in that case, so must check the flag now.
- mIsStreaming = (mIsWidevine || mCachedSource != NULL);
+ // For cached streaming cases, we need to wait for enough
+ // buffering before reporting prepared.
+ mIsStreaming = (mCachedSource != NULL);
// init extractor from data source
status_t err = initFromDataSource();
@@ -450,6 +395,9 @@
if (mIsSecure) {
// secure decoders must be instantiated before starting widevine source
+ //
+ // TODO: mIsSecure and FLAG_SECURE may be obsolete, revisit after
+ // removing widevine
sp<AMessage> reply = new AMessage(kWhatSecureDecodersInstantiated, this);
notifyInstantiateSecureDecoders(reply);
} else {
@@ -476,7 +424,7 @@
if (mIsStreaming) {
if (mBufferingMonitorLooper == NULL) {
- mBufferingMonitor->prepare(mCachedSource, mWVMExtractor, mDurationUs, mBitrate,
+ mBufferingMonitor->prepare(mCachedSource, mDurationUs, mBitrate,
mIsStreaming);
mBufferingMonitorLooper = new ALooper;
@@ -536,12 +484,6 @@
// nothing to do, just account for DRM playback status
setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
mStarted = false;
- if (mIsWidevine || mIsSecure) {
- // For widevine or secure sources we need to prevent any further reads.
- sp<AMessage> msg = new AMessage(kWhatStopWidevine, this);
- sp<AMessage> response;
- (void) msg->postAndAwaitResponse(&response);
- }
}
void NuPlayer::GenericSource::pause() {
@@ -665,8 +607,10 @@
} else {
timeUs = mVideoLastDequeueTimeUs;
}
- readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
- readBuffer(counterpartType, -1, NULL, !formatChange);
+ readBuffer(trackType, timeUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ &actualTimeUs, formatChange);
+ readBuffer(counterpartType, -1, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ NULL, !formatChange);
ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
break;
@@ -717,20 +661,6 @@
break;
}
- case kWhatStopWidevine:
- {
- // mStopRead is only used for Widevine to prevent the video source
- // from being read while the associated video decoder is shutting down.
- mStopRead = true;
- if (mVideoTrack.mSource != NULL) {
- mVideoTrack.mPackets->clear();
- }
- sp<AMessage> response = new AMessage;
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
default:
Source::onMessageReceived(msg);
break;
@@ -759,7 +689,7 @@
CHECK(msg->findInt64("timeUs", &timeUs));
int64_t subTimeUs;
- readBuffer(type, timeUs, &subTimeUs);
+ readBuffer(type, timeUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &subTimeUs);
int64_t delayUs = subTimeUs - timeUs;
if (msg->what() == kWhatFetchSubtitleData) {
@@ -790,7 +720,7 @@
}
int64_t nextSubTimeUs;
- readBuffer(type, -1, &nextSubTimeUs);
+ readBuffer(type, -1, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &nextSubTimeUs);
sp<ABuffer> buffer;
status_t dequeueStatus = packets->dequeueAccessUnit(&buffer);
@@ -886,11 +816,6 @@
return -EWOULDBLOCK;
}
- if (mIsWidevine && !audio) {
- // try to read a buffer as we may not have been able to the last time
- postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
- }
-
status_t finalResult;
if (!track->mPackets->hasBufferAvailable(&finalResult)) {
if (finalResult == OK) {
@@ -1186,9 +1111,10 @@
return INVALID_OPERATION;
}
-status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
+status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
@@ -1201,10 +1127,12 @@
void NuPlayer::GenericSource::onSeek(const sp<AMessage>& msg) {
int64_t seekTimeUs;
+ int32_t mode;
CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
sp<AMessage> response = new AMessage;
- status_t err = doSeek(seekTimeUs);
+ status_t err = doSeek(seekTimeUs, (MediaPlayerSeekMode)mode);
response->setInt32("err", err);
sp<AReplyToken> replyID;
@@ -1212,20 +1140,25 @@
response->postReply(replyID);
}
-status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
+status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
mBufferingMonitor->updateDequeuedBufferTime(-1ll);
// If the Widevine source is stopped, do not attempt to read any
// more buffers.
+ //
+ // TODO: revisit after widevine is removed. May be able to
+ // combine mStopRead with mStarted.
if (mStopRead) {
return INVALID_OPERATION;
}
if (mVideoTrack.mSource != NULL) {
int64_t actualTimeUs;
- readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
+ readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
- seekTimeUs = actualTimeUs;
- mVideoLastDequeueTimeUs = seekTimeUs;
+ if (mode != MediaPlayerSeekMode::SEEK_CLOSEST) {
+ seekTimeUs = actualTimeUs;
+ }
+ mVideoLastDequeueTimeUs = actualTimeUs;
}
if (mAudioTrack.mSource != NULL) {
@@ -1249,9 +1182,7 @@
sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer(
MediaBuffer* mb,
- media_track_type trackType,
- int64_t /* seekTimeUs */,
- int64_t *actualTimeUs) {
+ media_track_type trackType) {
bool audio = trackType == MEDIA_TRACK_TYPE_AUDIO;
size_t outLength = mb->range_length();
@@ -1288,16 +1219,6 @@
CHECK(mb->meta_data()->findInt64(kKeyTime, &timeUs));
meta->setInt64("timeUs", timeUs);
-#if 0
- // Temporarily disable pre-roll till we have a full solution to handle
- // both single seek and continous seek gracefully.
- if (seekTimeUs > timeUs) {
- sp<AMessage> extra = new AMessage;
- extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
- meta->setMessage("extra", extra);
- }
-#endif
-
if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
int32_t layerId;
if (mb->meta_data()->findInt32(kKeyTemporalLayerId, &layerId)) {
@@ -1337,10 +1258,6 @@
meta->setBuffer("mpegUserData", mpegUserData);
}
- if (actualTimeUs) {
- *actualTimeUs = timeUs;
- }
-
mb->release();
mb = NULL;
@@ -1372,8 +1289,12 @@
}
void NuPlayer::GenericSource::readBuffer(
- media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
+ media_track_type trackType, int64_t seekTimeUs, MediaPlayerSeekMode mode,
+ int64_t *actualTimeUs, bool formatChange) {
// Do not read data if Widevine source is stopped
+ //
+ // TODO: revisit after widevine is removed. May be able to
+ // combine mStopRead with mStarted.
if (mStopRead) {
return;
}
@@ -1382,19 +1303,11 @@
switch (trackType) {
case MEDIA_TRACK_TYPE_VIDEO:
track = &mVideoTrack;
- if (mIsWidevine) {
- maxBuffers = 2;
- } else {
- maxBuffers = 8; // too large of a number may influence seeks
- }
+ maxBuffers = 8; // too large of a number may influence seeks
break;
case MEDIA_TRACK_TYPE_AUDIO:
track = &mAudioTrack;
- if (mIsWidevine) {
- maxBuffers = 8;
- } else {
- maxBuffers = 64;
- }
+ maxBuffers = 64;
break;
case MEDIA_TRACK_TYPE_SUBTITLE:
track = &mSubtitleTrack;
@@ -1418,13 +1331,13 @@
bool seeking = false;
if (seekTimeUs >= 0) {
- options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+ options.setSeekTo(seekTimeUs, mode);
seeking = true;
}
- const bool couldReadMultiple = (!mIsWidevine && track->mSource->supportReadMultiple());
+ const bool couldReadMultiple = (track->mSource->supportReadMultiple());
- if (mIsWidevine || couldReadMultiple) {
+ if (couldReadMultiple) {
options.setNonBlocking();
}
@@ -1465,9 +1378,20 @@
queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
- sp<ABuffer> buffer = mediaBufferToABuffer(
- mbuf, trackType, seekTimeUs,
- numBuffers == 0 ? actualTimeUs : NULL);
+ sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType);
+ if (numBuffers == 0 && actualTimeUs != nullptr) {
+ *actualTimeUs = timeUs;
+ }
+ if (seeking && buffer != nullptr) {
+ sp<AMessage> meta = buffer->meta();
+ if (meta != nullptr && mode == MediaPlayerSeekMode::SEEK_CLOSEST
+ && seekTimeUs > timeUs) {
+ sp<AMessage> extra = new AMessage;
+ extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
+ meta->setMessage("extra", extra);
+ }
+ }
+
track->mPackets->queueAccessUnit(buffer);
formatChange = false;
seeking = false;
@@ -1535,17 +1459,16 @@
void NuPlayer::GenericSource::BufferingMonitor::prepare(
const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming) {
Mutex::Autolock _l(mLock);
- prepare_l(cachedSource, wvmExtractor, durationUs, bitrate, isStreaming);
+ prepare_l(cachedSource, durationUs, bitrate, isStreaming);
}
void NuPlayer::GenericSource::BufferingMonitor::stop() {
Mutex::Autolock _l(mLock);
- prepare_l(NULL /* cachedSource */, NULL /* wvmExtractor */, -1 /* durationUs */,
+ prepare_l(NULL /* cachedSource */, -1 /* durationUs */,
-1 /* bitrate */, false /* isStreaming */);
}
@@ -1600,22 +1523,17 @@
void NuPlayer::GenericSource::BufferingMonitor::prepare_l(
const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming) {
- ALOGW_IF(wvmExtractor != NULL && cachedSource != NULL,
- "WVMExtractor and NuCachedSource are both present when "
- "BufferingMonitor::prepare_l is called, ignore NuCachedSource");
mCachedSource = cachedSource;
- mWVMExtractor = wvmExtractor;
mDurationUs = durationUs;
mBitrate = bitrate;
mIsStreaming = isStreaming;
mAudioTimeUs = 0;
mVideoTimeUs = 0;
- mPrepareBuffering = (cachedSource != NULL || wvmExtractor != NULL);
+ mPrepareBuffering = (cachedSource != NULL);
cancelPollBuffering_l();
mOffloadAudio = false;
mFirstDequeuedBufferRealUs = -1ll;
@@ -1699,9 +1617,7 @@
int32_t kbps = 0;
status_t err = UNKNOWN_ERROR;
- if (mWVMExtractor != NULL) {
- err = mWVMExtractor->getEstimatedBandwidthKbps(&kbps);
- } else if (mCachedSource != NULL) {
+ if (mCachedSource != NULL) {
err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
}
@@ -1741,10 +1657,7 @@
int64_t cachedDurationUs = -1ll;
ssize_t cachedDataRemaining = -1;
- if (mWVMExtractor != NULL) {
- cachedDurationUs =
- mWVMExtractor->getCachedDurationUs(&finalStatus);
- } else if (mCachedSource != NULL) {
+ if (mCachedSource != NULL) {
cachedDataRemaining =
mCachedSource->approxDataRemaining(&finalStatus);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 0957778..38d8616 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -37,7 +37,6 @@
struct MediaSource;
class MediaBuffer;
struct NuCachedSource2;
-class WVMExtractor;
struct NuPlayer::GenericSource : public NuPlayer::Source {
GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
@@ -71,7 +70,9 @@
virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
virtual ssize_t getSelectedTrack(media_track_type type) const;
virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
- virtual status_t seekTo(int64_t seekTimeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
@@ -101,7 +102,6 @@
kWhatSelectTrack,
kWhatSeek,
kWhatReadBuffer,
- kWhatStopWidevine,
kWhatStart,
kWhatResume,
kWhatSecureDecodersInstantiated,
@@ -121,7 +121,6 @@
// Set up state.
void prepare(const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming);
@@ -155,7 +154,6 @@
sp<AMessage> mNotify;
sp<NuCachedSource2> mCachedSource;
- sp<WVMExtractor> mWVMExtractor;
int64_t mDurationUs;
int64_t mBitrate;
bool mIsStreaming;
@@ -175,7 +173,6 @@
int64_t mlastDequeuedBufferMediaUs;
void prepare_l(const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming);
@@ -204,7 +201,6 @@
int32_t mFetchTimedTextDataGeneration;
int64_t mDurationUs;
bool mAudioIsVorbis;
- bool mIsWidevine;
bool mIsSecure;
bool mIsStreaming;
bool mUIDValid;
@@ -219,7 +215,6 @@
sp<DataSource> mDataSource;
sp<NuCachedSource2> mCachedSource;
sp<DataSource> mHttpSource;
- sp<WVMExtractor> mWVMExtractor;
sp<MetaData> mFileMeta;
DrmManagerClient *mDrmManagerClient;
sp<DecryptHandle> mDecryptHandle;
@@ -258,7 +253,7 @@
status_t doSelectTrack(size_t trackIndex, bool select, int64_t timeUs);
void onSeek(const sp<AMessage>& msg);
- status_t doSeek(int64_t seekTimeUs);
+ status_t doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
void onPrepareAsync();
@@ -276,15 +271,20 @@
sp<ABuffer> mediaBufferToABuffer(
MediaBuffer *mbuf,
- media_track_type trackType,
- int64_t seekTimeUs,
- int64_t *actualTimeUs = NULL);
+ media_track_type trackType);
void postReadBuffer(media_track_type trackType);
void onReadBuffer(const sp<AMessage>& msg);
+ // When |mode| is MediaPlayerSeekMode::SEEK_CLOSEST, the buffer that is read
+ // carries an item telling the renderer to skip rendering all buffers whose
+ // timestamp is earlier than |seekTimeUs|.
+ // For other modes, the buffer omits this item so that the seek stays fast.
void readBuffer(
media_track_type trackType,
- int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
+ int64_t seekTimeUs = -1ll,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC,
+ int64_t *actualTimeUs = NULL, bool formatChange = false);
void queueDiscontinuityIfNeeded(
bool seeking, bool formatChange, media_track_type trackType, Track *track);
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index ebba93c..51bfad4 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -214,8 +214,8 @@
return (err == OK || err == BAD_VALUE) ? (status_t)OK : err;
}
-status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs) {
- return mLiveSession->seekTo(seekTimeUs);
+status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+ return mLiveSession->seekTo(seekTimeUs, mode);
}
void NuPlayer::HTTPLiveSource::pollForRawData(
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 574937d..45fc8c1 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -47,7 +47,9 @@
virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
virtual ssize_t getSelectedTrack(media_track_type /* type */) const;
virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
- virtual status_t seekTo(int64_t seekTimeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
protected:
virtual ~HTTPLiveSource();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 0490fd5..1476206 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -70,16 +70,18 @@
};
struct NuPlayer::SeekAction : public Action {
- explicit SeekAction(int64_t seekTimeUs)
- : mSeekTimeUs(seekTimeUs) {
+ explicit SeekAction(int64_t seekTimeUs, MediaPlayerSeekMode mode)
+ : mSeekTimeUs(seekTimeUs),
+ mMode(mode) {
}
virtual void execute(NuPlayer *player) {
- player->performSeek(mSeekTimeUs);
+ player->performSeek(mSeekTimeUs, mMode);
}
private:
int64_t mSeekTimeUs;
+ MediaPlayerSeekMode mMode;
DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
};
@@ -261,9 +263,6 @@
} else {
sp<GenericSource> genericSource =
new GenericSource(notify, mUIDValid, mUID);
- // Don't set FLAG_SECURE on mSourceFlags here for widevine.
- // The correct flags will be updated in Source::kWhatFlagsChanged
- // handler when GenericSource is prepared.
status_t err = genericSource->setDataSource(httpService, url, headers);
@@ -420,9 +419,10 @@
(new AMessage(kWhatReset, this))->post();
}
-void NuPlayer::seekToAsync(int64_t seekTimeUs, bool needNotify) {
+void NuPlayer::seekToAsync(int64_t seekTimeUs, MediaPlayerSeekMode mode, bool needNotify) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
msg->setInt32("needNotify", needNotify);
msg->post();
}
@@ -681,7 +681,8 @@
int64_t currentPositionUs = 0;
if (getCurrentPosition(&currentPositionUs) == OK) {
mDeferredActions.push_back(
- new SeekAction(currentPositionUs));
+ new SeekAction(currentPositionUs,
+ MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
}
}
@@ -1197,12 +1198,14 @@
case kWhatSeek:
{
int64_t seekTimeUs;
+ int32_t mode;
int32_t needNotify;
CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
CHECK(msg->findInt32("needNotify", &needNotify));
- ALOGV("kWhatSeek seekTimeUs=%lld us, needNotify=%d",
- (long long)seekTimeUs, needNotify);
+ ALOGV("kWhatSeek seekTimeUs=%lld us, mode=%d, needNotify=%d",
+ (long long)seekTimeUs, mode, needNotify);
if (!mStarted) {
// Seek before the player is started. In order to preview video,
@@ -1210,7 +1213,7 @@
// only once if needed. After the player is started, any seek
// operation will go through normal path.
// Audio-only cases are handled separately.
- onStart(seekTimeUs);
+ onStart(seekTimeUs, (MediaPlayerSeekMode)mode);
if (mStarted) {
onPause();
mPausedByClient = true;
@@ -1226,7 +1229,7 @@
FLUSH_CMD_FLUSH /* video */));
mDeferredActions.push_back(
- new SeekAction(seekTimeUs));
+ new SeekAction(seekTimeUs, (MediaPlayerSeekMode)mode));
// After a flush without shutdown, decoder is paused.
// Don't resume it until source seek is done, otherwise it could
@@ -1315,13 +1318,13 @@
return OK;
}
-void NuPlayer::onStart(int64_t startPositionUs) {
+void NuPlayer::onStart(int64_t startPositionUs, MediaPlayerSeekMode mode) {
if (!mSourceStarted) {
mSourceStarted = true;
mSource->start();
}
if (startPositionUs > 0) {
- performSeek(startPositionUs);
+ performSeek(startPositionUs, mode);
if (mSource->getFormat(false /* audio */) == NULL) {
return;
}
@@ -1537,7 +1540,7 @@
mRenderer->flush(false /* audio */, false /* notifyComplete */);
}
- performSeek(currentPositionUs);
+ performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
if (forceNonOffload) {
mRenderer->signalDisableOffloadAudio();
@@ -1641,7 +1644,7 @@
} else {
mSource->setOffloadAudio(false /* offload */);
- *decoder = new Decoder(notify, mSource, mPID, mRenderer);
+ *decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer);
}
} else {
sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
@@ -1649,7 +1652,7 @@
notify->setInt32("generation", mVideoDecoderGeneration);
*decoder = new Decoder(
- notify, mSource, mPID, mRenderer, mSurface, mCCDecoder);
+ notify, mSource, mPID, mUID, mRenderer, mSurface, mCCDecoder);
// enable FRC if high-quality AV sync is requested, even if not
// directly queuing to display, as this will even improve textureview
@@ -1665,29 +1668,6 @@
(*decoder)->init();
(*decoder)->configure(format);
- // allocate buffers to decrypt widevine source buffers
- if (!audio && (mSourceFlags & Source::FLAG_SECURE)) {
- Vector<sp<MediaCodecBuffer> > inputBufs;
- CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK);
-
- Vector<MediaBuffer *> mediaBufs;
- for (size_t i = 0; i < inputBufs.size(); i++) {
- const sp<MediaCodecBuffer> &buffer = inputBufs[i];
- MediaBuffer *mbuf = new MediaBuffer(buffer->data(), buffer->size());
- mediaBufs.push(mbuf);
- }
-
- status_t err = mSource->setBuffers(audio, mediaBufs);
- if (err != OK) {
- for (size_t i = 0; i < mediaBufs.size(); ++i) {
- mediaBufs[i]->release();
- }
- mediaBufs.clear();
- ALOGE("Secure source didn't support secure mediaBufs.");
- return err;
- }
- }
-
if (!audio) {
sp<AMessage> params = new AMessage();
float rate = getFrameRate();
@@ -1994,10 +1974,9 @@
}
}
-void NuPlayer::performSeek(int64_t seekTimeUs) {
- ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)",
- (long long)seekTimeUs,
- seekTimeUs / 1E6);
+void NuPlayer::performSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+ ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), mode=%d",
+ (long long)seekTimeUs, seekTimeUs / 1E6, mode);
if (mSource == NULL) {
// This happens when reset occurs right before the loop mode
@@ -2008,7 +1987,7 @@
return;
}
mPreviousSeekTimeUs = seekTimeUs;
- mSource->seekTo(seekTimeUs);
+ mSource->seekTo(seekTimeUs, mode);
++mTimedTextGeneration;
// everything's flushed, continue playback.
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index a002f6f..c8b0102 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -70,7 +70,10 @@
// Will notify the driver through "notifySeekComplete" once finished
// and needNotify is true.
- void seekToAsync(int64_t seekTimeUs, bool needNotify = false);
+ void seekToAsync(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC,
+ bool needNotify = false);
status_t setVideoScalingMode(int32_t mode);
status_t getTrackInfo(Parcel* reply) const;
@@ -245,7 +248,9 @@
void handleFlushComplete(bool audio, bool isDecoder);
void finishFlushIfPossible();
- void onStart(int64_t startPositionUs = -1);
+ void onStart(
+ int64_t startPositionUs = -1,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
void onResume();
void onPause();
@@ -263,7 +268,7 @@
void processDeferredActions();
- void performSeek(int64_t seekTimeUs);
+ void performSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
void performDecoderFlush(FlushCommand audio, FlushCommand video);
void performReset();
void performScanSources();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index c3274f0..e1d426a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -58,6 +58,7 @@
const sp<AMessage> &notify,
const sp<Source> &source,
pid_t pid,
+ uid_t uid,
const sp<Renderer> &renderer,
const sp<Surface> &surface,
const sp<CCDecoder> &ccDecoder)
@@ -67,6 +68,7 @@
mRenderer(renderer),
mCCDecoder(ccDecoder),
mPid(pid),
+ mUid(uid),
mSkipRenderingUntilMediaTimeUs(-1ll),
mNumFramesTotal(0ll),
mNumInputFramesDropped(0ll),
@@ -266,7 +268,7 @@
ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
mCodec = MediaCodec::CreateByType(
- mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid);
+ mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid);
int32_t secure = 0;
if (format->findInt32("secure", &secure) && secure != 0) {
if (mCodec != NULL) {
@@ -275,7 +277,7 @@
mCodec->release();
ALOGI("[%s] creating", mComponentName.c_str());
mCodec = MediaCodec::CreateByComponentName(
- mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid);
+ mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid, mUid);
}
}
if (mCodec == NULL) {
@@ -409,17 +411,7 @@
}
void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
- bool hadNoRenderer = (mRenderer == NULL);
mRenderer = renderer;
- if (hadNoRenderer && mRenderer != NULL) {
- // this means that the widevine legacy source is ready
- onRequestInputBuffers();
- }
-}
-
-void NuPlayer::Decoder::onGetInputBuffers(
- Vector<sp<MediaCodecBuffer> > *dstBuffers) {
- CHECK_EQ((status_t)OK, mCodec->getWidevineLegacyBuffers(dstBuffers));
}
void NuPlayer::Decoder::onResume(bool notifyComplete) {
@@ -516,9 +508,7 @@
* returns true if we should request more data
*/
bool NuPlayer::Decoder::doRequestBuffers() {
- // mRenderer is only NULL if we have a legacy widevine source that
- // is not yet ready. In this case we must not fetch input.
- if (isDiscontinuityPending() || mRenderer == NULL) {
+ if (isDiscontinuityPending()) {
return false;
}
status_t err = OK;
@@ -709,6 +699,10 @@
flags = AUDIO_OUTPUT_FLAG_NONE;
}
+ // TODO: This is a temporary fix to flush audio buffers in the renderer. The
+ // real fix should be to wait until all buffers have been rendered normally,
+ // then open a new AudioSink.
+ mRenderer->flush(true /* audio */, false /* notifyComplete */);
status_t err = mRenderer->openAudioSink(
format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaed */);
if (err != OK) {
@@ -872,40 +866,6 @@
bool hasBuffer = msg->findBuffer("buffer", &buffer);
bool needsCopy = true;
- // handle widevine classic source - that fills an arbitrary input buffer
- MediaBuffer *mediaBuffer = NULL;
- if (hasBuffer) {
- mediaBuffer = (MediaBuffer *)(buffer->getMediaBufferBase());
- if (mediaBuffer != NULL) {
- // likely filled another buffer than we requested: adjust buffer index
- size_t ix;
- for (ix = 0; ix < mInputBuffers.size(); ix++) {
- const sp<MediaCodecBuffer> &buf = mInputBuffers[ix];
- if (buf->data() == mediaBuffer->data()) {
- // all input buffers are dequeued on start, hence the check
- if (!mInputBufferIsDequeued[ix]) {
- ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu",
- mComponentName.c_str(), ix, bufferIx);
- mediaBuffer->release();
- return false;
- }
-
- // TRICKY: need buffer for the metadata, so instead, set
- // codecBuffer to the same (though incorrect) buffer to
- // avoid a memcpy into the codecBuffer
- codecBuffer = new MediaCodecBuffer(codecBuffer->format(), buffer);
- codecBuffer->setRange(
- mediaBuffer->range_offset(),
- mediaBuffer->range_length());
- bufferIx = ix;
- needsCopy = false;
- break;
- }
- }
- CHECK(ix < mInputBuffers.size());
- }
- }
-
if (buffer == NULL /* includes !hasBuffer */) {
int32_t streamErr = ERROR_END_OF_STREAM;
CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
@@ -975,18 +935,11 @@
timeUs,
flags);
if (err != OK) {
- if (mediaBuffer != NULL) {
- mediaBuffer->release();
- }
ALOGE("Failed to queue input buffer for %s (err=%d)",
mComponentName.c_str(), err);
handleError(err);
} else {
mInputBufferIsDequeued.editItemAt(bufferIx) = false;
- if (mediaBuffer != NULL) {
- CHECK(mMediaBuffers[bufferIx] == NULL);
- mMediaBuffers.editItemAt(bufferIx) = mediaBuffer;
- }
}
}
return true;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 8186862..fcf601b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -29,6 +29,7 @@
Decoder(const sp<AMessage> &notify,
const sp<Source> &source,
pid_t pid,
+ uid_t uid,
const sp<Renderer> &renderer = NULL,
const sp<Surface> &surface = NULL,
const sp<CCDecoder> &ccDecoder = NULL);
@@ -46,7 +47,6 @@
virtual void onConfigure(const sp<AMessage> &format);
virtual void onSetParameters(const sp<AMessage> &params);
virtual void onSetRenderer(const sp<Renderer> &renderer);
- virtual void onGetInputBuffers(Vector<sp<MediaCodecBuffer> > *dstBuffers);
virtual void onResume(bool notifyComplete);
virtual void onFlush();
virtual void onShutdown(bool notifyComplete);
@@ -85,6 +85,7 @@
Vector<size_t> mDequeuedInputBuffers;
const pid_t mPid;
+ const uid_t mUid;
int64_t mSkipRenderingUntilMediaTimeUs;
int64_t mNumFramesTotal;
int64_t mNumInputFramesDropped;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index 9c007ae..1210dc9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -92,14 +92,6 @@
PostAndAwaitResponse(msg, &response);
}
-status_t NuPlayer::DecoderBase::getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
- sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, this);
- msg->setPointer("buffers", buffers);
-
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
-}
-
void NuPlayer::DecoderBase::signalFlush() {
(new AMessage(kWhatFlush, this))->post();
}
@@ -166,20 +158,6 @@
break;
}
- case kWhatGetInputBuffers:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- Vector<sp<MediaCodecBuffer> > *dstBuffers;
- CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
-
- onGetInputBuffers(dstBuffers);
-
- (new AMessage)->postReply(replyID);
- break;
- }
-
case kWhatRequestInputBuffers:
{
mRequestInputBuffersPending = false;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index 6f4ead6..6811903 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -43,7 +43,6 @@
void setRenderer(const sp<Renderer> &renderer);
virtual status_t setVideoSurface(const sp<Surface> &) { return INVALID_OPERATION; }
- status_t getInputBuffers(Vector<sp<MediaCodecBuffer> > *dstBuffers) const;
void signalFlush();
void signalResume(bool notifyComplete);
void initiateShutdown();
@@ -71,7 +70,6 @@
virtual void onConfigure(const sp<AMessage> &format) = 0;
virtual void onSetParameters(const sp<AMessage> &params) = 0;
virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
- virtual void onGetInputBuffers(Vector<sp<MediaCodecBuffer> > *dstBuffers) = 0;
virtual void onResume(bool notifyComplete) = 0;
virtual void onFlush() = 0;
virtual void onShutdown(bool notifyComplete) = 0;
@@ -91,7 +89,6 @@
kWhatSetParameters = 'setP',
kWhatSetRenderer = 'setR',
kWhatPause = 'paus',
- kWhatGetInputBuffers = 'gInB',
kWhatRequestInputBuffers = 'reqB',
kWhatFlush = 'flus',
kWhatShutdown = 'shuD',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index e4767ff..cb668e4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -93,11 +93,6 @@
"ignoring request to change renderer");
}
-void NuPlayer::DecoderPassThrough::onGetInputBuffers(
- Vector<sp<MediaCodecBuffer> > * /* dstBuffers */) {
- ALOGE("onGetInputBuffers() called unexpectedly");
-}
-
bool NuPlayer::DecoderPassThrough::isStaleReply(const sp<AMessage> &msg) {
int32_t generation;
CHECK(msg->findInt32("generation", &generation));
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index 9af25ff..173387a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -38,7 +38,6 @@
virtual void onConfigure(const sp<AMessage> &format);
virtual void onSetParameters(const sp<AMessage> &params);
virtual void onSetRenderer(const sp<Renderer> &renderer);
- virtual void onGetInputBuffers(Vector<sp<MediaCodecBuffer> > *dstBuffers);
virtual void onResume(bool notifyComplete);
virtual void onFlush();
virtual void onShutdown(bool notifyComplete);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 0f4dce9..3efa54c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -208,7 +208,8 @@
mAtEOS = false;
mState = STATE_STOPPED_AND_PREPARING;
mIsAsyncPrepare = false;
- mPlayer->seekToAsync(0, true /* needNotify */);
+ mPlayer->seekToAsync(0, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ true /* needNotify */);
while (mState == STATE_STOPPED_AND_PREPARING) {
mCondition.wait(mLock);
}
@@ -233,7 +234,8 @@
mAtEOS = false;
mState = STATE_STOPPED_AND_PREPARING;
mIsAsyncPrepare = true;
- mPlayer->seekToAsync(0, true /* needNotify */);
+ mPlayer->seekToAsync(0, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ true /* needNotify */);
return OK;
default:
return INVALID_OPERATION;
@@ -382,8 +384,8 @@
return mPlayer->getSyncSettings(sync, videoFps);
}
-status_t NuPlayerDriver::seekTo(int msec) {
- ALOGD("seekTo(%p) %d ms at state %d", this, msec, mState);
+status_t NuPlayerDriver::seekTo(int msec, MediaPlayerSeekMode mode) {
+ ALOGD("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
Mutex::Autolock autoLock(mLock);
int64_t seekTimeUs = msec * 1000ll;
@@ -398,7 +400,7 @@
mSeekInProgress = true;
// seeks can take a while, so we essentially paused
notifyListener_l(MEDIA_PAUSED);
- mPlayer->seekToAsync(seekTimeUs, true /* needNotify */);
+ mPlayer->seekToAsync(seekTimeUs, mode, true /* needNotify */);
break;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 58008f0..317b34c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -53,7 +53,8 @@
virtual status_t getPlaybackSettings(AudioPlaybackRate *rate);
virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps);
- virtual status_t seekTo(int msec);
+ virtual status_t seekTo(
+ int msec, MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
virtual status_t getCurrentPosition(int *msec);
virtual status_t getDuration(int *msec);
virtual status_t reset();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 3a96138..5197167 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -104,7 +104,9 @@
return INVALID_OPERATION;
}
- virtual status_t seekTo(int64_t /* seekTimeUs */) {
+ virtual status_t seekTo(
+ int64_t /* seekTimeUs */,
+ MediaPlayerSeekMode /* mode */ = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) {
return INVALID_OPERATION;
}
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 7ce909d..fb1f31a 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -279,10 +279,11 @@
return OK;
}
-status_t NuPlayer::RTSPSource::seekTo(int64_t seekTimeUs) {
+status_t NuPlayer::RTSPSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
msg->setInt32("generation", ++mSeekGeneration);
msg->setInt64("timeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
@@ -465,9 +466,12 @@
}
int64_t seekTimeUs;
+ int32_t mode;
CHECK(msg->findInt64("timeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
- performSeek(seekTimeUs);
+ // TODO: add "mode" to performSeek.
+ performSeek(seekTimeUs/*, (MediaPlayerSeekMode)mode */);
return;
} else if (msg->what() == kWhatPollBuffering) {
onPollBuffering();
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index c7834ef..363f8bb 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -49,7 +49,9 @@
virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
virtual status_t getDuration(int64_t *durationUs);
- virtual status_t seekTo(int64_t seekTimeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
void onMessageReceived(const sp<AMessage> &msg);
diff --git a/media/liboboe/README.md b/media/liboboe/README.md
new file mode 100644
index 0000000..80894c6
--- /dev/null
+++ b/media/liboboe/README.md
@@ -0,0 +1 @@
+Oboe Audio input/output API
diff --git a/media/liboboe/include/oboe/OboeAudio.h b/media/liboboe/include/oboe/OboeAudio.h
new file mode 100644
index 0000000..32fef1b
--- /dev/null
+++ b/media/liboboe/include/oboe/OboeAudio.h
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This is the 'C' ABI for Oboe.
+ */
+#ifndef OBOE_OBOEAUDIO_H
+#define OBOE_OBOEAUDIO_H
+
+#include "OboeDefinitions.h"
+
+typedef int32_t OboeDeviceId;
+typedef oboe_handle_t OboeStream;
+typedef oboe_handle_t OboeStreamBuilder;
+typedef oboe_handle_t OboeThread;
+
+#define OBOE_STREAM_NONE ((OboeStream)OBOE_HANDLE_INVALID)
+#define OBOE_STREAM_BUILDER_NONE ((OboeStreamBuilder)OBOE_HANDLE_INVALID)
+
+
+// ============================================================
+// Audio System
+// ============================================================
+
+/**
+ * @return time in the same clock domain as the timestamps
+ */
+oboe_nanoseconds_t Oboe_getNanoseconds(oboe_clockid_t clockid);
+
+/**
+ * The text is the ASCII symbol corresponding to the returnCode,
+ * or an English message saying the returnCode is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an Oboe result code.
+ */
+const char * Oboe_convertResultToText(oboe_result_t returnCode);
+
+/**
+ * The text is the ASCII symbol corresponding to the stream state,
+ * or an English message saying the state is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an Oboe state.
+ */
+const char * Oboe_convertStreamStateToText(oboe_stream_state_t state);
+
+// ============================================================
+// StreamBuilder
+// ============================================================
+
+/**
+ * Create a StreamBuilder that can be used to open a Stream.
+ *
+ * The deviceId is initially unspecified, meaning that the current default device will be used.
+ *
+ * The default direction is OBOE_DIRECTION_OUTPUT.
+ * The default sharing mode is OBOE_SHARING_MODE_LEGACY.
+ * The data format, samplesPerFrame, and sampleRate are unspecified and will be
+ * chosen by the device when it is opened.
+ *
+ * OboeStreamBuilder_delete() must be called when you are done using the builder.
+ */
+oboe_result_t Oboe_createStreamBuilder(OboeStreamBuilder *builder);
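+
+/*
+ * A minimal usage sketch (illustrative only; it assumes the stream is opened
+ * with the open call this API provides elsewhere, and released with
+ * OboeStreamBuilder_delete() as noted above):
+ *
+ *   OboeStreamBuilder builder = OBOE_STREAM_BUILDER_NONE;
+ *   oboe_result_t result = Oboe_createStreamBuilder(&builder);
+ *   if (result == OBOE_OK) {
+ *       OboeStreamBuilder_setDirection(builder, OBOE_DIRECTION_OUTPUT);
+ *       OboeStreamBuilder_setSharingMode(builder, OBOE_SHARING_MODE_LEGACY);
+ *       OboeStreamBuilder_setSampleRate(builder, 48000);
+ *       OboeStreamBuilder_setSamplesPerFrame(builder, 2);
+ *       OboeStreamBuilder_setFormat(builder, OBOE_AUDIO_FORMAT_PCM16);
+ *       // ... open the stream, then query the actual rate/format/mode ...
+ *   }
+ */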
+
+/**
+ * Request an audio device identified by an ID.
+ * The ID is platform specific.
+ * On Android, for example, the ID could be obtained from the Java AudioManager.
+ *
+ * By default, the primary device will be used.
+ *
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder, OboeDeviceId deviceId);
+
+/**
+ * Request a sample rate in Hz.
+ * The stream may be opened with a different sample rate.
+ * So the application should query for the actual rate after the stream is opened.
+ *
+ * Technically, this should be called the "frame rate" or "frames per second",
+ * because it refers to the number of complete frames transferred per second.
+ * But it is traditionally called "sample rate", so we use that term.
+ *
+ * Default is OBOE_UNSPECIFIED.
+ *
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
+ oboe_sample_rate_t sampleRate);
+
+/**
+ * Returns sample rate in Hertz (samples per second).
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
+ oboe_sample_rate_t *sampleRate);
+
+
+/**
+ * Request a number of samples per frame.
+ * The stream may be opened with a different value.
+ * So the application should query for the actual value after the stream is opened.
+ *
+ * Default is OBOE_UNSPECIFIED.
+ *
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_setSamplesPerFrame(OboeStreamBuilder builder,
+ int32_t samplesPerFrame);
+
+/**
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_getSamplesPerFrame(OboeStreamBuilder builder,
+ int32_t *samplesPerFrame);
+
+
+/**
+ * Request a sample data format, for example OBOE_AUDIO_FORMAT_PCM16.
+ * The application should query for the actual format after the stream is opened.
+ *
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_setFormat(OboeStreamBuilder builder, oboe_audio_format_t format);
+
+/**
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_getFormat(OboeStreamBuilder builder, oboe_audio_format_t *format);
+
+/**
+ * Request a mode for sharing the device.
+ * The requested sharing mode may not be available.
+ * So the application should query for the actual mode after the stream is opened.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param sharingMode OBOE_SHARING_MODE_LEGACY or OBOE_SHARING_MODE_EXCLUSIVE
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_setSharingMode(OboeStreamBuilder builder,
+ oboe_sharing_mode_t sharingMode);
+
+/**
+ * Return requested sharing mode.
+ * @return OBOE_OK or a negative error
+ */
+oboe_result_t OboeStreamBuilder_getSharingMode(OboeStreamBuilder builder,
+ oboe_sharing_mode_t *sharingMode);
+
+/**
+ * Request the direction for a stream. The default is OBOE_DIRECTION_OUTPUT.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param direction OBOE_DIRECTION_OUTPUT or OBOE_DIRECTION_INPUT
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_setDirection(OboeStreamBuilder builder,
+ oboe_direction_t direction);
+
+/**
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param direction pointer to a variable to be set to the currently requested direction.
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_getDirection(OboeStreamBuilder builder,
+ oboe_direction_t *direction);
+
+/**
+ * Open a stream based on the options in the StreamBuilder.
+ *
+ * OboeStream_close must be called when finished with the stream to recover
+ * the memory and to free the associated resources.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param stream pointer to a variable to receive the new stream handle
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_openStream(OboeStreamBuilder builder, OboeStream *stream);
+
+/**
+ * Delete the resources associated with the StreamBuilder.
+ *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStreamBuilder_delete(OboeStreamBuilder builder);
+
+// ============================================================
+// Stream Control
+// ============================================================
+
+/**
+ * Free the resources associated with a stream created by OboeStreamBuilder_openStream()
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_close(OboeStream stream);
+
+/**
+ * Asynchronously request to start playing the stream. For output streams, one should
+ * write to the stream to fill the buffer before starting.
+ * Otherwise it will underflow.
+ * After this call the state will be in OBOE_STREAM_STATE_STARTING or OBOE_STREAM_STATE_STARTED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_requestStart(OboeStream stream);
+
+/**
+ * Asynchronous request for the stream to pause.
+ * Pausing a stream will freeze the data flow but not flush any buffers.
+ * Use OboeStream_requestStart() to resume playback after a pause.
+ * After this call the state will be in OBOE_STREAM_STATE_PAUSING or OBOE_STREAM_STATE_PAUSED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_requestPause(OboeStream stream);
+
+/**
+ * Asynchronous request for the stream to flush.
+ * Flushing will discard any pending data.
+ * This call only works if the stream is pausing or paused. TODO review
+ * Frame counters are not reset by a flush. They may be advanced.
+ * After this call the state will be in OBOE_STREAM_STATE_FLUSHING or OBOE_STREAM_STATE_FLUSHED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_requestFlush(OboeStream stream);
+
+/**
+ * Asynchronous request for the stream to stop.
+ * The stream will stop after all of the data currently buffered has been played.
+ * After this call the state will be in OBOE_STREAM_STATE_STOPPING or OBOE_STREAM_STATE_STOPPED.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_requestStop(OboeStream stream);
+
+/**
+ * Query the current state, eg. OBOE_STREAM_STATE_PAUSING
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param state pointer to a variable that will be set to the current state
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getState(OboeStream stream, oboe_stream_state_t *state);
+
+/**
+ * Wait until the current state no longer matches the input state.
+ *
+ * <pre><code>
+ * oboe_stream_state_t currentState;
+ * oboe_result_t result = OboeStream_getState(stream, &currentState);
+ * while (result == OBOE_OK && currentState != OBOE_STREAM_STATE_PAUSING) {
+ * result = OboeStream_waitForStateChange(
+ * stream, currentState, &currentState, MY_TIMEOUT_NANOS);
+ * }
+ * </code></pre>
+ *
+ * @param stream A handle provided by OboeStreamBuilder_openStream()
+ * @param inputState The state we want to avoid.
+ * @param nextState Pointer to a variable that will be set to the new state.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_waitForStateChange(OboeStream stream,
+ oboe_stream_state_t inputState,
+ oboe_stream_state_t *nextState,
+ oboe_nanoseconds_t timeoutNanoseconds);
+
+// ============================================================
+// Stream I/O
+// ============================================================
+
+/**
+ * Read data from the stream.
+ *
+ * The call will wait until the read is complete or until it runs out of time.
+ * If timeoutNanoseconds is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for data.
+ *
+ * @param stream A stream created using OboeStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to read. Only complete frames will be transferred.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually read or a negative error.
+ */
+oboe_result_t OboeStream_read(OboeStream stream,
+ void *buffer,
+ int32_t numFrames,
+ oboe_nanoseconds_t timeoutNanoseconds);
+
+/**
+ * Write data to the stream.
+ *
+ * The call will wait until the write is complete or until it runs out of time.
+ * If timeoutNanoseconds is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for room in the buffer.
+ *
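+ * As an illustrative sketch only (not a complete program), a blocking write
+ * loop for interleaved 16-bit stereo data could look like this; the buffer
+ * and frame count below are placeholders:
+ *
+ * <pre><code>
+ * int16_t pcm[2 * 192];              // 192 frames of interleaved stereo data
+ * int32_t framesLeft = 192;
+ * int16_t *ptr = pcm;
+ * while (framesLeft > 0) {
+ *     oboe_result_t n = OboeStream_write(stream, ptr, framesLeft,
+ *                                        100 * OBOE_NANOS_PER_MILLISECOND);
+ *     if (n < 0) break;              // negative result is an error code
+ *     ptr += n * 2;                  // advance by frames * samplesPerFrame
+ *     framesLeft -= n;
+ * }
+ * </code></pre>
+ *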
+ * @param stream A stream created using OboeStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to write. Only complete frames will be written.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually written or a negative error.
+ */
+oboe_result_t OboeStream_write(OboeStream stream,
+ const void *buffer,
+ int32_t numFrames,
+ oboe_nanoseconds_t timeoutNanoseconds);
+
+
+// ============================================================
+// High priority audio threads
+// ============================================================
+
+/**
+ * Create a thread with special properties for low latency audio performance.
+ * This thread can be used to implement a callback API.
+ *
+ * Note that this API is in flux.
+ *
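+ * A hedged sketch of how these calls might be combined; the entry point and
+ * period below are placeholders, and the calls may change along with the API:
+ *
+ * <pre><code>
+ * OboeThread thread;
+ * void *threadResult = NULL;
+ * Oboe_createAudioThread(&thread, 2 * OBOE_NANOS_PER_MILLISECOND,
+ *                        myAudioThreadProc, myArg);
+ * // ... later, wait up to one second for the thread to exit ...
+ * Oboe_joinAudioThread(thread, &threadResult, OBOE_NANOS_PER_SECOND);
+ * </code></pre>
+ *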
+ * @param threadHandlePtr a pointer to receive a thread handle
+ * @param periodNanoseconds the estimated period at which the audio thread will need to wake up
+ * @param start_routine your thread entry point
+ * @param arg an argument that will be passed to your thread entry point
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t Oboe_createAudioThread(OboeThread *threadHandlePtr,
+ oboe_nanoseconds_t periodNanoseconds,
+ void *(*start_routine)(void *), void *arg);
+
+/**
+ * Wait until the thread exits or an error occurs.
+ * The thread handle will be deleted.
+ *
+ * @param thread the thread handle passed back from Oboe_createAudioThread()
+ * @param returnArg a pointer to a variable to receive the return value
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t Oboe_joinAudioThread(OboeThread thread,
+ void **returnArg,
+ oboe_nanoseconds_t timeoutNanoseconds);
+
+// ============================================================
+// Stream - queries
+// ============================================================
+
+
+/**
+ * This can be used to adjust the latency of the buffer by changing
+ * the threshold where blocking will occur.
+ * By combining this with OboeStream_getUnderrunCount(), the latency can be tuned
+ * at run-time for each device.
+ *
+ * This cannot be set higher than OboeStream_getBufferCapacity().
+ *
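+ * One possible tuning strategy, sketched here with an arbitrary step size,
+ * is to grow the buffer by one burst whenever new underruns are observed:
+ *
+ * <pre><code>
+ * int32_t previousXRuns = 0;
+ * oboe_size_frames_t burst;
+ * OboeStream_getFramesPerBurst(stream, &burst);
+ * // ... periodically, e.g. once per second ...
+ * int32_t xRuns;
+ * OboeStream_getXRunCount(stream, &xRuns);
+ * if (xRuns > previousXRuns) {
+ *     oboe_size_frames_t size;
+ *     OboeStream_getBufferSize(stream, &size);
+ *     OboeStream_setBufferSize(stream, size + burst);
+ *     previousXRuns = xRuns;
+ * }
+ * </code></pre>
+ *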
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames requested number of frames that can be filled without blocking
+ * @return actual number of frames or a negative error
+ */
+oboe_result_t OboeStream_setBufferSize(OboeStream stream, oboe_size_frames_t frames);
+
+/**
+ * Query the maximum number of frames that can be filled without blocking.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer size
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getBufferSize(OboeStream stream, oboe_size_frames_t *frames);
+
+/**
+ * Query the number of frames that are read or written by the endpoint at one time.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the burst size
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getFramesPerBurst(OboeStream stream, oboe_size_frames_t *frames);
+
+/**
+ * Query maximum buffer capacity in frames.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer capacity
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getBufferCapacity(OboeStream stream, oboe_size_frames_t *frames);
+
+/**
+ * An XRun is an Underrun or an Overrun.
+ * During playing, an underrun will occur if the stream is not written in time
+ * and the system runs out of valid data.
+ * During recording, an overrun will occur if the stream is not read in time
+ * and there is no place to put the incoming data so it is discarded.
+ *
+ * An underrun or overrun can cause an audible "pop" or "glitch".
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param xRunCount pointer to variable to receive the underrun or overrun count
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getXRunCount(OboeStream stream, int32_t *xRunCount);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param sampleRate pointer to variable to receive the actual sample rate
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getSampleRate(OboeStream stream, int32_t *sampleRate);
+
+/**
+ * The samplesPerFrame is also known as channelCount.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param samplesPerFrame pointer to variable to receive the actual samples per frame
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getSamplesPerFrame(OboeStream stream, int32_t *samplesPerFrame);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param format pointer to variable to receive the actual data format
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getFormat(OboeStream stream, oboe_audio_format_t *format);
+
+/**
+ * Provide actual sharing mode.
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param sharingMode pointer to variable to receive the actual sharing mode
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getSharingMode(OboeStream stream,
+ oboe_sharing_mode_t *sharingMode);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param direction pointer to a variable to be set to the current direction.
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getDirection(OboeStream stream, uint32_t *direction);
+
+/**
+ * Passes back the number of frames that have been written since the stream was created.
+ * For an output stream, this will be advanced by the application calling write().
+ * For an input stream, this will be advanced by the device or service.
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames written
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getFramesWritten(OboeStream stream, oboe_position_frames_t *frames);
+
+/**
+ * Passes back the number of frames that have been read since the stream was created.
+ * For an output stream, this will be advanced by the device or service.
+ * For an input stream, this will be advanced by the application calling read().
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames read
+ * @return OBOE_OK or a negative error.
+ */
+oboe_result_t OboeStream_getFramesRead(OboeStream stream, oboe_position_frames_t *frames);
+
+/**
+ * Passes back the time at which a particular frame was presented.
+ * This can be used to synchronize audio with video or MIDI.
+ * It can also be used to align a recorded stream with a playback stream.
+ *
+ * Timestamps are only valid when the stream is in OBOE_STREAM_STATE_STARTED.
+ * OBOE_ERROR_INVALID_STATE will be returned if the stream is not started.
+ * Note that because requestStart() is asynchronous, timestamps will not be valid until
+ * a short time after calling requestStart().
+ * So OBOE_ERROR_INVALID_STATE should not be considered a fatal error.
+ * Just try calling again later.
+ *
+ * If an error occurs, then the position and time will not be modified.
+ *
+ * The position and time passed back are monotonically increasing.
+ *
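+ * A minimal retry sketch (the retry policy is up to the application):
+ *
+ * <pre><code>
+ * oboe_position_frames_t position;
+ * oboe_nanoseconds_t time;
+ * oboe_result_t result = OboeStream_getTimestamp(
+ *         stream, OBOE_CLOCK_MONOTONIC, &position, &time);
+ * if (result == OBOE_ERROR_INVALID_STATE) {
+ *     // Stream not fully started yet; try again later.
+ * }
+ * </code></pre>
+ *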
+ * @param stream A handle provided by OboeStreamBuilder_openStream()
+ * @param clockid OBOE_CLOCK_MONOTONIC or OBOE_CLOCK_BOOTTIME
+ * @param framePosition pointer to a variable to receive the position
+ * @param timeNanoseconds pointer to a variable to receive the time
+ * @return OBOE_OK or a negative error
+ */
+oboe_result_t OboeStream_getTimestamp(OboeStream stream,
+ oboe_clockid_t clockid,
+ oboe_position_frames_t *framePosition,
+ oboe_nanoseconds_t *timeNanoseconds);
+
+#endif // OBOE_OBOEAUDIO_H
diff --git a/media/liboboe/include/oboe/OboeDefinitions.h b/media/liboboe/include/oboe/OboeDefinitions.h
new file mode 100644
index 0000000..b3e2deb
--- /dev/null
+++ b/media/liboboe/include/oboe/OboeDefinitions.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOEDEFINITIONS_H
+#define OBOE_OBOEDEFINITIONS_H
+
+#include <stdint.h>
+
+typedef int32_t oboe_handle_t;
+typedef int32_t oboe_result_t;
+typedef int32_t oboe_sample_rate_t;
+/** This is used for small quantities such as the number of frames in a buffer. */
+typedef int32_t oboe_size_frames_t;
+/** This is used for large quantities, such as the number of frames that have
+ * been played since a stream was started.
+ * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
+ */
+typedef int64_t oboe_position_frames_t;
+
+typedef int64_t oboe_nanoseconds_t;
+typedef uint32_t oboe_audio_format_t;
+
+/**
+ * This is used to represent a value that has not been specified.
+ * For example, an application could use OBOE_UNSPECIFIED to indicate
+ * that it did not care what the specific value of a parameter was
+ * and would accept whatever it was given.
+ */
+#define OBOE_UNSPECIFIED 0
+#define OBOE_NANOS_PER_MICROSECOND ((int64_t)1000)
+#define OBOE_NANOS_PER_MILLISECOND (OBOE_NANOS_PER_MICROSECOND * 1000)
+#define OBOE_MILLIS_PER_SECOND 1000
+#define OBOE_NANOS_PER_SECOND (OBOE_NANOS_PER_MILLISECOND * OBOE_MILLIS_PER_SECOND)
+
+#define OBOE_HANDLE_INVALID ((oboe_handle_t)-1)
+
+enum oboe_direction_t {
+ OBOE_DIRECTION_OUTPUT,
+ OBOE_DIRECTION_INPUT,
+ OBOE_DIRECTION_COUNT // This should always be last.
+};
+
+enum oboe_datatype_t {
+ OBOE_AUDIO_DATATYPE_INT16,
+ OBOE_AUDIO_DATATYPE_INT32,
+ OBOE_AUDIO_DATATYPE_INT824,
+ OBOE_AUDIO_DATATYPE_UINT8,
+ OBOE_AUDIO_DATATYPE_FLOAT32, // Add new values below.
+ OBOE_AUDIO_DATATYPE_COUNT // This should always be last.
+};
+
+enum oboe_content_t {
+ OBOE_AUDIO_CONTENT_PCM,
+ OBOE_AUDIO_CONTENT_MP3,
+ OBOE_AUDIO_CONTENT_AAC,
+ OBOE_AUDIO_CONTENT_AC3,
+ OBOE_AUDIO_CONTENT_EAC3,
+ OBOE_AUDIO_CONTENT_DTS,
+ OBOE_AUDIO_CONTENT_DTSHD, // Add new values below.
+ OBOE_AUDIO_CONTENT_COUNT // This should always be last.
+};
+
+enum oboe_wrapper_t {
+ OBOE_AUDIO_WRAPPER_NONE,
+ OBOE_AUDIO_WRAPPER_IEC61937, // Add new values below.
+ OBOE_AUDIO_WRAPPER_COUNT // This should always be last.
+};
+
+/**
+ * Fields packed into oboe_audio_format_t, from most to least significant bits.
+ * Reserved:8
+ * Wrapper:8
+ * Content:8
+ * Data Type:8
+ */
+#define OBOE_AUDIO_FORMAT(dataType, content, wrapper) \
+ ((oboe_audio_format_t)((wrapper << 16) | (content << 8) | dataType))
+
+#define OBOE_AUDIO_FORMAT_RAW(dataType, content) \
+ OBOE_AUDIO_FORMAT(dataType, content, OBOE_AUDIO_WRAPPER_NONE)
+
+#define OBOE_AUDIO_FORMAT_DATA_TYPE(format) \
+ (format & 0x0FF)
+
+// Define some common formats.
+#define OBOE_AUDIO_FORMAT_PCM16 \
+ OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT16, OBOE_AUDIO_CONTENT_PCM)
+#define OBOE_AUDIO_FORMAT_PCM_FLOAT \
+ OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_FLOAT32, OBOE_AUDIO_CONTENT_PCM)
+#define OBOE_AUDIO_FORMAT_PCM824 \
+ OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT824, OBOE_AUDIO_CONTENT_PCM)
+
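+// Illustrative example of the packing above: OBOE_AUDIO_FORMAT_PCM16 stores
+// OBOE_AUDIO_DATATYPE_INT16 in its low byte, so
+// OBOE_AUDIO_FORMAT_DATA_TYPE(OBOE_AUDIO_FORMAT_PCM16) == OBOE_AUDIO_DATATYPE_INT16.
+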
+enum {
+ OBOE_OK,
+ OBOE_ERROR_BASE = -900, // TODO review
+ OBOE_ERROR_DISCONNECTED,
+ OBOE_ERROR_ILLEGAL_ARGUMENT,
+ OBOE_ERROR_INCOMPATIBLE,
+ OBOE_ERROR_INTERNAL, // an underlying API returned an error code
+ OBOE_ERROR_INVALID_STATE,
+ OBOE_ERROR_UNEXPECTED_STATE,
+ OBOE_ERROR_UNEXPECTED_VALUE,
+ OBOE_ERROR_INVALID_HANDLE,
+ OBOE_ERROR_INVALID_QUERY,
+ OBOE_ERROR_UNIMPLEMENTED,
+ OBOE_ERROR_UNAVAILABLE,
+ OBOE_ERROR_NO_FREE_HANDLES,
+ OBOE_ERROR_NO_MEMORY,
+ OBOE_ERROR_NULL,
+ OBOE_ERROR_TIMEOUT,
+ OBOE_ERROR_WOULD_BLOCK,
+ OBOE_ERROR_INVALID_ORDER
+};
+
+typedef enum {
+ OBOE_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
+ OBOE_CLOCK_BOOTTIME, // Clock since booted, runs all the time.
+ OBOE_CLOCK_COUNT // This should always be last.
+} oboe_clockid_t;
+
+typedef enum
+{
+ OBOE_STREAM_STATE_UNINITIALIZED = 0,
+ OBOE_STREAM_STATE_OPEN,
+ OBOE_STREAM_STATE_STARTING,
+ OBOE_STREAM_STATE_STARTED,
+ OBOE_STREAM_STATE_PAUSING,
+ OBOE_STREAM_STATE_PAUSED,
+ OBOE_STREAM_STATE_FLUSHING,
+ OBOE_STREAM_STATE_FLUSHED,
+ OBOE_STREAM_STATE_STOPPING,
+ OBOE_STREAM_STATE_STOPPED,
+ OBOE_STREAM_STATE_CLOSING,
+ OBOE_STREAM_STATE_CLOSED,
+} oboe_stream_state_t;
+
+// TODO review API
+typedef enum {
+ /**
+ * This will use an AudioTrack object for playing audio
+ * and an AudioRecord for recording data.
+ */
+ OBOE_SHARING_MODE_LEGACY,
+ /**
+ * This will be the only stream using a particular source or sink.
+ * This mode will provide the lowest possible latency.
+ * You should close EXCLUSIVE streams immediately when you are not using them.
+ */
+ OBOE_SHARING_MODE_EXCLUSIVE,
+ /**
+ * Multiple applications will be mixed by the Oboe Server.
+ * This will have higher latency than the EXCLUSIVE mode.
+ */
+ OBOE_SHARING_MODE_SHARED,
+ /**
+ * Multiple applications will do their own mixing into a memory mapped buffer.
+ * It may be possible for malicious applications to read the data produced by
+ * other apps. So do not use this for private data such as telephony or messaging.
+ */
+ OBOE_SHARING_MODE_PUBLIC_MIX,
+ OBOE_SHARING_MODE_COUNT // This should always be last.
+} oboe_sharing_mode_t;
+
+#endif // OBOE_OBOEDEFINITIONS_H
diff --git a/media/liboboe/include/oboe/README.md b/media/liboboe/include/oboe/README.md
new file mode 100644
index 0000000..de60d03
--- /dev/null
+++ b/media/liboboe/include/oboe/README.md
@@ -0,0 +1,4 @@
+Oboe Audio headers
+
+This folder contains the public header files.
+
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 094f5cc..a0c8ace 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -526,9 +526,6 @@
mChannelMaskPresent(false),
mChannelMask(0),
mDequeueCounter(0),
- mInputMetadataType(kMetadataBufferTypeInvalid),
- mOutputMetadataType(kMetadataBufferTypeInvalid),
- mLegacyAdaptiveExperiment(false),
mMetadataBuffersToSubmit(0),
mNumUndequeuedBuffers(0),
mRepeatFrameDelayUs(-1ll),
@@ -556,6 +553,9 @@
mPortEOS[kPortIndexInput] = mPortEOS[kPortIndexOutput] = false;
mInputEOSResult = OK;
+ mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+ mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
+
memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
changeState(mUninitializedState);
@@ -691,8 +691,7 @@
int usageBits = 0;
// no need to reconnect as we will not dequeue all buffers
status_t err = setupNativeWindowSizeFormatAndUsage(
- nativeWindow, &usageBits,
- !storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment /* reconnect */);
+ nativeWindow, &usageBits, !storingMetadataInDecodedBuffers());
if (err != OK) {
return err;
}
@@ -742,7 +741,6 @@
const BufferInfo &info = buffers[i];
// skip undequeued buffers for meta data mode
if (storingMetadataInDecodedBuffers()
- && !mLegacyAdaptiveExperiment
&& info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
ALOGV("skipping buffer");
continue;
@@ -759,7 +757,7 @@
}
// cancel undequeued buffers to new surface
- if (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment) {
+ if (!storingMetadataInDecodedBuffers()) {
for (size_t i = 0; i < buffers.size(); ++i) {
BufferInfo &info = buffers.editItemAt(i);
if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
@@ -789,6 +787,21 @@
return OK;
}
+status_t ACodec::setPortMode(int32_t portIndex, IOMX::PortMode mode) {
+ status_t err = mOMXNode->setPortMode(portIndex, mode);
+ if (err != OK) {
+ ALOGE("[%s] setPortMode on %s to %s failed w/ err %d",
+ mComponentName.c_str(),
+ portIndex == kPortIndexInput ? "input" : "output",
+ asString(mode),
+ err);
+ return err;
+ }
+
+ mPortMode[portIndex] = mode;
+ return OK;
+}
+
status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
@@ -797,7 +810,7 @@
status_t err;
if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
- if (storingMetadataInDecodedBuffers() && !mLegacyAdaptiveExperiment) {
+ if (storingMetadataInDecodedBuffers()) {
err = allocateOutputMetadataBuffers();
} else {
err = allocateOutputBuffersFromNativeWindow();
@@ -811,26 +824,17 @@
OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err == OK) {
- MetadataBufferType type =
- portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
+ const IOMX::PortMode &mode = mPortMode[portIndex];
size_t bufSize = def.nBufferSize;
- if (type == kMetadataBufferTypeANWBuffer) {
+ // Always allocate VideoNativeMetadata if using ANWBuffer.
+ // OMX might use gralloc source internally, but we don't share
+ // metadata buffer with OMX, OMX has its own headers.
+ if (mode == IOMX::kPortModeDynamicANWBuffer) {
bufSize = sizeof(VideoNativeMetadata);
- } else if (type == kMetadataBufferTypeNativeHandleSource) {
+ } else if (mode == IOMX::kPortModeDynamicNativeHandle) {
bufSize = sizeof(VideoNativeHandleMetadata);
}
- // If using gralloc or native source input metadata buffers, allocate largest
- // metadata size as we prefer to generate native source metadata, but component
- // may require gralloc source. For camera source, allocate at least enough
- // size for native metadata buffers.
- size_t allottedSize = bufSize;
- if (portIndex == kPortIndexInput && type == kMetadataBufferTypeANWBuffer) {
- bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
- } else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
- bufSize = max(bufSize, sizeof(VideoNativeMetadata));
- }
-
size_t conversionBufferSize = 0;
sp<DataConverter> converter = mConverter[portIndex];
@@ -845,9 +849,9 @@
size_t alignment = MemoryDealer::getAllocationAlignment();
- ALOGV("[%s] Allocating %u buffers of size %zu/%zu (from %u using %s) on %s port",
+ ALOGV("[%s] Allocating %u buffers of size %zu (from %u using %s) on %s port",
mComponentName.c_str(),
- def.nBufferCountActual, bufSize, allottedSize, def.nBufferSize, asString(type),
+ def.nBufferCountActual, bufSize, def.nBufferSize, asString(mode),
portIndex == kPortIndexInput ? "input" : "output");
// verify buffer sizes to avoid overflow in align()
@@ -865,24 +869,21 @@
}
size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
- mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+ if (mode != IOMX::kPortModePresetSecureBuffer) {
+ mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+ }
const sp<AMessage> &format =
portIndex == kPortIndexInput ? mInputFormat : mOutputFormat;
for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
- sp<IMemory> mem = mDealer[portIndex]->allocate(bufSize);
- if (mem == NULL || mem->pointer() == NULL) {
- return NO_MEMORY;
- }
+ sp<IMemory> mem;
BufferInfo info;
info.mStatus = BufferInfo::OWNED_BY_US;
info.mFenceFd = -1;
info.mRenderInfo = NULL;
- if (portIndex == kPortIndexInput && (mFlags & kFlagIsSecure)) {
- mem.clear();
-
+ if (mode == IOMX::kPortModePresetSecureBuffer) {
void *ptr = NULL;
sp<NativeHandle> native_handle;
err = mOMXNode->allocateSecureBuffer(
@@ -894,18 +895,20 @@
: new SecureBuffer(format, native_handle, bufSize);
info.mCodecData = info.mData;
} else {
- err = mOMXNode->useBuffer(portIndex,
- OMXBuffer(mem, allottedSize), &info.mBufferID);
- }
+ mem = mDealer[portIndex]->allocate(bufSize);
+ if (mem == NULL || mem->pointer() == NULL) {
+ return NO_MEMORY;
+ }
- if (mem != NULL) {
- info.mCodecData = new SharedMemoryBuffer(format, mem);
- info.mCodecRef = mem;
+ err = mOMXNode->useBuffer(portIndex, mem, &info.mBufferID);
- if (type == kMetadataBufferTypeANWBuffer) {
+ if (mode == IOMX::kPortModeDynamicANWBuffer) {
((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
}
+ info.mCodecData = new SharedMemoryBuffer(format, mem);
+ info.mCodecRef = mem;
+
// if we require conversion, allocate conversion buffer for client use;
// otherwise, reuse codec buffer
if (mConverter[portIndex] != NULL) {
@@ -931,20 +934,12 @@
return err;
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
-
- notify->setInt32("portIndex", portIndex);
-
sp<PortDescription> desc = new PortDescription;
-
for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
const BufferInfo &info = mBuffers[portIndex][i];
desc->addBuffer(info.mBufferID, info.mData);
}
-
- notify->setObject("portDesc", desc);
- notify->post();
+ mCallback->onBuffersAllocated(portIndex, desc);
return OK;
}
@@ -1086,10 +1081,9 @@
}
status_t ACodec::allocateOutputBuffersFromNativeWindow() {
- // This method only handles the non-metadata mode, or legacy metadata mode
- // (where the headers for each buffer id will be fixed). Non-legacy metadata
- // mode shouldn't go through this path.
- CHECK(!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment);
+ // This method only handles the non-metadata mode (or simulating legacy
+ // mode with metadata, which is transparent to ACodec).
+ CHECK(!storingMetadataInDecodedBuffers());
OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
status_t err = configureOutputBuffersFromNativeWindow(
@@ -1126,10 +1120,8 @@
// TODO: We shouln't need to create MediaCodecBuffer. In metadata mode
// OMX doesn't use the shared memory buffer, but some code still
// access info.mData. Create an ABuffer as a placeholder.
- if (storingMetadataInDecodedBuffers()) {
- info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
- info.mCodecData = info.mData;
- }
+ info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
+ info.mCodecData = info.mData;
mBuffers[kPortIndexOutput].push(info);
@@ -1151,7 +1143,7 @@
OMX_U32 cancelStart;
OMX_U32 cancelEnd;
- if (err != 0 || storingMetadataInDecodedBuffers()) {
+ if (err != OK) {
// If an error occurred while dequeuing we need to cancel any buffers
// that were dequeued. Also cancel all if we're in legacy metadata mode.
cancelStart = 0;
@@ -1175,32 +1167,23 @@
static_cast<Surface*>(mNativeWindow.get())
->getIGraphicBufferProducer()->allowAllocation(false);
- if (storingMetadataInDecodedBuffers()) {
- mMetadataBuffersToSubmit = bufferCount - minUndequeuedBuffers;
- }
-
return err;
}
status_t ACodec::allocateOutputMetadataBuffers() {
- CHECK(storingMetadataInDecodedBuffers() && !mLegacyAdaptiveExperiment);
+ CHECK(storingMetadataInDecodedBuffers());
OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
status_t err = configureOutputBuffersFromNativeWindow(
&bufferCount, &bufferSize, &minUndequeuedBuffers,
false /* preregister */);
- if (err != 0)
+ if (err != OK)
return err;
mNumUndequeuedBuffers = minUndequeuedBuffers;
ALOGV("[%s] Allocating %u meta buffers on output port",
mComponentName.c_str(), bufferCount);
- size_t bufSize = mOutputMetadataType == kMetadataBufferTypeANWBuffer ?
- sizeof(struct VideoNativeMetadata) : sizeof(struct VideoGrallocMetadata);
- size_t totalSize = bufferCount * align(bufSize, MemoryDealer::getAllocationAlignment());
- mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
-
for (OMX_U32 i = 0; i < bufferCount; i++) {
BufferInfo info;
info.mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
@@ -1209,23 +1192,18 @@
info.mGraphicBuffer = NULL;
info.mDequeuedAt = mDequeueCounter;
- sp<IMemory> mem = mDealer[kPortIndexOutput]->allocate(bufSize);
- if (mem == NULL || mem->pointer() == NULL) {
- return NO_MEMORY;
- }
- if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
- ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
- }
- info.mData = new SharedMemoryBuffer(mOutputFormat, mem);
- info.mMemRef = mem;
- info.mCodecData = info.mData;
- info.mCodecRef = mem;
+ info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
- err = mOMXNode->useBuffer(kPortIndexOutput, mem, &info.mBufferID);
+ // Initialize fence fd to -1 to avoid warning in freeBuffer().
+ ((VideoNativeMetadata *)info.mData->base())->nFenceFd = -1;
+
+ info.mCodecData = info.mData;
+
+ err = mOMXNode->useBuffer(kPortIndexOutput, OMXBuffer::sPreset, &info.mBufferID);
mBuffers[kPortIndexOutput].push(info);
- ALOGV("[%s] allocated meta buffer with ID %u (pointer = %p)",
- mComponentName.c_str(), info.mBufferID, mem->pointer());
+ ALOGV("[%s] allocated meta buffer with ID %u",
+ mComponentName.c_str(), info.mBufferID);
}
mMetadataBuffersToSubmit = bufferCount - minUndequeuedBuffers;
@@ -1323,8 +1301,6 @@
}
void ACodec::notifyOfRenderedFrames(bool dropIncomplete, FrameRenderTracker::Info *until) {
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", CodecBase::kWhatOutputFramesRendered);
std::list<FrameRenderTracker::Info> done =
mRenderTracker.checkFencesAndGetRenderedFrames(until, dropIncomplete);
@@ -1340,9 +1316,7 @@
}
}
- if (MediaCodec::CreateFramesRenderedMessage(done, msg)) {
- msg->post();
- }
+ mCallback->onOutputFramesRendered(done);
}
ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
@@ -1401,7 +1375,7 @@
// same is possible in meta mode, in which case, it will be treated
// as a normal buffer, which is not desirable.
// TODO: fix this.
- if (!stale && (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment)) {
+ if (!stale && !storingMetadataInDecodedBuffers()) {
ALOGI("dequeued unrecognized (stale) buffer %p. discarding", buf);
stale = true;
}
@@ -1432,12 +1406,6 @@
// while loop above does not complete
CHECK(storingMetadataInDecodedBuffers());
- if (storingMetadataInDecodedBuffers() && mLegacyAdaptiveExperiment) {
- // If we're here while running legacy experiment, we dequeued some
- // unrecognized buffers, and the experiment can't continue.
- ALOGE("Legacy experiment failed, drop back to metadata mode");
- mLegacyAdaptiveExperiment = false;
- }
// discard buffer in LRU info and replace with new buffer
oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
oldest->mStatus = BufferInfo::OWNED_BY_US;
@@ -1445,23 +1413,10 @@
mRenderTracker.untrackFrame(oldest->mRenderInfo);
oldest->mRenderInfo = NULL;
- if (mOutputMetadataType == kMetadataBufferTypeGrallocSource) {
- VideoGrallocMetadata *grallocMeta =
- reinterpret_cast<VideoGrallocMetadata *>(oldest->mCodecData->base());
- ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
- (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
- mDequeueCounter - oldest->mDequeuedAt,
- (void *)(uintptr_t)grallocMeta->pHandle,
- oldest->mGraphicBuffer->handle, oldest->mCodecData->base());
- } else if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
- VideoNativeMetadata *nativeMeta =
- reinterpret_cast<VideoNativeMetadata *>(oldest->mCodecData->base());
- ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
- (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
- mDequeueCounter - oldest->mDequeuedAt,
- (void *)(uintptr_t)nativeMeta->pBuffer,
- oldest->mGraphicBuffer->getNativeBuffer(), oldest->mCodecData->base());
- }
+ ALOGV("replaced oldest buffer #%u with age %u, graphicBuffer %p",
+ (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
+ mDequeueCounter - oldest->mDequeuedAt,
+ oldest->mGraphicBuffer->getNativeBuffer());
updateRenderInfoForDequeuedBuffer(buf, fenceFd, oldest);
return oldest;
@@ -1508,9 +1463,7 @@
status_t err = OK;
// there should not be any fences in the metadata
- MetadataBufferType type =
- portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
- if (type == kMetadataBufferTypeANWBuffer && info->mCodecData != NULL
+ if (mPortMode[portIndex] == IOMX::kPortModeDynamicANWBuffer && info->mCodecData != NULL
&& info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
int fenceFd = ((VideoNativeMetadata *)info->mCodecData->base())->nFenceFd;
if (fenceFd >= 0) {
@@ -1569,7 +1522,7 @@
status_t ACodec::fillBuffer(BufferInfo *info) {
status_t err;
- if (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment) {
+ if (!storingMetadataInDecodedBuffers()) {
err = mOMXNode->fillBuffer(
info->mBufferID, OMXBuffer::sPreset, info->mFenceFd);
} else {
@@ -1611,8 +1564,8 @@
mIsEncoder = encoder;
- mInputMetadataType = kMetadataBufferTypeInvalid;
- mOutputMetadataType = kMetadataBufferTypeInvalid;
+ mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+ mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
status_t err = setComponentRole(encoder /* isEncoder */, mime);
@@ -1639,18 +1592,18 @@
if (encoder
&& msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
&& storeMeta != kMetadataBufferTypeInvalid) {
- mInputMetadataType = (MetadataBufferType)storeMeta;
- err = mOMXNode->storeMetaDataInBuffers(
- kPortIndexInput, OMX_TRUE, &mInputMetadataType);
+ IOMX::PortMode mode;
+ if (storeMeta == kMetadataBufferTypeNativeHandleSource) {
+ mode = IOMX::kPortModeDynamicNativeHandle;
+ } else if (storeMeta == kMetadataBufferTypeANWBuffer ||
+ storeMeta == kMetadataBufferTypeGrallocSource) {
+ mode = IOMX::kPortModeDynamicANWBuffer;
+ } else {
+ return BAD_VALUE;
+ }
+ err = setPortMode(kPortIndexInput, mode);
if (err != OK) {
- ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
- mComponentName.c_str(), err);
-
return err;
- } else if (storeMeta == kMetadataBufferTypeANWBuffer
- && mInputMetadataType == kMetadataBufferTypeGrallocSource) {
- // IOMX translates ANWBuffers to gralloc source already.
- mInputMetadataType = (MetadataBufferType)storeMeta;
}
uint32_t usageBits;
@@ -1695,12 +1648,14 @@
OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
&& msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
&& storeMeta != 0);
+ if (mFlags & kFlagIsSecure) {
+ enable = OMX_TRUE;
+ }
- mOutputMetadataType = kMetadataBufferTypeNativeHandleSource;
- err = mOMXNode->storeMetaDataInBuffers(kPortIndexOutput, enable, &mOutputMetadataType);
+ err = setPortMode(kPortIndexOutput, enable ?
+ IOMX::kPortModePresetSecureBuffer : IOMX::kPortModePresetByteBuffer);
if (err != OK) {
- ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
- mComponentName.c_str(), err);
+ return err;
}
if (!msg->findInt64(
@@ -1737,7 +1692,6 @@
bool haveNativeWindow = msg->findObject("native-window", &obj)
&& obj != NULL && video && !encoder;
mUsingNativeWindow = haveNativeWindow;
- mLegacyAdaptiveExperiment = false;
if (video && !encoder) {
inputFormat->setInt32("adaptive-playback", false);
@@ -1753,10 +1707,13 @@
if (mFlags & kFlagIsSecure) {
// use native_handles for secure input buffers
- err = mOMXNode->enableNativeBuffers(
- kPortIndexInput, OMX_FALSE /* graphic */, OMX_TRUE);
- ALOGI_IF(err != OK, "falling back to non-native_handles");
- err = OK; // ignore error for now
+ err = setPortMode(kPortIndexInput, IOMX::kPortModePresetSecureBuffer);
+
+ if (err != OK) {
+ ALOGI("falling back to non-native_handles");
+ setPortMode(kPortIndexInput, IOMX::kPortModePresetByteBuffer);
+ err = OK; // ignore error for now
+ }
}
}
if (haveNativeWindow) {
@@ -1828,14 +1785,8 @@
return err;
}
- // Always try to enable dynamic output buffers on native surface
- mOutputMetadataType = kMetadataBufferTypeANWBuffer;
- err = mOMXNode->storeMetaDataInBuffers(
- kPortIndexOutput, OMX_TRUE, &mOutputMetadataType);
+ err = setPortMode(kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer);
if (err != OK) {
- ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
- mComponentName.c_str(), err);
-
// if adaptive playback has been requested, try JB fallback
// NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
// LARGE MEMORY REQUIREMENT
@@ -1880,12 +1831,9 @@
// allow failure
err = OK;
} else {
- ALOGV("[%s] storeMetaDataInBuffers succeeded",
- mComponentName.c_str());
+ ALOGV("[%s] setPortMode on output to %s succeeded",
+ mComponentName.c_str(), asString(IOMX::kPortModeDynamicANWBuffer));
CHECK(storingMetadataInDecodedBuffers());
- mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled(
- "legacy-adaptive", !msg->contains("no-experiments"));
-
inputFormat->setInt32("adaptive-playback", true);
}
@@ -1914,6 +1862,12 @@
if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
usingSwRenderer = true;
haveNativeWindow = false;
+ (void)setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
+ } else if (haveNativeWindow && !storingMetadataInDecodedBuffers()) {
+ err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetANWBuffer);
+ if (err != OK) {
+ return err;
+ }
}
if (encoder) {
@@ -1928,17 +1882,8 @@
if (haveNativeWindow) {
mNativeWindow = static_cast<Surface *>(obj.get());
- }
- // initialize native window now to get actual output format
- // TODO: this is needed for some encoders even though they don't use native window
- err = initNativeWindow();
- if (err != OK) {
- return err;
- }
-
- // fallback for devices that do not handle flex-YUV for native buffers
- if (haveNativeWindow) {
+ // fallback for devices that do not handle flex-YUV for native buffers
int32_t requestedColorFormat = OMX_COLOR_FormatUnused;
if (msg->findInt32("color-format", &requestedColorFormat) &&
requestedColorFormat == OMX_COLOR_FormatYUV420Flexible) {
@@ -1964,18 +1909,10 @@
mNativeWindowUsageBits = 0;
haveNativeWindow = false;
usingSwRenderer = true;
- if (storingMetadataInDecodedBuffers()) {
- err = mOMXNode->storeMetaDataInBuffers(
- kPortIndexOutput, OMX_FALSE, &mOutputMetadataType);
- mOutputMetadataType = kMetadataBufferTypeInvalid; // just in case
- // TODO: implement adaptive-playback support for bytebuffer mode.
- // This is done by SW codecs, but most HW codecs don't support it.
- inputFormat->setInt32("adaptive-playback", false);
- }
- if (err == OK) {
- err = mOMXNode->enableNativeBuffers(
- kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
- }
+ // TODO: implement adaptive-playback support for bytebuffer mode.
+ // This is done by SW codecs, but most HW codecs don't support it.
+ err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
+ inputFormat->setInt32("adaptive-playback", false);
if (mFlags & kFlagIsGrallocUsageProtected) {
// fallback is not supported for protected playback
err = PERMISSION_DENIED;
@@ -4525,15 +4462,6 @@
return err;
}
-status_t ACodec::initNativeWindow() {
- if (mNativeWindow != NULL) {
- return mOMXNode->enableNativeBuffers(kPortIndexOutput, OMX_TRUE /* graphic */, OMX_TRUE);
- }
-
- mOMXNode->enableNativeBuffers(kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
- return OK;
-}
-
size_t ACodec::countBuffersOwnedByComponent(OMX_U32 portIndex) const {
size_t n = 0;
@@ -5178,8 +5106,6 @@
}
void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatError);
ALOGE("signalError(omxError %#x, internalError %d)", error, internalError);
if (internalError == UNKNOWN_ERROR) { // find better error code
@@ -5192,10 +5118,7 @@
}
mFatalError = true;
-
- notify->setInt32("err", internalError);
- notify->setInt32("actionCode", ACTION_CODE_FATAL); // could translate from OMX error.
- notify->post();
+ mCallback->onError(internalError, ACTION_CODE_FATAL);
}
status_t ACodec::requestIDRFrame() {
@@ -5322,9 +5245,7 @@
status_t err = mCodec->mOMXNode->freeNode();
ALOGE_IF("[%s] failed to release codec instance: err=%d",
mCodec->mComponentName.c_str(), err);
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
+ mCodec->mCallback->onReleaseCompleted();
break;
}
@@ -5548,20 +5469,11 @@
CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
-
- notify->setObject("buffer", info->mData->clone(mCodec->mInputFormat));
- info->mData.clear();
-
+ info->mData->setFormat(mCodec->mInputFormat);
sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec);
reply->setInt32("buffer-id", info->mBufferID);
-
- notify->setMessage("reply", reply);
-
- notify->post();
-
+ mCodec->mCallback->fillThisBuffer(info->mBufferID, info->mData, reply);
+ info->mData.clear();
info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
}
@@ -5572,9 +5484,8 @@
int32_t err = OK;
bool eos = false;
PortMode mode = getPortMode(kPortIndexInput);
-
- sp<RefBase> obj;
- if (!msg->findObject("buffer", &obj)) {
+ int32_t discarded = 0;
+ if (msg->findInt32("discarded", &discarded) && discarded) {
/* these are unfilled buffers returned by client */
CHECK(msg->findInt32("err", &err));
@@ -5586,9 +5497,10 @@
mCodec->mComponentName.c_str(), err);
eos = true;
}
- } else {
- buffer = static_cast<MediaCodecBuffer *>(obj.get());
}
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ buffer = static_cast<MediaCodecBuffer *>(obj.get());
int32_t tmp;
if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
@@ -5634,7 +5546,6 @@
OMX_U32 flags = OMX_BUFFERFLAG_ENDOFFRAME;
- MetadataBufferType metaType = mCodec->mInputMetadataType;
int32_t isCSD = 0;
if (buffer->meta()->findInt32("csd", &isCSD) && isCSD != 0) {
if (mCodec->mIsLegacyVP9Decoder) {
@@ -5644,7 +5555,6 @@
break;
}
flags |= OMX_BUFFERFLAG_CODECCONFIG;
- metaType = kMetadataBufferTypeInvalid;
}
if (eos) {
@@ -5710,15 +5620,17 @@
info->checkReadFence("onInputBufferFilled");
status_t err2 = OK;
- switch (metaType) {
- case kMetadataBufferTypeInvalid:
+ switch (mCodec->mPortMode[kPortIndexInput]) {
+ case IOMX::kPortModePresetByteBuffer:
+ case IOMX::kPortModePresetANWBuffer:
+ case IOMX::kPortModePresetSecureBuffer:
{
err2 = mCodec->mOMXNode->emptyBuffer(
bufferID, info->mCodecData, flags, timeUs, info->mFenceFd);
}
break;
#ifndef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
- case kMetadataBufferTypeNativeHandleSource:
+ case IOMX::kPortModeDynamicNativeHandle:
if (info->mCodecData->size() >= sizeof(VideoNativeHandleMetadata)) {
VideoNativeHandleMetadata *vnhmd =
(VideoNativeHandleMetadata*)info->mCodecData->base();
@@ -5728,7 +5640,7 @@
bufferID, handle, flags, timeUs, info->mFenceFd);
}
break;
- case kMetadataBufferTypeANWBuffer:
+ case IOMX::kPortModeDynamicANWBuffer:
if (info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
VideoNativeMetadata *vnmd = (VideoNativeMetadata*)info->mCodecData->base();
sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
@@ -5740,7 +5652,8 @@
#endif
default:
ALOGW("Can't marshall %s data in %zu sized buffers in %zu-bit mode",
- asString(metaType), info->mCodecData->size(),
+ asString(mCodec->mPortMode[kPortIndexInput]),
+ info->mCodecData->size(),
sizeof(buffer_handle_t) * 8);
err2 = ERROR_UNSUPPORTED;
break;
@@ -5910,9 +5823,8 @@
break;
}
- sp<AMessage> reply =
- new AMessage(kWhatOutputBufferDrained, mCodec);
- sp<MediaCodecBuffer> buffer = info->mData->clone(mCodec->mOutputFormat);
+ sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, mCodec);
+ sp<MediaCodecBuffer> buffer = info->mData;
if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
// pretend that output format has changed on the first frame (we used to do this)
@@ -5926,18 +5838,17 @@
// data space) so that we can set it if and once the buffer is rendered.
mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
}
+ buffer->setFormat(mCodec->mOutputFormat);
- if (mCodec->usingMetadataOnEncoderOutput()) {
+ if (mCodec->usingSecureBufferOnEncoderOutput()) {
native_handle_t *handle = NULL;
- VideoNativeHandleMetadata &nativeMeta =
- *(VideoNativeHandleMetadata *)buffer->data();
- if (buffer->size() >= sizeof(nativeMeta)
- && nativeMeta.eType == kMetadataBufferTypeNativeHandleSource) {
+ sp<SecureBuffer> secureBuffer = static_cast<SecureBuffer *>(buffer.get());
+ if (secureBuffer != NULL) {
#ifdef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
// handle is only valid on 32-bit/mediaserver process
handle = NULL;
#else
- handle = (native_handle_t *)nativeMeta.pHandle;
+ handle = (native_handle_t *)secureBuffer->getDestinationPointer();
#endif
}
buffer->meta()->setPointer("handle", handle);
@@ -5968,29 +5879,18 @@
}
buffer->meta()->setInt64("timeUs", timeUs);
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
- notify->setObject("buffer", buffer);
info->mData.clear();
- notify->setInt32("flags", flags);
reply->setInt32("buffer-id", info->mBufferID);
- notify->setMessage("reply", reply);
-
- notify->post();
+ mCodec->mCallback->drainThisBuffer(info->mBufferID, buffer, flags, reply);
info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM;
if (flags & OMX_BUFFERFLAG_EOS) {
ALOGV("[%s] saw output EOS", mCodec->mComponentName.c_str());
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatEOS);
- notify->setInt32("err", mCodec->mInputEOSResult);
- notify->post();
-
+ mCodec->mCallback->onEos(mCodec->mInputEOSResult);
mCodec->mPortEOS[kPortIndexOutput] = true;
}
break;
@@ -6016,10 +5916,11 @@
IOMX::buffer_id bufferID;
CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
sp<RefBase> obj;
- sp<MediaCodecBuffer> buffer = nullptr;
- if (msg->findObject("buffer", &obj)) {
- buffer = static_cast<MediaCodecBuffer *>(obj.get());
- }
+ CHECK(msg->findObject("buffer", &obj));
+ sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+ int32_t discarded = 0;
+ msg->findInt32("discarded", &discarded);
+
ssize_t index;
BufferInfo *info = mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
BufferInfo::Status status = BufferInfo::getSafeStatus(info);
@@ -6051,7 +5952,7 @@
int32_t render;
if (mCodec->mNativeWindow != NULL
&& msg->findInt32("render", &render) && render != 0
- && buffer != NULL && buffer->size() != 0) {
+ && !discarded && buffer->size() != 0) {
ATRACE_NAME("render");
// The client wants this buffer to be rendered.
@@ -6090,8 +5991,7 @@
info->mIsReadFence = false;
}
} else {
- if (mCodec->mNativeWindow != NULL &&
- (buffer == NULL || buffer->size() != 0)) {
+ if (mCodec->mNativeWindow != NULL && (discarded || buffer->size() != 0)) {
// move read fence into write fence to avoid clobbering
info->mIsReadFence = false;
ATRACE_NAME("frame-drop");
@@ -6176,8 +6076,8 @@
mCodec->mOMX.clear();
mCodec->mOMXNode.clear();
mCodec->mFlags = 0;
- mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
- mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
+ mCodec->mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+ mCodec->mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
mCodec->mConverter[0].clear();
mCodec->mConverter[1].clear();
mCodec->mComponentName.clear();
@@ -6209,21 +6109,18 @@
"keepComponentAllocated", &keepComponentAllocated));
ALOGW_IF(keepComponentAllocated,
"cannot keep component allocated on shutdown in Uninitialized state");
-
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
-
+ if (keepComponentAllocated) {
+ mCodec->mCallback->onStopCompleted();
+ } else {
+ mCodec->mCallback->onReleaseCompleted();
+ }
handled = true;
break;
}
case ACodec::kWhatFlush:
{
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
-
+ mCodec->mCallback->onFlushCompleted();
handled = true;
break;
}
@@ -6351,14 +6248,7 @@
omxNode->setQuirks(quirks);
mCodec->mOMX = omx;
mCodec->mOMXNode = omxNode;
-
- {
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatComponentAllocated);
- notify->setString("componentName", mCodec->mComponentName.c_str());
- notify->post();
- }
-
+ mCodec->mCallback->onComponentAllocated(mCodec->mComponentName.c_str());
mCodec->changeState(mCodec->mLoadedState);
return true;
@@ -6407,9 +6297,11 @@
}
if (mCodec->mExplicitShutdown) {
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
+ if (keepComponentAllocated) {
+ mCodec->mCallback->onStopCompleted();
+ } else {
+ mCodec->mCallback->onReleaseCompleted();
+ }
mCodec->mExplicitShutdown = false;
}
}
@@ -6461,10 +6353,7 @@
case ACodec::kWhatFlush:
{
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
-
+ mCodec->mCallback->onFlushCompleted();
handled = true;
break;
}
@@ -6497,13 +6386,7 @@
return false;
}
- {
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatComponentConfigured);
- notify->setMessage("input-format", mCodec->mInputFormat);
- notify->setMessage("output-format", mCodec->mOutputFormat);
- notify->post();
- }
+ mCodec->mCallback->onComponentConfigured(mCodec->mInputFormat, mCodec->mOutputFormat);
return true;
}
@@ -6625,9 +6508,6 @@
const sp<AMessage> & /* msg */) {
ALOGV("onCreateInputSurface");
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
-
sp<IGraphicBufferProducer> bufferProducer;
status_t err = mCodec->mOMX->createInputSurface(
&bufferProducer, &mCodec->mGraphicBufferSource);
@@ -6637,12 +6517,9 @@
}
if (err == OK) {
- mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
-
- notify->setMessage("input-format", mCodec->mInputFormat);
- notify->setMessage("output-format", mCodec->mOutputFormat);
-
- notify->setObject("input-surface",
+ mCodec->mCallback->onInputSurfaceCreated(
+ mCodec->mInputFormat,
+ mCodec->mOutputFormat,
new BufferProducerWrapper(bufferProducer));
} else {
// Can't use mCodec->signalError() here -- MediaCodec won't forward
@@ -6650,18 +6527,13 @@
// send a kWhatInputSurfaceCreated with an error value instead.
ALOGE("[%s] onCreateInputSurface returning error %d",
mCodec->mComponentName.c_str(), err);
- notify->setInt32("err", err);
+ mCodec->mCallback->onInputSurfaceCreationFailed(err);
}
- notify->post();
}
-void ACodec::LoadedState::onSetInputSurface(
- const sp<AMessage> &msg) {
+void ACodec::LoadedState::onSetInputSurface(const sp<AMessage> &msg) {
ALOGV("onSetInputSurface");
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatInputSurfaceAccepted);
-
sp<RefBase> obj;
CHECK(msg->findObject("input-surface", &obj));
sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
@@ -6670,19 +6542,16 @@
status_t err = setupInputSurface();
if (err == OK) {
- mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
-
- notify->setMessage("input-format", mCodec->mInputFormat);
- notify->setMessage("output-format", mCodec->mOutputFormat);
+ mCodec->mCallback->onInputSurfaceAccepted(
+ mCodec->mInputFormat, mCodec->mOutputFormat);
} else {
// Can't use mCodec->signalError() here -- MediaCodec won't forward
// the error through because it's in the "configured" state. We
// send a kWhatInputSurfaceAccepted with an error value instead.
ALOGE("[%s] onSetInputSurface returning error %d",
mCodec->mComponentName.c_str(), err);
- notify->setInt32("err", err);
+ mCodec->mCallback->onInputSurfaceDeclined(err);
}
- notify->post();
}
void ACodec::LoadedState::onStart() {
@@ -6760,9 +6629,7 @@
case kWhatFlush:
{
// We haven't even started yet, so we're flushed alright...
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
+ mCodec->mCallback->onFlushCompleted();
return true;
}
@@ -6832,10 +6699,7 @@
case kWhatFlush:
{
// We haven't even started yet, so we're flushed alright...
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
-
+ mCodec->mCallback->onFlushCompleted();
return true;
}
@@ -7210,17 +7074,11 @@
}
void ACodec::onSignalEndOfInputStream() {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
-
status_t err = INVALID_OPERATION;
if (mGraphicBufferSource != NULL) {
err = statusFromBinderStatus(mGraphicBufferSource->signalEndOfInputStream());
}
- if (err != OK) {
- notify->setInt32("err", err);
- }
- notify->post();
+ mCallback->onSignaledInputEOS(err);
}
bool ACodec::ExecutingState::onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano) {
@@ -7414,8 +7272,7 @@
case kWhatShutdown:
{
- // We're already doing that...
-
+ mCodec->deferMessage(msg);
handled = true;
break;
}
@@ -7524,8 +7381,7 @@
switch (msg->what()) {
case kWhatShutdown:
{
- // We're already doing that...
-
+ mCodec->deferMessage(msg);
handled = true;
break;
}
@@ -7699,9 +7555,7 @@
mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
+ mCodec->mCallback->onFlushCompleted();
mCodec->mPortEOS[kPortIndexInput] =
mCodec->mPortEOS[kPortIndexOutput] = false;
@@ -7851,8 +7705,8 @@
// tunneled playback includes adaptive playback
builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback
| MediaCodecInfo::Capabilities::kFlagSupportsTunneledPlayback);
- } else if (omxNode->storeMetaDataInBuffers(
- kPortIndexOutput, OMX_TRUE) == OK ||
+ } else if (omxNode->setPortMode(
+ kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
omxNode->prepareForAdaptivePlayback(
kPortIndexOutput, OMX_TRUE,
1280 /* width */, 720 /* height */) == OK) {
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 4eacff5..9e3b35a 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -46,7 +46,6 @@
NuMediaExtractor.cpp \
OMXClient.cpp \
OggExtractor.cpp \
- ProcessInfo.cpp \
SampleIterator.cpp \
SampleTable.cpp \
SimpleDecodingSource.cpp \
@@ -60,7 +59,6 @@
VBRISeeker.cpp \
VideoFrameScheduler.cpp \
WAVExtractor.cpp \
- WVMExtractor.cpp \
XINGSeeker.cpp \
avc_utils.cpp \
@@ -88,6 +86,7 @@
libgui \
liblog \
libmedia \
+ libaudioclient \
libmediautils \
libnetd_client \
libsonivox \
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index efdee77..4ccd2d0 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -62,6 +62,8 @@
mPrevSampleTimeUs(0),
mInitialReadTimeUs(0),
mNumFramesReceived(0),
+ mNumFramesSkipped(0),
+ mNumFramesLost(0),
mNumClientOwnedBuffers(0) {
ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
sampleRate, outSampleRate, channelCount);
@@ -295,11 +297,27 @@
}
status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
- int64_t timeUs = systemTime() / 1000ll;
- // Estimate the real sampling time of the 1st sample in this buffer
- // from AudioRecord's latency. (Apply this adjustment first so that
- // the start time logic is not affected.)
- timeUs -= mRecord->latency() * 1000LL;
+ int64_t timeUs, position, timeNs;
+ ExtendedTimestamp ts;
+ ExtendedTimestamp::Location location;
+ const int32_t usPerSec = 1000000;
+
+ if (mRecord->getTimestamp(&ts) == OK &&
+ ts.getBestTimestamp(&position, &timeNs, ExtendedTimestamp::TIMEBASE_MONOTONIC,
+ &location) == OK) {
+ // Use audio timestamp.
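+ // "position" is the frame count that corresponds to timeNs; the difference
+ // between it and the frames accounted for so far (received/skipped/lost)
+ // is converted to microseconds and subtracted to back-date the capture
+ // time of this buffer's first sample.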
+ timeUs = timeNs / 1000 -
+ (position - mNumFramesSkipped -
+ mNumFramesReceived + mNumFramesLost) * usPerSec / mSampleRate;
+ } else {
+ // This should not happen in the normal case.
+ ALOGW("Failed to get audio timestamp; falling back to the system clock");
+ timeUs = systemTime() / 1000ll;
+ // Estimate the real sampling time of the 1st sample in this buffer
+ // from AudioRecord's latency. (Apply this adjustment first so that
+ // the start time logic is not affected.)
+ timeUs -= mRecord->latency() * 1000LL;
+ }
ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
Mutex::Autolock autoLock(mLock);
@@ -308,10 +326,15 @@
return OK;
}
+ const size_t bufferSize = audioBuffer.size;
+
// Drop retrieved and previously lost audio data.
if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
(void) mRecord->getInputFramesLost();
- ALOGV("Drop audio data at %" PRId64 "/%" PRId64 " us", timeUs, mStartTimeUs);
+ int64_t receivedFrames = bufferSize / mRecord->frameSize();
+ ALOGV("Drop audio data (%" PRId64 " frames) at %" PRId64 "/%" PRId64 " us",
+ receivedFrames, timeUs, mStartTimeUs);
+ mNumFramesSkipped += receivedFrames;
return OK;
}
@@ -320,11 +343,7 @@
// Initial delay
if (mStartTimeUs > 0) {
mStartTimeUs = timeUs - mStartTimeUs;
- } else {
- // Assume latency is constant.
- mStartTimeUs += mRecord->latency() * 1000;
}
-
mPrevSampleTimeUs = mStartTimeUs;
}
@@ -354,6 +373,7 @@
MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize);
memset(lostAudioBuffer->data(), 0, bufferSize);
lostAudioBuffer->set_range(0, bufferSize);
+ mNumFramesLost += bufferSize / mRecord->frameSize();
queueInputBuffer_l(lostAudioBuffer, timeUs);
}
@@ -362,7 +382,6 @@
return OK;
}
- const size_t bufferSize = audioBuffer.size;
MediaBuffer *buffer = new MediaBuffer(bufferSize);
memcpy((uint8_t *) buffer->data(),
audioBuffer.i16, audioBuffer.size);
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index 81fe0fe..37a40ec 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -34,10 +34,6 @@
mMemory(mem) {
}
-sp<MediaCodecBuffer> SharedMemoryBuffer::clone(const sp<AMessage> &format) {
- return new SharedMemoryBuffer(format, mMemory);
-}
-
SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
: MediaCodecBuffer(format, new ABuffer(nullptr, size)),
mPointer(ptr) {
@@ -50,12 +46,6 @@
mHandle(handle) {
}
-sp<MediaCodecBuffer> SecureBuffer::clone(const sp<AMessage> &format) {
- return (mHandle == nullptr)
- ? new SecureBuffer(format, mPointer, capacity())
- : new SecureBuffer(format, mHandle, capacity());
-}
-
void *SecureBuffer::getDestinationPointer() {
return (void *)(mHandle == nullptr ? mPointer : mHandle->handle());
}
diff --git a/media/libstagefright/CodecBase.cpp b/media/libstagefright/CodecBase.cpp
index f729d4d..3eca52a 100644
--- a/media/libstagefright/CodecBase.cpp
+++ b/media/libstagefright/CodecBase.cpp
@@ -35,4 +35,8 @@
CodecBase::PortDescription::~PortDescription() {
}
+void CodecBase::setCallback(std::shared_ptr<Callback> &&callback) {
+ mCallback = callback;
+}
+
} // namespace android
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 163a527..a9536b9 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -31,7 +31,6 @@
#include "include/NuCachedSource2.h"
#include "include/OggExtractor.h"
#include "include/WAVExtractor.h"
-#include "include/WVMExtractor.h"
#include "matroska/MatroskaExtractor.h"
@@ -112,83 +111,6 @@
////////////////////////////////////////////////////////////////////////////////
-Mutex DataSource::gSnifferMutex;
-List<DataSource::SnifferFunc> DataSource::gSniffers;
-bool DataSource::gSniffersRegistered = false;
-
-bool DataSource::sniff(
- String8 *mimeType, float *confidence, sp<AMessage> *meta) {
- *mimeType = "";
- *confidence = 0.0f;
- meta->clear();
-
- {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (!gSniffersRegistered) {
- return false;
- }
- }
-
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
- String8 newMimeType;
- float newConfidence;
- sp<AMessage> newMeta;
- if ((*it)(this, &newMimeType, &newConfidence, &newMeta)) {
- if (newConfidence > *confidence) {
- *mimeType = newMimeType;
- *confidence = newConfidence;
- *meta = newMeta;
- }
- }
- }
-
- return *confidence > 0.0;
-}
-
-// static
-void DataSource::RegisterSniffer_l(SnifferFunc func) {
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
- if (*it == func) {
- return;
- }
- }
-
- gSniffers.push_back(func);
-}
-
-// static
-void DataSource::RegisterDefaultSniffers() {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (gSniffersRegistered) {
- return;
- }
-
- RegisterSniffer_l(SniffMPEG4);
- RegisterSniffer_l(SniffMatroska);
- RegisterSniffer_l(SniffOgg);
- RegisterSniffer_l(SniffWAV);
- RegisterSniffer_l(SniffFLAC);
- RegisterSniffer_l(SniffAMR);
- RegisterSniffer_l(SniffMPEG2TS);
- RegisterSniffer_l(SniffMP3);
- RegisterSniffer_l(SniffAAC);
- RegisterSniffer_l(SniffMPEG2PS);
- if (getuid() == AID_MEDIA) {
- // WVM only in the media server process
- RegisterSniffer_l(SniffWVM);
- }
- RegisterSniffer_l(SniffMidi);
-
- char value[PROPERTY_VALUE_MAX];
- if (property_get("drm.service.enabled", value, NULL)
- && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
- RegisterSniffer_l(SniffDRM);
- }
- gSniffersRegistered = true;
-}
-
// static
sp<DataSource> DataSource::CreateFromURI(
const sp<IMediaHTTPService> &httpService,
@@ -200,14 +122,10 @@
*contentType = "";
}
- bool isWidevine = !strncasecmp("widevine://", uri, 11);
-
sp<DataSource> source;
if (!strncasecmp("file://", uri, 7)) {
source = new FileSource(uri + 7);
- } else if (!strncasecmp("http://", uri, 7)
- || !strncasecmp("https://", uri, 8)
- || isWidevine) {
+ } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
if (httpService == NULL) {
ALOGE("Invalid http service!");
return NULL;
@@ -222,14 +140,6 @@
httpSource = new MediaHTTP(conn);
}
- String8 tmp;
- if (isWidevine) {
- tmp = String8("http://");
- tmp.append(uri + 11);
-
- uri = tmp.string();
- }
-
String8 cacheConfig;
bool disconnectAtHighwatermark;
KeyedVector<String8, String8> nonCacheSpecificHeaders;
@@ -246,20 +156,14 @@
return NULL;
}
- if (!isWidevine) {
- if (contentType != NULL) {
- *contentType = httpSource->getMIMEType();
- }
-
- source = NuCachedSource2::Create(
- httpSource,
- cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
- disconnectAtHighwatermark);
- } else {
- // We do not want that prefetching, caching, datasource wrapper
- // in the widevine:// case.
- source = httpSource;
+ if (contentType != NULL) {
+ *contentType = httpSource->getMIMEType();
}
+
+ source = NuCachedSource2::Create(
+ httpSource,
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
} else if (!strncasecmp("data:", uri, 5)) {
source = DataURISource::Create(uri);
} else {
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 74eb590..9978b76 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -69,6 +69,7 @@
static const uint8_t kNalUnitTypeSeqParamSet = 0x07;
static const uint8_t kNalUnitTypePicParamSet = 0x08;
static const int64_t kInitialDelayTimeUs = 700000LL;
+static const int64_t kMaxMetadataSize = 0x4000000LL; // 64MB max per-frame metadata size
static const char kMetaKey_Version[] = "com.android.version";
#ifdef SHOW_MODEL_BUILD
@@ -116,6 +117,7 @@
int32_t getTrackId() const { return mTrackId; }
status_t dump(int fd, const Vector<String16>& args) const;
static const char *getFourCCForMime(const char *mime);
+ const char *getTrackType() const;
private:
enum {
@@ -271,6 +273,7 @@
bool mIsAvc;
bool mIsHevc;
bool mIsAudio;
+ bool mIsVideo;
bool mIsMPEG4;
bool mIsMalformed;
int32_t mTrackId;
@@ -393,6 +396,7 @@
void writeMdhdBox(uint32_t now);
void writeSmhdBox();
void writeVmhdBox();
+ void writeNmhdBox();
void writeHdlrBox();
void writeTkhdBox(uint32_t now);
void writeColrBox();
@@ -400,6 +404,7 @@
void writeMp4vEsdsBox();
void writeAudioFourCCBox();
void writeVideoFourCCBox();
+ void writeMetadataFourCCBox();
void writeStblBox(bool use32BitOffset);
Track(const Track &);
@@ -430,6 +435,8 @@
mStartTimestampUs(-1ll),
mLatitudex10000(0),
mLongitudex10000(0),
+ mHasAudioTrack(false),
+ mHasVideoTrack(false),
mAreGeoTagsAvailable(false),
mStartTimeOffsetMs(-1),
mMetaKeys(new AMessage()) {
@@ -477,7 +484,7 @@
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
- snprintf(buffer, SIZE, " %s track\n", mIsAudio? "Audio": "Video");
+ snprintf(buffer, SIZE, " %s track\n", getTrackType());
result.append(buffer);
snprintf(buffer, SIZE, " reached EOS: %s\n",
mReachedEOS? "true": "false");
@@ -513,8 +520,10 @@
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
return "hvc1";
}
+ } else if (!strncasecmp(mime, "application/", 12)) {
+ return "mett";
} else {
- ALOGE("Track (%s) other than video or audio is not supported", mime);
+ ALOGE("Track (%s) other than video/audio/metadata is not supported", mime);
}
return NULL;
}
@@ -526,37 +535,33 @@
return UNKNOWN_ERROR;
}
- // At most 2 tracks can be supported.
- if (mTracks.size() >= 2) {
- ALOGE("Too many tracks (%zu) to add", mTracks.size());
- return ERROR_UNSUPPORTED;
- }
-
CHECK(source.get() != NULL);
const char *mime;
source->getFormat()->findCString(kKeyMIMEType, &mime);
- bool isAudio = !strncasecmp(mime, "audio/", 6);
+
+ if (!strncasecmp(mime, "audio/", 6)) {
+ if (mHasAudioTrack) {
+ ALOGE("At most one audio track can be added");
+ return ERROR_UNSUPPORTED;
+ }
+ mHasAudioTrack = true;
+ }
+
+ if (!strncasecmp(mime, "video/", 6)) {
+ if (mHasVideoTrack) {
+ ALOGE("At most one video track can be added");
+ return ERROR_UNSUPPORTED;
+ }
+ mHasVideoTrack = true;
+ }
+
if (Track::getFourCCForMime(mime) == NULL) {
ALOGE("Unsupported mime '%s'", mime);
return ERROR_UNSUPPORTED;
}
- // At this point, we know the track to be added is either
- // video or audio. Thus, we only need to check whether it
- // is an audio track or not (if it is not, then it must be
- // a video track).
-
- // No more than one video or one audio track is supported.
- for (List<Track*>::iterator it = mTracks.begin();
- it != mTracks.end(); ++it) {
- if ((*it)->isAudio() == isAudio) {
- ALOGE("%s track already exists", isAudio? "Audio": "Video");
- return ERROR_UNSUPPORTED;
- }
- }
-
- // This is the first track of either audio or video.
+ // This is a metadata track or the first track of either audio or video
// Go ahead to add the track.
Track *track = new Track(this, source, 1 + mTracks.size());
mTracks.push_back(track);
@@ -1561,11 +1566,12 @@
mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
mIsAudio = !strncasecmp(mime, "audio/", 6);
+ mIsVideo = !strncasecmp(mime, "video/", 6);
mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
// store temporal layer count
- if (!mIsAudio) {
+ if (mIsVideo) {
int32_t count;
if (mMeta->findInt32(kKeyTemporalLayerCount, &count) && count > 1) {
mOwner->setTemporalLayerCount(count);
@@ -1621,7 +1627,7 @@
void MPEG4Writer::Track::addOneCttsTableEntry(
size_t sampleCount, int32_t duration) {
- if (mIsAudio) {
+ if (!mIsVideo) {
return;
}
mCttsTableEntries->add(htonl(sampleCount));
@@ -1753,7 +1759,7 @@
void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
ALOGV("writeChunkToFile: %" PRId64 " from %s track",
- chunk->mTimeStampUs, chunk->mTrack->isAudio()? "audio": "video");
+ chunk->mTimeStampUs, chunk->mTrack->getTrackType());
int32_t isFirstSample = true;
while (!chunk->mSamples.empty()) {
@@ -1906,7 +1912,7 @@
mStartTimeRealUs = startTimeUs;
int32_t rotationDegrees;
- if (!mIsAudio && params && params->findInt32(kKeyRotation, &rotationDegrees)) {
+ if (mIsVideo && params && params->findInt32(kKeyRotation, &rotationDegrees)) {
mRotation = rotationDegrees;
}
@@ -1964,7 +1970,7 @@
}
status_t MPEG4Writer::Track::stop() {
- ALOGD("%s track stopping", mIsAudio? "Audio": "Video");
+ ALOGD("%s track stopping", getTrackType());
if (!mStarted) {
ALOGE("Stop() called but track is not started");
return ERROR_END_OF_STREAM;
@@ -1975,15 +1981,15 @@
}
mDone = true;
- ALOGD("%s track source stopping", mIsAudio? "Audio": "Video");
+ ALOGD("%s track source stopping", getTrackType());
mSource->stop();
- ALOGD("%s track source stopped", mIsAudio? "Audio": "Video");
+ ALOGD("%s track source stopped", getTrackType());
void *dummy;
pthread_join(mThread, &dummy);
status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
- ALOGD("%s track stopped", mIsAudio? "Audio": "Video");
+ ALOGD("%s track stopped", getTrackType());
return err;
}
@@ -2381,8 +2387,10 @@
if (mIsAudio) {
prctl(PR_SET_NAME, (unsigned long)"AudioTrackEncoding", 0, 0, 0);
- } else {
+ } else if (mIsVideo) {
prctl(PR_SET_NAME, (unsigned long)"VideoTrackEncoding", 0, 0, 0);
+ } else {
+ prctl(PR_SET_NAME, (unsigned long)"MetadataTrackEncoding", 0, 0, 0);
}
if (mOwner->isRealTimeRecording()) {
@@ -2393,7 +2401,7 @@
status_t err = OK;
MediaBuffer *buffer;
- const char *trackName = mIsAudio ? "Audio" : "Video";
+ const char *trackName = getTrackType();
while (!mDone && (err = mSource->read(&buffer)) == OK) {
if (buffer->range_length() == 0) {
buffer->release();
@@ -2450,6 +2458,16 @@
continue;
}
+ // Per-frame metadata sample's size must be smaller than max allowed.
+ if (!mIsVideo && !mIsAudio && buffer->range_length() >= kMaxMetadataSize) {
+ ALOGW("Buffer size is %zu. Maximum metadata buffer size is %lld for %s track",
+ buffer->range_length(), (long long)kMaxMetadataSize, trackName);
+ buffer->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
++nActualFrames;
// Make a deep copy of the MediaBuffer and Metadata and release
@@ -2536,7 +2554,7 @@
break;
}
- if (!mIsAudio) {
+ if (mIsVideo) {
/*
* Composition time: timestampUs
* Decoding time: decodingTimeUs
@@ -2661,7 +2679,6 @@
timestampUs += deltaUs;
}
}
-
mStszTableEntries->add(htonl(sampleSize));
if (mStszTableEntries->count() > 2) {
@@ -2808,7 +2825,7 @@
return true;
}
- if (!mIsAudio && mStssTableEntries->count() == 0) { // no sync frames for video
+ if (mIsVideo && mStssTableEntries->count() == 0) { // no sync frames for video
ALOGE("There are no sync frames for video track");
return true;
}
@@ -2831,7 +2848,7 @@
mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_INFO,
trackNum | MEDIA_RECORDER_TRACK_INFO_TYPE,
- mIsAudio? 0: 1);
+ mIsAudio ? 0 : 1);
mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_INFO,
trackNum | MEDIA_RECORDER_TRACK_INFO_DURATION_MS,
@@ -2971,11 +2988,11 @@
return OK;
}
+const char *MPEG4Writer::Track::getTrackType() const {
+ return mIsAudio ? "Audio" : (mIsVideo ? "Video" : "Metadata");
+}
+
void MPEG4Writer::Track::writeTrackHeader(bool use32BitOffset) {
-
- ALOGV("%s track time scale: %d",
- mIsAudio? "Audio": "Video", mTimeScale);
-
uint32_t now = getMpeg4Time();
mOwner->beginBox("trak");
writeTkhdBox(now);
@@ -2985,8 +3002,10 @@
mOwner->beginBox("minf");
if (mIsAudio) {
writeSmhdBox();
- } else {
+ } else if (mIsVideo) {
writeVmhdBox();
+ } else {
+ writeNmhdBox();
}
writeDinfBox();
writeStblBox(use32BitOffset);
@@ -3002,13 +3021,15 @@
mOwner->writeInt32(1); // entry count
if (mIsAudio) {
writeAudioFourCCBox();
- } else {
+ } else if (mIsVideo) {
writeVideoFourCCBox();
+ } else {
+ writeMetadataFourCCBox();
}
mOwner->endBox(); // stsd
writeSttsBox();
- writeCttsBox();
- if (!mIsAudio) {
+ if (mIsVideo) {
+ writeCttsBox();
writeStssBox();
}
writeStszBox();
@@ -3017,6 +3038,20 @@
mOwner->endBox(); // stbl
}
+void MPEG4Writer::Track::writeMetadataFourCCBox() {
+ const char *mime;
+ bool success = mMeta->findCString(kKeyMIMEType, &mime);
+ CHECK(success);
+ const char *fourcc = getFourCCForMime(mime);
+ if (fourcc == NULL) {
+ ALOGE("Unknown mime type '%s'.", mime);
+ TRESPASS();
+ }
+ mOwner->beginBox(fourcc); // TextMetaDataSampleEntry
+ mOwner->writeCString(mime); // metadata mime_format
+ mOwner->endBox(); // mett
+}
+
void MPEG4Writer::Track::writeVideoFourCCBox() {
const char *mime;
bool success = mMeta->findCString(kKeyMIMEType, &mime);
@@ -3024,7 +3059,7 @@
const char *fourcc = getFourCCForMime(mime);
if (fourcc == NULL) {
ALOGE("Unknown mime type '%s'.", mime);
- CHECK(!"should not be here, unknown mime type.");
+ TRESPASS();
}
mOwner->beginBox(fourcc); // video format
@@ -3097,7 +3132,7 @@
const char *fourcc = getFourCCForMime(mime);
if (fourcc == NULL) {
ALOGE("Unknown mime type '%s'.", mime);
- CHECK(!"should not be here, unknown mime type.");
+ TRESPASS();
}
mOwner->beginBox(fourcc); // audio format
@@ -3240,7 +3275,7 @@
mOwner->writeCompositionMatrix(mRotation); // matrix
- if (mIsAudio) {
+ if (!mIsVideo) {
mOwner->writeInt32(0);
mOwner->writeInt32(0);
} else {
@@ -3273,16 +3308,22 @@
mOwner->endBox();
}
+void MPEG4Writer::Track::writeNmhdBox() {
+ mOwner->beginBox("nmhd");
+ mOwner->writeInt32(0); // version=0, flags=0
+ mOwner->endBox();
+}
+
void MPEG4Writer::Track::writeHdlrBox() {
mOwner->beginBox("hdlr");
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt32(0); // component type: should be mhlr
- mOwner->writeFourcc(mIsAudio ? "soun" : "vide"); // component subtype
+ mOwner->writeFourcc(mIsAudio ? "soun" : (mIsVideo ? "vide" : "meta")); // component subtype
mOwner->writeInt32(0); // reserved
mOwner->writeInt32(0); // reserved
mOwner->writeInt32(0); // reserved
// Removing "r" for the name string just makes the string 4 byte aligned
- mOwner->writeCString(mIsAudio ? "SoundHandle": "VideoHandle"); // name
+ mOwner->writeCString(mIsAudio ? "SoundHandle": (mIsVideo ? "VideoHandle" : "MetadHandle"));
mOwner->endBox();
}
@@ -3409,10 +3450,6 @@
}
void MPEG4Writer::Track::writeCttsBox() {
- if (mIsAudio) { // ctts is not for audio
- return;
- }
-
// There is no B frame at all
if (mMinCttsOffsetTimeUs == mMaxCttsOffsetTimeUs) {
return;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 12eca10..f29f786 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -67,6 +67,8 @@
static const int kMaxRetry = 2;
static const int kMaxReclaimWaitTimeInUs = 500000; // 0.5s
+////////////////////////////////////////////////////////////////////////////////
+
struct ResourceManagerClient : public BnResourceManagerClient {
explicit ResourceManagerClient(MediaCodec* codec) : mMediaCodec(codec) {}
@@ -171,10 +173,211 @@
return mService->reclaimResource(mPid, resources);
}
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+enum {
+ kWhatFillThisBuffer = 'fill',
+ kWhatDrainThisBuffer = 'drai',
+ kWhatEOS = 'eos ',
+ kWhatStopCompleted = 'scom',
+ kWhatReleaseCompleted = 'rcom',
+ kWhatFlushCompleted = 'fcom',
+ kWhatError = 'erro',
+ kWhatComponentAllocated = 'cAll',
+ kWhatComponentConfigured = 'cCon',
+ kWhatInputSurfaceCreated = 'isfc',
+ kWhatInputSurfaceAccepted = 'isfa',
+ kWhatSignaledInputEOS = 'seos',
+ kWhatBuffersAllocated = 'allc',
+ kWhatOutputFramesRendered = 'outR',
+};
+
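+// Bridges CodecBase::Callback calls onto the MediaCodec looper: each callback dups
+// the kWhatCodecNotify message, tags it with one of the local 'what' codes above,
+// attaches its payload, and posts it for MediaCodec::onMessageReceived() to handle.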
+class MediaCodecCallback : public CodecBase::Callback {
+public:
+ explicit MediaCodecCallback(const sp<AMessage> ¬ify);
+ virtual ~MediaCodecCallback();
+
+ virtual void fillThisBuffer(IOMX::buffer_id bufferId, const sp<MediaCodecBuffer> &buffer,
+ const sp<AMessage> &reply) override;
+ virtual void drainThisBuffer(IOMX::buffer_id bufferId, const sp<MediaCodecBuffer> &buffer,
+ int32_t flags, const sp<AMessage> &reply) override;
+ virtual void onEos(status_t err) override;
+ virtual void onStopCompleted() override;
+ virtual void onReleaseCompleted() override;
+ virtual void onFlushCompleted() override;
+ virtual void onError(status_t err, enum ActionCode actionCode) override;
+ virtual void onComponentAllocated(const char *componentName) override;
+ virtual void onComponentConfigured(
+ const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) override;
+ virtual void onInputSurfaceCreated(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat,
+ const sp<BufferProducerWrapper> &inputSurface) override;
+ virtual void onInputSurfaceCreationFailed(status_t err) override;
+ virtual void onInputSurfaceAccepted(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat) override;
+ virtual void onInputSurfaceDeclined(status_t err) override;
+ virtual void onSignaledInputEOS(status_t err) override;
+ virtual void onBuffersAllocated(
+ int32_t portIndex, const sp<CodecBase::PortDescription> &portDesc) override;
+ virtual void onOutputFramesRendered(const std::list<FrameRenderTracker::Info> &done) override;
+private:
+ const sp<AMessage> mNotify;
+};
+
+MediaCodecCallback::MediaCodecCallback(const sp<AMessage> ¬ify) : mNotify(notify) {}
+
+MediaCodecCallback::~MediaCodecCallback() {}
+
+void MediaCodecCallback::fillThisBuffer(
+ IOMX::buffer_id bufferId,
+ const sp<MediaCodecBuffer> &buffer,
+ const sp<AMessage> &reply) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatFillThisBuffer);
+ notify->setInt32("buffer-id", bufferId);
+ notify->setObject("buffer", buffer);
+ notify->setMessage("reply", reply);
+ notify->post();
+}
+
+void MediaCodecCallback::drainThisBuffer(
+ IOMX::buffer_id bufferId,
+ const sp<MediaCodecBuffer> &buffer,
+ int32_t flags,
+ const sp<AMessage> &reply) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatDrainThisBuffer);
+ notify->setInt32("buffer-id", bufferId);
+ notify->setObject("buffer", buffer);
+ notify->setInt32("flags", flags);
+ notify->setMessage("reply", reply);
+ notify->post();
+}
+
+void MediaCodecCallback::onEos(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatEOS);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void MediaCodecCallback::onStopCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatStopCompleted);
+ notify->post();
+}
+
+void MediaCodecCallback::onReleaseCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatReleaseCompleted);
+ notify->post();
+}
+
+void MediaCodecCallback::onFlushCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatFlushCompleted);
+ notify->post();
+}
+
+void MediaCodecCallback::onError(status_t err, enum ActionCode actionCode) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatError);
+ notify->setInt32("err", err);
+ notify->setInt32("actionCode", actionCode);
+ notify->post();
+}
+
+void MediaCodecCallback::onComponentAllocated(const char *componentName) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatComponentAllocated);
+ notify->setString("componentName", componentName);
+ notify->post();
+}
+
+void MediaCodecCallback::onComponentConfigured(
+ const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatComponentConfigured);
+ notify->setMessage("input-format", inputFormat);
+ notify->setMessage("output-format", outputFormat);
+ notify->post();
+}
+
+void MediaCodecCallback::onInputSurfaceCreated(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat,
+ const sp<BufferProducerWrapper> &inputSurface) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceCreated);
+ notify->setMessage("input-format", inputFormat);
+ notify->setMessage("output-format", outputFormat);
+ notify->setObject("input-surface", inputSurface);
+ notify->post();
+}
+
+void MediaCodecCallback::onInputSurfaceCreationFailed(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceCreated);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void MediaCodecCallback::onInputSurfaceAccepted(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceAccepted);
+ notify->setMessage("input-format", inputFormat);
+ notify->setMessage("output-format", outputFormat);
+ notify->post();
+}
+
+void MediaCodecCallback::onInputSurfaceDeclined(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceAccepted);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void MediaCodecCallback::onSignaledInputEOS(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatSignaledInputEOS);
+ if (err != OK) {
+ notify->setInt32("err", err);
+ }
+ notify->post();
+}
+
+void MediaCodecCallback::onBuffersAllocated(
+ int32_t portIndex, const sp<CodecBase::PortDescription> &portDesc) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatBuffersAllocated);
+ notify->setInt32("portIndex", portIndex);
+ notify->setObject("portDesc", portDesc);
+ notify->post();
+}
+
+void MediaCodecCallback::onOutputFramesRendered(const std::list<FrameRenderTracker::Info> &done) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatOutputFramesRendered);
+ if (MediaCodec::CreateFramesRenderedMessage(done, notify)) {
+ notify->post();
+ }
+}
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
// static
sp<MediaCodec> MediaCodec::CreateByType(
- const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid) {
- sp<MediaCodec> codec = new MediaCodec(looper, pid);
+ const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
+ uid_t uid) {
+ sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
if (err != NULL) {
@@ -185,8 +388,8 @@
// static
sp<MediaCodec> MediaCodec::CreateByComponentName(
- const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid) {
- sp<MediaCodec> codec = new MediaCodec(looper, pid);
+ const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid, uid_t uid) {
+ sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
if (err != NULL) {
@@ -234,7 +437,7 @@
return new PersistentSurface(bufferProducer, bufferSource);
}
-MediaCodec::MediaCodec(const sp<ALooper> &looper, pid_t pid)
+MediaCodec::MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid)
: mState(UNINITIALIZED),
mReleasedByResourceManager(false),
mLooper(looper),
@@ -256,6 +459,11 @@
mDequeueOutputReplyID(0),
mHaveInputSurface(false),
mHavePendingInputBuffers(false) {
+ if (uid == kNoUid) {
+ mUid = IPCThreadState::self()->getCallingUid();
+ } else {
+ mUid = uid;
+ }
}
MediaCodec::~MediaCodec() {
@@ -364,7 +572,8 @@
mLooper->registerHandler(this);
- mCodec->setNotificationMessage(new AMessage(kWhatCodecNotify, this));
+ mCodec->setCallback(
+ std::make_shared<MediaCodecCallback>(new AMessage(kWhatCodecNotify, this)));
sp<AMessage> msg = new AMessage(kWhatInit, this);
msg->setString("name", name);
@@ -837,16 +1046,6 @@
return OK;
}
-status_t MediaCodec::getWidevineLegacyBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
- sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
- msg->setInt32("portIndex", kPortIndexInput);
- msg->setPointer("buffers", buffers);
- msg->setInt32("widevine", true);
-
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
-}
-
status_t MediaCodec::getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
msg->setInt32("portIndex", kPortIndexInput);
@@ -1060,7 +1259,7 @@
CHECK(msg->findInt32("what", &what));
switch (what) {
- case CodecBase::kWhatError:
+ case kWhatError:
{
int32_t err, actionCode;
CHECK(msg->findInt32("err", &err));
@@ -1195,7 +1394,7 @@
break;
}
- case CodecBase::kWhatComponentAllocated:
+ case kWhatComponentAllocated:
{
CHECK_EQ(mState, INITIALIZING);
setState(INITIALIZED);
@@ -1227,7 +1426,7 @@
break;
}
- case CodecBase::kWhatComponentConfigured:
+ case kWhatComponentConfigured:
{
if (mState == UNINITIALIZED || mState == INITIALIZED) {
// In case a kWhatError message came in and replied with error,
@@ -1256,7 +1455,7 @@
break;
}
- case CodecBase::kWhatInputSurfaceCreated:
+ case kWhatInputSurfaceCreated:
{
// response to initiateCreateInputSurface()
status_t err = NO_ERROR;
@@ -1280,7 +1479,7 @@
break;
}
- case CodecBase::kWhatInputSurfaceAccepted:
+ case kWhatInputSurfaceAccepted:
{
// response to initiateSetInputSurface()
status_t err = NO_ERROR;
@@ -1296,7 +1495,7 @@
break;
}
- case CodecBase::kWhatSignaledInputEOS:
+ case kWhatSignaledInputEOS:
{
// response to signalEndOfInputStream()
sp<AMessage> response = new AMessage;
@@ -1309,7 +1508,7 @@
}
- case CodecBase::kWhatBuffersAllocated:
+ case kWhatBuffersAllocated:
{
Mutex::Autolock al(mBufferLock);
int32_t portIndex;
@@ -1333,12 +1532,30 @@
static_cast<CodecBase::PortDescription *>(obj.get());
size_t numBuffers = portDesc->countBuffers();
+
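+ // For encrypted input, size a single MemoryDealer to cover every input buffer
+ // up front and back each buffer with shared memory here, replacing the lazy
+ // per-buffer allocation that previously lived in updateBuffers().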
+ size_t totalSize = 0;
+ for (size_t i = 0; i < numBuffers; ++i) {
+ if (portIndex == kPortIndexInput && mCrypto != NULL) {
+ totalSize += portDesc->bufferAt(i)->capacity();
+ }
+ }
+
+ if (totalSize) {
+ mDealer = new MemoryDealer(totalSize, "MediaCodec");
+ }
+
for (size_t i = 0; i < numBuffers; ++i) {
BufferInfo info;
info.mBufferID = portDesc->bufferIDAt(i);
info.mOwnedByClient = false;
+ sp<MediaCodecBuffer> buffer = portDesc->bufferAt(i);
+ if (portIndex == kPortIndexInput && mCrypto != NULL) {
+ info.mSharedEncryptedBuffer = mDealer->allocate(buffer->capacity());
+ buffer = new SharedMemoryBuffer(
+ mInputFormat, info.mSharedEncryptedBuffer);
+ }
buffers->push_back(info);
- mPortBufferArrays[portIndex].push_back(portDesc->bufferAt(i));
+ mPortBufferArrays[portIndex].push_back(buffer);
}
if (portIndex == kPortIndexOutput) {
@@ -1362,7 +1579,7 @@
break;
}
- case CodecBase::kWhatOutputFramesRendered:
+ case kWhatOutputFramesRendered:
{
// ignore these in all states except running, and check that we have a
// notification set
@@ -1374,7 +1591,7 @@
break;
}
- case CodecBase::kWhatFillThisBuffer:
+ case kWhatFillThisBuffer:
{
/* size_t index = */updateBuffers(kPortIndexInput, msg);
@@ -1432,7 +1649,7 @@
break;
}
- case CodecBase::kWhatDrainThisBuffer:
+ case kWhatDrainThisBuffer:
{
/* size_t index = */updateBuffers(kPortIndexOutput, msg);
@@ -1529,26 +1746,33 @@
break;
}
- case CodecBase::kWhatEOS:
+ case kWhatEOS:
{
// We already notify the client of this by using the
// corresponding flag in "onOutputBufferReady".
break;
}
- case CodecBase::kWhatShutdownCompleted:
+ case kWhatStopCompleted:
{
- if (mState == UNINITIALIZED) {
- // Ignore shutdown complete if we're already released.
+ if (mState != STOPPING) {
+ ALOGW("Received kWhatStopCompleted in state %d", mState);
break;
}
- if (mState == STOPPING) {
- setState(INITIALIZED);
- } else {
- CHECK_EQ(mState, RELEASING);
- setState(UNINITIALIZED);
- mComponentName.clear();
+ setState(INITIALIZED);
+ (new AMessage)->postReply(mReplyID);
+ break;
+ }
+
+ case kWhatReleaseCompleted:
+ {
+ if (mState != RELEASING) {
+ ALOGW("Received kWhatReleaseCompleted in state %d", mState);
+ break;
}
+ setState(UNINITIALIZED);
+ mComponentName.clear();
+
mFlags &= ~kFlagIsComponentAllocated;
mResourceManagerService->removeResource(getId(mResourceManagerClient));
@@ -1557,7 +1781,7 @@
break;
}
- case CodecBase::kWhatFlushCompleted:
+ case kWhatFlushCompleted:
{
if (mState != FLUSHING) {
ALOGW("received FlushCompleted message in state %d",
@@ -2108,12 +2332,7 @@
{
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- // Unfortunately widevine legacy source requires knowing all of the
- // codec input buffers, so we have to provide them even in async mode.
- int32_t widevine = 0;
- msg->findInt32("widevine", &widevine);
-
- if (!isExecuting() || ((mFlags & kFlagIsAsync) && !widevine)) {
+ if (!isExecuting() || (mFlags & kFlagIsAsync)) {
PostReplyWithError(replyID, INVALID_OPERATION);
break;
} else if (mFlags & kFlagStickyError) {
@@ -2345,9 +2564,10 @@
if (info->mNotify != NULL) {
sp<AMessage> msg = info->mNotify;
- info->mNotify = NULL;
- msg->setObject("buffer", (portIndex == kPortIndexInput && mCrypto != NULL)
+ msg->setObject("buffer", (info->mSecureData != nullptr)
? info->mSecureData : info->mData);
+ msg->setInt32("discarded", true);
+ info->mNotify = NULL;
if (isReclaim && info->mOwnedByClient) {
ALOGD("port %d buffer %zu still owned by client when codec is reclaimed",
portIndex, i);
@@ -2366,7 +2586,6 @@
}
mAvailPortBuffers[portIndex].clear();
- mPortBufferArrays[portIndex].clear();
}
size_t MediaCodec::updateBuffers(
@@ -2380,17 +2599,6 @@
sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
- if (portIndex == kPortIndexInput && mCrypto != NULL && mDealer == NULL) {
- // Lazy initialization for encrypted buffers.
- size_t capacity = buffer->capacity();
- size_t totalSize = capacity * buffers->size();
-
- mDealer = new MemoryDealer(totalSize, "MediaCodec");
- for (size_t i = 0; i < buffers->size(); ++i) {
- BufferInfo *info = &buffers->editItemAt(i);
- info->mSharedEncryptedBuffer = mDealer->allocate(capacity);
- }
- }
for (size_t i = 0; i < buffers->size(); ++i) {
BufferInfo *info = &buffers->editItemAt(i);
@@ -2401,8 +2609,7 @@
if (portIndex == kPortIndexInput && mCrypto != NULL) {
info->mSecureData = buffer;
- info->mData = new SharedMemoryBuffer(
- buffer->format(), info->mSharedEncryptedBuffer);
+ info->mData = mPortBufferArrays[portIndex][i];
} else {
info->mData = buffer;
}
@@ -2904,10 +3111,10 @@
}
if (mState == CONFIGURED && !mBatteryStatNotified) {
- BatteryNotifier::getInstance().noteStartVideo();
+ BatteryNotifier::getInstance().noteStartVideo(mUid);
mBatteryStatNotified = true;
} else if (mState == UNINITIALIZED && mBatteryStatNotified) {
- BatteryNotifier::getInstance().noteStopVideo();
+ BatteryNotifier::getInstance().noteStopVideo(mUid);
mBatteryStatNotified = false;
}
}
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index f2fdbc9..9362a07 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -28,7 +28,6 @@
#include "include/MPEG2PSExtractor.h"
#include "include/MPEG2TSExtractor.h"
#include "include/DRMExtractor.h"
-#include "include/WVMExtractor.h"
#include "include/FLACExtractor.h"
#include "include/AACExtractor.h"
#include "include/MidiExtractor.h"
@@ -151,15 +150,9 @@
ALOGW("creating media extractor in calling process");
return CreateFromService(source, mime);
} else {
- // Check if it's WVM, since WVMExtractor needs to be created in the media server process,
- // not the extractor process.
String8 mime8;
float confidence;
sp<AMessage> meta;
- if (SniffWVM(source, &mime8, &confidence, &meta) &&
- !strcasecmp(mime8, MEDIA_MIMETYPE_CONTAINER_WVM)) {
- return new WVMExtractor(source);
- }
// Check if it's es-based DRM, since DRMExtractor needs to be created in the media server
// process, not the extractor process.
@@ -192,14 +185,14 @@
const sp<DataSource> &source, const char *mime) {
ALOGV("MediaExtractor::CreateFromService %s", mime);
- DataSource::RegisterDefaultSniffers();
+ RegisterDefaultSniffers();
sp<AMessage> meta;
String8 tmp;
if (mime == NULL) {
float confidence;
- if (!source->sniff(&tmp, &confidence, &meta)) {
+ if (!sniff(source, &tmp, &confidence, &meta)) {
ALOGV("FAILED to autodetect media content.");
return NULL;
@@ -251,9 +244,6 @@
ret = new MatroskaExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
ret = new MPEG2TSExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM) && getuid() == AID_MEDIA) {
- // Return now. WVExtractor should not have the DrmFlag set in the block below.
- return new WVMExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
ret = new AACExtractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
@@ -273,4 +263,79 @@
return ret;
}
+Mutex MediaExtractor::gSnifferMutex;
+List<MediaExtractor::SnifferFunc> MediaExtractor::gSniffers;
+bool MediaExtractor::gSniffersRegistered = false;
+
+// static
+bool MediaExtractor::sniff(
+ const sp<DataSource> &source, String8 *mimeType, float *confidence, sp<AMessage> *meta) {
+ *mimeType = "";
+ *confidence = 0.0f;
+ meta->clear();
+
+ {
+ Mutex::Autolock autoLock(gSnifferMutex);
+ if (!gSniffersRegistered) {
+ return false;
+ }
+ }
+
+ for (List<SnifferFunc>::iterator it = gSniffers.begin();
+ it != gSniffers.end(); ++it) {
+ String8 newMimeType;
+ float newConfidence;
+ sp<AMessage> newMeta;
+ if ((*it)(source, &newMimeType, &newConfidence, &newMeta)) {
+ if (newConfidence > *confidence) {
+ *mimeType = newMimeType;
+ *confidence = newConfidence;
+ *meta = newMeta;
+ }
+ }
+ }
+
+ return *confidence > 0.0;
+}
+
+// static
+void MediaExtractor::RegisterSniffer_l(SnifferFunc func) {
+ for (List<SnifferFunc>::iterator it = gSniffers.begin();
+ it != gSniffers.end(); ++it) {
+ if (*it == func) {
+ return;
+ }
+ }
+
+ gSniffers.push_back(func);
+}
+
+// static
+void MediaExtractor::RegisterDefaultSniffers() {
+ Mutex::Autolock autoLock(gSnifferMutex);
+ if (gSniffersRegistered) {
+ return;
+ }
+
+ RegisterSniffer_l(SniffMPEG4);
+ RegisterSniffer_l(SniffMatroska);
+ RegisterSniffer_l(SniffOgg);
+ RegisterSniffer_l(SniffWAV);
+ RegisterSniffer_l(SniffFLAC);
+ RegisterSniffer_l(SniffAMR);
+ RegisterSniffer_l(SniffMPEG2TS);
+ RegisterSniffer_l(SniffMP3);
+ RegisterSniffer_l(SniffAAC);
+ RegisterSniffer_l(SniffMPEG2PS);
+ RegisterSniffer_l(SniffMidi);
+
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("drm.service.enabled", value, NULL)
+ && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
+ RegisterSniffer_l(SniffDRM);
+ }
+ gSniffersRegistered = true;
+}
+
+
} // namespace android
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index ad27856..6f8220f 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -22,7 +22,6 @@
#include "include/ESDS.h"
#include "include/NuCachedSource2.h"
-#include "include/WVMExtractor.h"
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -40,8 +39,7 @@
namespace android {
NuMediaExtractor::NuMediaExtractor()
- : mIsWidevineExtractor(false),
- mTotalBitrate(-1ll),
+ : mTotalBitrate(-1ll),
mDurationUs(-1ll) {
}
@@ -51,7 +49,8 @@
for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
TrackInfo *info = &mSelectedTracks.editItemAt(i);
- CHECK_EQ((status_t)OK, info->mSource->stop());
+ status_t err = info->mSource->stop();
+ ALOGE_IF(err != OK, "error %d stopping track %zu", err, i);
}
mSelectedTracks.clear();
@@ -77,48 +76,15 @@
return -ENOENT;
}
- mIsWidevineExtractor = false;
- if (!strncasecmp("widevine://", path, 11)) {
- String8 mimeType;
- float confidence;
- sp<AMessage> dummy;
- bool success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
-
- if (!success
- || strcasecmp(
- mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
- return ERROR_UNSUPPORTED;
- }
-
- sp<WVMExtractor> extractor = new WVMExtractor(dataSource);
- extractor->setAdaptiveStreamingMode(true);
-
- mImpl = extractor;
- mIsWidevineExtractor = true;
- } else {
- mImpl = MediaExtractor::Create(dataSource);
- }
+ mImpl = MediaExtractor::Create(dataSource);
if (mImpl == NULL) {
return ERROR_UNSUPPORTED;
}
sp<MetaData> fileMeta = mImpl->getMetaData();
- const char *containerMime;
- if (fileMeta != NULL
- && fileMeta->findCString(kKeyMIMEType, &containerMime)
- && !strcasecmp(containerMime, "video/wvm")) {
- // We always want to use "cryptoPluginMode" when using the wvm
- // extractor. We can tell that it is this extractor by looking
- // at the container mime type.
- // The cryptoPluginMode ensures that the extractor will actually
- // give us data in a call to MediaSource::read(), unlike its
- // default mode that we used in AwesomePlayer.
- // TODO: change default mode
- static_cast<WVMExtractor *>(mImpl.get())->setCryptoPluginMode(true);
- } else if (mImpl->getDrmFlag()) {
- // For all other drm content, we don't want to expose decrypted
- // content to Java application.
+ if (mImpl->getDrmFlag()) {
+ // Don't expose decrypted content to Java application
mImpl.clear();
mImpl = NULL;
return ERROR_UNSUPPORTED;
@@ -632,15 +598,7 @@
Mutex::Autolock autoLock(mLock);
int64_t bitrate;
- if (mIsWidevineExtractor) {
- sp<WVMExtractor> wvmExtractor =
- static_cast<WVMExtractor *>(mImpl.get());
-
- status_t finalStatus;
- *durationUs = wvmExtractor->getCachedDurationUs(&finalStatus);
- *eos = (finalStatus != OK);
- return true;
- } else if ((mDataSource->flags() & DataSource::kIsCachingDataSource)
+ if ((mDataSource->flags() & DataSource::kIsCachingDataSource)
&& getTotalBitrate(&bitrate)) {
sp<NuCachedSource2> cachedSource =
static_cast<NuCachedSource2 *>(mDataSource.get());
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 47d360c..8061bc6 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -572,6 +572,10 @@
}
for (size_t i = 0; i < mNumSyncSamples; ++i) {
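+ // stss sample numbers are 1-based; a zero entry would wrap around when the
+ // conversion below subtracts one, so skip it instead of producing a bogus index.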
+ if (mSyncSamples[i] == 0) {
+ ALOGE("b/32423862, unexpected zero value in stss");
+ continue;
+ }
mSyncSamples[i] = ntohl(mSyncSamples[i]) - 1;
}
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index d8fec5c..5e00c44 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -55,8 +55,6 @@
: mParsedMetaData(false),
mAlbumArt(NULL) {
ALOGV("StagefrightMetadataRetriever()");
-
- DataSource::RegisterDefaultSniffers();
}
StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
diff --git a/media/libstagefright/VBRISeeker.cpp b/media/libstagefright/VBRISeeker.cpp
index 58f2c60..5b8f23a 100644
--- a/media/libstagefright/VBRISeeker.cpp
+++ b/media/libstagefright/VBRISeeker.cpp
@@ -83,8 +83,23 @@
scale,
entrySize);
+ if (entrySize > 4) {
+ ALOGE("invalid VBRI entry size: %zu", entrySize);
+ return NULL;
+ }
+
+ sp<VBRISeeker> seeker = new (std::nothrow) VBRISeeker;
+ if (seeker == NULL) {
+ ALOGW("Couldn't allocate VBRISeeker");
+ return NULL;
+ }
+
size_t totalEntrySize = numEntries * entrySize;
- uint8_t *buffer = new uint8_t[totalEntrySize];
+ uint8_t *buffer = new (std::nothrow) uint8_t[totalEntrySize];
+ if (!buffer) {
+ ALOGW("Couldn't allocate %zu bytes", totalEntrySize);
+ return NULL;
+ }
n = source->readAt(pos + sizeof(vbriHeader), buffer, totalEntrySize);
if (n < (ssize_t)totalEntrySize) {
@@ -94,7 +109,6 @@
return NULL;
}
- sp<VBRISeeker> seeker = new VBRISeeker;
seeker->mBasePos = post_id3_pos + frameSize;
// only update mDurationUs if the calculated duration is valid (non zero)
// otherwise, leave duration at -1 so that getDuration() and getOffsetForTime()
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp
deleted file mode 100644
index d1b2f54..0000000
--- a/media/libstagefright/WVMExtractor.cpp
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "WVMExtractor"
-#include <utils/Log.h>
-
-#include "include/WVMExtractor.h"
-
-#include <arpa/inet.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <dlfcn.h>
-
-#include <utils/Errors.h>
-
-/* The extractor lifetime is short - just long enough to get
- * the media sources constructed - so the shared lib needs to remain open
- * beyond the lifetime of the extractor. So keep the handle as a global
- * rather than a member of the extractor
- */
-void *gVendorLibHandle = NULL;
-
-namespace android {
-
-static Mutex gWVMutex;
-
-WVMExtractor::WVMExtractor(const sp<DataSource> &source)
- : mDataSource(source)
-{
- Mutex::Autolock autoLock(gWVMutex);
-
- if (!getVendorLibHandle()) {
- return;
- }
-
- typedef WVMLoadableExtractor *(*GetInstanceFunc)(sp<DataSource>);
- GetInstanceFunc getInstanceFunc =
- (GetInstanceFunc) dlsym(gVendorLibHandle,
- "_ZN7android11GetInstanceENS_2spINS_10DataSourceEEE");
-
- if (getInstanceFunc) {
- if (source->DrmInitialization(
- MEDIA_MIMETYPE_CONTAINER_WVM) != NULL) {
- mImpl = (*getInstanceFunc)(source);
- CHECK(mImpl != NULL);
- setDrmFlag(true);
- } else {
- ALOGE("Drm manager failed to initialize.");
- }
- } else {
- ALOGE("Failed to locate GetInstance in libwvm.so");
- }
-}
-
-static void init_routine()
-{
- gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW);
- if (gVendorLibHandle == NULL) {
- ALOGE("Failed to open libwvm.so: %s", dlerror());
- }
-}
-
-bool WVMExtractor::getVendorLibHandle()
-{
- static pthread_once_t sOnceControl = PTHREAD_ONCE_INIT;
- pthread_once(&sOnceControl, init_routine);
-
- return gVendorLibHandle != NULL;
-}
-
-WVMExtractor::~WVMExtractor() {
-}
-
-size_t WVMExtractor::countTracks() {
- return (mImpl != NULL) ? mImpl->countTracks() : 0;
-}
-
-sp<IMediaSource> WVMExtractor::getTrack(size_t index) {
- if (mImpl == NULL) {
- return NULL;
- }
- return mImpl->getTrack(index);
-}
-
-sp<MetaData> WVMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
- if (mImpl == NULL) {
- return NULL;
- }
- return mImpl->getTrackMetaData(index, flags);
-}
-
-sp<MetaData> WVMExtractor::getMetaData() {
- if (mImpl == NULL) {
- return NULL;
- }
- return mImpl->getMetaData();
-}
-
-int64_t WVMExtractor::getCachedDurationUs(status_t *finalStatus) {
- if (mImpl == NULL) {
- return 0;
- }
-
- return mImpl->getCachedDurationUs(finalStatus);
-}
-
-status_t WVMExtractor::getEstimatedBandwidthKbps(int32_t *kbps) {
- if (mImpl == NULL) {
- return UNKNOWN_ERROR;
- }
-
- return mImpl->getEstimatedBandwidthKbps(kbps);
-}
-
-
-void WVMExtractor::setAdaptiveStreamingMode(bool adaptive) {
- if (mImpl != NULL) {
- mImpl->setAdaptiveStreamingMode(adaptive);
- }
-}
-
-void WVMExtractor::setCryptoPluginMode(bool cryptoPluginMode) {
- if (mImpl != NULL) {
- mImpl->setCryptoPluginMode(cryptoPluginMode);
- }
-}
-
-void WVMExtractor::setUID(uid_t uid) {
- if (mImpl != NULL) {
- mImpl->setUID(uid);
- }
-}
-
-status_t WVMExtractor::getError() {
- if (mImpl == NULL) {
- return UNKNOWN_ERROR;
- }
-
- return mImpl->getError();
-}
-
-void WVMExtractor::setError(status_t err) {
- if (mImpl != NULL) {
- mImpl->setError(err);
- }
-}
-
-bool SniffWVM(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
-
- Mutex::Autolock autoLock(gWVMutex);
-
- if (!WVMExtractor::getVendorLibHandle()) {
- return false;
- }
-
- typedef WVMLoadableExtractor *(*SnifferFunc)(const sp<DataSource>&);
- SnifferFunc snifferFunc =
- (SnifferFunc) dlsym(gVendorLibHandle,
- "_ZN7android15IsWidevineMediaERKNS_2spINS_10DataSourceEEE");
-
- if (snifferFunc) {
- if ((*snifferFunc)(source)) {
- *mimeType = MEDIA_MIMETYPE_CONTAINER_WVM;
- *confidence = 10.0f;
- return true;
- }
- } else {
- ALOGE("IsWidevineMedia not found in libwvm.so");
- }
-
- return false;
-}
-
-} //namespace android
-
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
index 30e3643..7290193 100644
--- a/media/libstagefright/filters/MediaFilter.cpp
+++ b/media/libstagefright/filters/MediaFilter.cpp
@@ -60,10 +60,6 @@
//////////////////// PUBLIC FUNCTIONS //////////////////////////////////////////
-void MediaFilter::setNotificationMessage(const sp<AMessage> &msg) {
- mNotify = msg;
-}
-
void MediaFilter::initiateAllocateComponent(const sp<AMessage> &msg) {
msg->setWhat(kWhatAllocateComponent);
msg->setTarget(this);
@@ -223,10 +219,7 @@
}
void MediaFilter::signalError(status_t error) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatError);
- notify->setInt32("err", error);
- notify->post();
+ mCallback->onError(error, ACTION_CODE_FATAL);
}
status_t MediaFilter::allocateBuffersOnPort(OMX_U32 portIndex) {
@@ -266,11 +259,6 @@
}
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
-
- notify->setInt32("portIndex", portIndex);
-
sp<PortDescription> desc = new PortDescription;
for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
@@ -278,9 +266,7 @@
desc->addBuffer(info.mBufferID, info.mData);
}
-
- notify->setObject("portDesc", desc);
- notify->post();
+ mCallback->onBuffersAllocated(portIndex, desc);
return OK;
}
@@ -314,20 +300,14 @@
info->mGeneration = mGeneration;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
-
info->mData->meta()->clear();
- notify->setObject("buffer", info->mData);
sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, this);
reply->setInt32("buffer-id", info->mBufferID);
- notify->setMessage("reply", reply);
-
info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
- notify->post();
+
+ mCallback->fillThisBuffer(info->mBufferID, info->mData, reply);
}
void MediaFilter::postDrainThisBuffer(BufferInfo *info) {
@@ -335,27 +315,17 @@
info->mGeneration = mGeneration;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
- notify->setInt32("flags", info->mOutputFlags);
- notify->setObject("buffer", info->mData);
-
sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, this);
reply->setInt32("buffer-id", info->mBufferID);
- notify->setMessage("reply", reply);
-
- notify->post();
+ mCallback->drainThisBuffer(
+ info->mBufferID, info->mData, info->mOutputFlags, reply);
info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
}
void MediaFilter::postEOS() {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatEOS);
- notify->setInt32("err", ERROR_END_OF_STREAM);
- notify->post();
+ mCallback->onEos(ERROR_END_OF_STREAM);
ALOGV("Sent kWhatEOS.");
}
@@ -445,11 +415,8 @@
return;
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatComponentAllocated);
// HACK - need "OMX.google" to use MediaCodec's software renderer
- notify->setString("componentName", "OMX.google.MediaFilter");
- notify->post();
+ mCallback->onComponentAllocated("OMX.google.MediaFilter");
mState = INITIALIZED;
ALOGV("Handled kWhatAllocateComponent.");
}
@@ -526,12 +493,7 @@
mOutputFormat->setInt32("width", mWidth);
mOutputFormat->setInt32("height", mHeight);
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatComponentConfigured);
- notify->setString("componentName", "MediaFilter");
- notify->setMessage("input-format", mInputFormat);
- notify->setMessage("output-format", mOutputFormat);
- notify->post();
+ mCallback->onComponentConfigured(mInputFormat, mOutputFormat);
mState = CONFIGURED;
ALOGV("Handled kWhatConfigureComponent.");
}
@@ -675,9 +637,11 @@
mState = INITIALIZED;
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
+ if (keepComponentAllocated) {
+ mCallback->onStopCompleted();
+ } else {
+ mCallback->onReleaseCompleted();
+ }
}
void MediaFilter::onFlush() {
@@ -699,9 +663,7 @@
mPortEOS[kPortIndexOutput] = false;
mInputEOSResult = OK;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
+ mCallback->onFlushCompleted();
ALOGV("Posted kWhatFlushCompleted");
// MediaCodec returns all input buffers after flush, so in
@@ -733,13 +695,10 @@
return;
}
- sp<AMessage> reply = mNotify->dup();
- reply->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
- reply->setObject(
- "input-surface",
+ mCallback->onInputSurfaceCreated(
+ nullptr, nullptr,
new BufferProducerWrapper(
mGraphicBufferListener->getIGraphicBufferProducer()));
- reply->post();
}
void MediaFilter::onInputFrameAvailable() {
@@ -801,9 +760,7 @@
}
mPortEOS[kPortIndexOutput] = true;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
- notify->post();
+ mCallback->onSignaledInputEOS(OK);
ALOGV("Output stream saw EOS.");
}
diff --git a/media/libstagefright/foundation/tests/Android.mk b/media/libstagefright/foundation/tests/Android.mk
new file mode 100644
index 0000000..e7598ca
--- /dev/null
+++ b/media/libstagefright/foundation/tests/Android.mk
@@ -0,0 +1,33 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := sf_foundation_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+ Flagged_test.cpp \
+ TypeTraits_test.cpp \
+ Utils_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright_foundation \
+
+LOCAL_C_INCLUDES := \
+ frameworks/av/include \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+include $(BUILD_NATIVE_TEST)
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
diff --git a/media/libstagefright/foundation/tests/Flagged_test.cpp b/media/libstagefright/foundation/tests/Flagged_test.cpp
new file mode 100644
index 0000000..3c90699
--- /dev/null
+++ b/media/libstagefright/foundation/tests/Flagged_test.cpp
@@ -0,0 +1,639 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Flagged_test"
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/foundation/Flagged.h>
+
+namespace android {
+
+/**
+ * Helper template that can be used to print values in static_assert error messages.
+ *
+ * Use integers here.
+ */
+template<bool, int ...N>
+struct _print_as_warning { };
+
+template<int ...N>
+struct _print_as_warning<true, N...> : std::true_type { };
+
+#define static_assert_equals(a, b, msg) \
+static_assert(_print_as_warning<(a) == (b), a, b>::value, msg)
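+// Usage sketch, e.g. static_assert_equals(sizeof(i32_800f_16), sizeof(i32_800f_0), "");
+// on failure the two compared values appear as template arguments of _print_as_warning
+// in the compiler diagnostic, which makes the mismatch easy to read.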
+
+class FlaggedTest : public ::testing::Test {
+protected:
+ // empty structs
+ struct A0 { };
+ struct A1 { };
+ struct A_A0 : public A0 { };
+
+ // simple struct
+ struct BB {
+ int32_t i;
+ uint32_t u;
+ };
+
+ // struct inheriting from A0
+ struct BB_A0 : public A0 {
+ int32_t i;
+ uint32_t u;
+ };
+
+ // struct inheriting from struct inheriting A0
+ struct BB_AA0 : public A_A0 {
+ int32_t i;
+ uint32_t u;
+ };
+
+ // struct that wraps
+ struct WBBA0 {
+ BB_A0 b;
+ };
+
+ struct WBBA0_A1 : public A1 {
+ BB_A0 b;
+ };
+
+ struct WBBA0_A0 : public A0 {
+ BB_A0 b;
+ };
+
+ struct WBB_A0 : public A0 {
+ BB b;
+ };
+
+ struct WBBA0_AA0 : public A_A0 {
+ BB_A0 b;
+ };
+
+ struct WBBAA0_A0 : public A0 {
+ BB_AA0 b;
+ };
+
+ struct WWBBA0_A0 : public A0 {
+ WBBA0 b;
+ };
+};
+
+/**
+ * This test documents the size behavior of a class that wraps a type inheriting from an
+ * interface while itself inheriting from that same interface: the two A0 subobjects need
+ * distinct addresses, so the empty-base optimization cannot apply twice and the size grows.
+ * While we no longer use this construct, we want to track if this defect is ever fixed.
+ */
+TEST_F(FlaggedTest, StaticSanityTests) {
+ static_assert(sizeof(A0) == 1, "");
+ static_assert(sizeof(A1) == 1, "");
+ static_assert(sizeof(A_A0) == 1, "");
+
+ static constexpr size_t size = sizeof(BB); // original [pair]
+
+ // inheriting from A0 does not increase size
+ static_assert(sizeof(BB_A0) == size, ""); // [pair]:A0
+ static_assert(sizeof(BB_AA0) == size, ""); // [pair]:[:A0]
+
+ // wrapping a class that inherits from A0 does not increase size
+ static_assert(sizeof(WBBA0) == size, ""); // [ [pair]:[:A0] ]
+
+ // wrapping a class that inherits from A0 while also inheriting from A1 does not increase size
+ static_assert(sizeof(WBBA0_A1) == size, ""); // [ [pair]:A0 ]:A1
+
+ // wrapping a class that inherits from A0 while also inheriting from A0 DOES increase size
+ EXPECT_GT(sizeof(WBBA0_A0), size); // [ [pair]:A0 ]:A0
+
+ // wrapping a class that does not inherit from A0 while inheriting from A0 does not increase
+ // size
+ static_assert(sizeof(WBB_A0) == size, ""); // [[pair]]:A0
+
+ // wrapping a class that inherits from A0 while also inheriting from a class that inherits
+ // from A0 does increase size
+ EXPECT_GT(sizeof(WBBA0_AA0), size); // [ [pair]:A0 ]:[:A0]
+
+ // wrapping a class that indirectly inherits from A0 while also inheriting from A0 does
+ // increase size
+ EXPECT_GT(sizeof(WBBAA0_A0), size); // [ [pair]:[:A0] ]:A0
+
+ // wrapping a class that wraps a class inheriting from A0, while also inheriting A0, does increase size
+ EXPECT_GT(sizeof(WWBBA0_A0), size); // [ [pair]:A0 ]:A0
+}
+
+enum FLAG : int32_t {
+ kMask0 = 0x0FF,
+ kFlag0_A = 0x0AA,
+ kFlag0_B = 0x0BB,
+ kFlag0_C = 0x0CC,
+ kMask1 = 0xFF0,
+ kFlag1_A = 0xAA0,
+ kFlag1_B = 0xBB0,
+ kFlag1_C = 0xCC0,
+ kMaskCommon = 0x0F0,
+};
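+// kMask0 (bits 0-7) and kMask1 (bits 4-11) overlap in kMaskCommon (bits 4-7);
+// the combined-flag tests below rely on this shared region.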
+
+TEST_F(FlaggedTest, BasicExample) {
+ enum SafeFlags : uint32_t {
+ kUnsafe,
+ kSafe,
+ kSafeMask = _Flagged_helper::minMask(kSafe),
+ };
+ typedef Flagged<int32_t, SafeFlags, kSafeMask> safeInt32;
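+ // safeInt32 packs a SafeFlags value alongside the int32_t payload; flags() and
+ // get() below read and write the two parts independently.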
+
+ safeInt32 a(kUnsafe);
+ a.setFlags(kSafe);
+ a.get() = 15;
+ EXPECT_EQ(a.flags(), kSafe);
+ EXPECT_EQ(a.get(), 15);
+
+ enum OriginFlags : uint32_t {
+ kUnknown,
+ kConst,
+ kCalculated,
+ kComponent,
+ kApplication,
+ kFile,
+ kBinder,
+ kOriginMask = _Flagged_helper::minMask(kBinder),
+ };
+ typedef Flagged<safeInt32, OriginFlags, kOriginMask>
+ trackedSafeInt32;
+
+ static_assert(sizeof(trackedSafeInt32) == sizeof(safeInt32), "");
+
+ trackedSafeInt32 b(kConst, kSafe, 1);
+ EXPECT_EQ(b.flags(), kConst);
+ EXPECT_EQ(b.get().flags(), kSafe);
+ EXPECT_EQ(b.get().get(), 1);
+ b.setFlags(kCalculated);
+ volatile bool overflow = true;
+ b.get().setFlags(overflow ? kUnsafe : kSafe);
+
+ enum ValidatedFlags : uint32_t {
+ kUnsafeV = kUnsafe,
+ kSafeV = kSafe,
+ kValidated = kSafe | 2,
+ kSharedMaskV = kSafeMask,
+ kValidatedMask = _Flagged_helper::minMask(kValidated),
+ };
+ typedef Flagged<safeInt32, ValidatedFlags, kValidatedMask, kSharedMaskV> validatedInt32;
+
+ validatedInt32 v(kUnsafeV, kSafe, 10);
+ EXPECT_EQ(v.flags(), kUnsafeV);
+ EXPECT_EQ(v.get().flags(), kUnsafe); // !kUnsafeV overrides kSafe
+ EXPECT_EQ(v.get().get(), 10);
+ v.setFlags(kValidated);
+ EXPECT_EQ(v.flags(), kValidated);
+ EXPECT_EQ(v.get().flags(), kSafe);
+ v.get().setFlags(kUnsafe);
+ EXPECT_EQ(v.flags(), 2); // NOTE: sharing masks with enums allows strange situations to occur
+}
+
+TEST_F(FlaggedTest, _Flagged_helper_Test) {
+ using helper = _Flagged_helper;
+
+ using i32 = int32_t;
+ using u32 = uint32_t;
+ using u8 = uint8_t;
+
+ // base2
+ static_assert(Flagged<i32, u32, 0u, 0u, 0>::sFlagMask == 0u, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 0>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 0>::sEffectiveMask == 0u, "");
+
+ static_assert(Flagged<i32, u32, 0u, 0u, 10>::sFlagMask == 0u, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 10>::sFlagShift == 10, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 10>::sEffectiveMask == 0u, "");
+
+ static_assert(Flagged<i32, u32, 0u, 0u, -1>::sFlagMask == 0u, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, -1>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, -1>::sEffectiveMask == 0u, "");
+
+ static_assert(Flagged<i32, u32, 99u, 0u, 0>::sFlagMask == 99u, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, 0>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, 0>::sEffectiveMask == 99u, "");
+
+ static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sFlagMask == 0x99u, "");
+ static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sFlagShift == 12, "");
+ static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sEffectiveMask == 0x99000u, "");
+
+ static_assert(Flagged<i32, u32, 99u, 0u, -1>::sFlagMask == 99u, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, -1>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, -1>::sEffectiveMask == 99u, "");
+
+ // mask_of<T, Flag>
+ // also Flagged<> no default
+ typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 0 /* shift */> i32_800f_0;
+ typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 4 /* shift */> i32_800f_4;
+ // this also tests that these types can be instantiated
+ static_assert(sizeof(i32_800f_0) >= sizeof(i32) + sizeof(u32),
+ "should be at least size of component types");
+ static_assert(sizeof(i32_800f_4) == sizeof(i32_800f_0), "regardless of shift");
+ static_assert(!i32_800f_0::sFlagCombined, "");
+ static_assert(!i32_800f_4::sFlagCombined, "");
+
+ static_assert(helper::mask_of<i32_800f_0, u32>::value == 0x800F, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_0, i32>::value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_0, u32>::effective_value == 0x800F, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_0, i32>::effective_value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_0, u32>::shift == 0, "incorrect shift");
+ static_assert(helper::mask_of<i32_800f_0, i32>::shift == 0,
+ "shift should be 0 when types mismatch");
+
+ static_assert(helper::mask_of<i32_800f_4, u32>::value == 0x800F, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_4, i32>::value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_4, u32>::effective_value == 0x800F0, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_4, i32>::effective_value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_4, u32>::shift == 4, "incorrect shift");
+ static_assert(helper::mask_of<i32_800f_4, i32>::shift == 0,
+ "shift should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32, u32>::value == 0, "mask should be 0 if not masked");
+ static_assert(helper::mask_of<i32, i32>::value == 0, "mask should be 0 if not masked");
+
+ // lshift(value, n)
+ static_assert(helper::lshift(0U, 0) == 0U, "");
+ static_assert(helper::lshift(0U, 30) == 0U, "");
+ static_assert(helper::lshift(1U, 0) == 1U, "");
+ static_assert(helper::lshift(1U, 10) == 1024U, "");
+ static_assert(helper::lshift(10U, 10) == 10240U, "");
+ static_assert(helper::lshift(10, 10) == 10240, "");
+ static_assert(helper::lshift(-10, 0) == -10, "");
+ // static_assert(helper::lshift(-10, 10) == -10240, ""); // error: left shift of negative value
+
+ // minMask(maxValue)
+ static_assert(helper::minMask(0U) == 0U, "lowest 0 bits");
+ static_assert(helper::minMask(1U) == 1U, "lowest 1 bit");
+ static_assert(helper::minMask(2U) == 3U, "lowest 2 bits");
+ static_assert(helper::minMask(3U) == 3U, "lowest 2 bits");
+ static_assert(helper::minMask(4U) == 7U, "lowest 3 bits");
+ static_assert(helper::minMask(~0U) == ~0U, "all bits");
+ // static_assert(helper::minMask(10) == 0xF, "all bits"); // error: must be unsigned
+
+ // topBits(n)
+ static_assert(helper::topBits<u32>(0) == 0U, "top 0 bit");
+ static_assert(helper::topBits<u32>(1) == 0x80000000U, "top 1 bit");
+ static_assert(helper::topBits<u32>(2) == 0xC0000000U, "top 2 bits");
+ static_assert(helper::topBits<u32>(12) == 0xFFF00000U, "top 12 bits");
+ static_assert(helper::topBits<u32>(32) == 0xFFFFFFFFU, "all bits");
+ // static_assert(helper::topBits<u32>(33) == 0xFFFFFFFFU, ""); // should OVERFLOW
+
+ static_assert(helper::topBits<u8>(0) == 0U, "top 0 bit");
+ static_assert(helper::topBits<u8>(1) == 0x80U, "top 1 bit");
+ static_assert(helper::topBits<u8>(2) == 0xC0U, "top 2 bits");
+ static_assert(helper::topBits<u8>(8) == 0xFFU, "all bits");
+ // static_assert(helper::topBits<u8>(9) == 0xFFU, ""); // should OVERFLOW
+
+ // getShift(mask, base, shared, base-shift, base-effective)
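+ // The assertions below exercise getShift as returning the left-shift needed so that
+ // (mask << shift) stays clear of base's effective mask outside the shared bits;
+ // a negative result (-1, -2) flags an invalid combination.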
+ static_assert(helper::getShift(0u, 0u, 0u, 0, 0u) == 0, "no flags require no shift");
+ static_assert(helper::getShift(0u, 0u, 1u, 0, 0u) == -1,
+ "shared must be within mask and base mask");
+ static_assert(helper::getShift(0u, 1u, 1u, 0, 1u) == -1, "shared must be within mask");
+ static_assert(helper::getShift(0u, 1u, 0u, 0, 1u) == 0,
+ "no flags require no shift even with base mask");
+ static_assert(helper::getShift(0u, 1u, 0u, 1, 2u) == 0,
+ "no flags require no shift even with shifted base mask");
+ static_assert(helper::getShift(1u, 0u, 0u, 0, 0u) == 0, "no base mask requires no shift");
+ static_assert(helper::getShift(1u, 1u, 0u, 0, 1u) == 1,
+ "overlapping mask and basemask requires shift");
+ static_assert(helper::getShift(1u, 1u, 0u, 0, 1u) == 1,
+ "overlapping mask and basemask requires shift");
+ static_assert(helper::getShift(1u, 1u, 1u, 0, 1u) == 0,
+ "shared mask requires using base shift");
+ static_assert(helper::getShift(1u, 1u, 1u, 1, 2u) == 1,
+ "shared mask requires using base shift");
+ static_assert(helper::getShift(3u, 5u, 1u, 0, 5u) == 0,
+ "mask and basemask that overlap only in shared region requires no shift");
+ static_assert(helper::getShift(3u, 7u, 1u, 0, 7u) == -1,
+ "mask and basemask must not overlap in more than shared region");
+ static_assert(helper::getShift(1u, 0u, 1u, 0, 0u) == -1, "shared must be within base mask");
+
+ static_assert(helper::getShift(0u, 1u, 0u, 1, 1u) == -2, "effective mask must cover base mask");
+ static_assert(helper::getShift(0u, 5u, 0u, 1, 2u) == -2, "effective mask must cover base mask");
+ static_assert(helper::getShift(0u, 5u, 0u, 1, 10u) == 0, "");
+ static_assert(helper::getShift(0u, 5u, 0u, 1, 31u) == 0,
+ "effective mask can be larger than base mask");
+
+ static_assert(helper::getShift(0x800Fu, 0x800Fu, 0x800Fu, 0, 0x800Fu) == 0,
+ "(0x800F << 0) & 0x800F == 0x800F");
+ static_assert(helper::getShift(0x800Fu, 0x800Fu, 0x800Fu, 16, 0x800F0000u) == 16,
+ "(0x800F << 0) & 0x800F == 0x800F");
+ static_assert(helper::getShift(0x1800Fu, 0x800Fu, 0x800Fu, 0, 0x800Fu) == 0,
+ "(0x1800F << 0) & 0x800F == 0x800F");
+ static_assert(helper::getShift(0x1800Fu, 0x800Fu, 0x800Fu, 16, 0x800F0000u) == -1,
+ "(0x1800F << 16) overflows");
+
+ // verify that when not sharing masks, effective mask makes the difference
+ static_assert(helper::getShift(0x800Fu, 0u, 0u, 0, 0x800Fu) == 4,
+ "(0x800F << 4) & 0x800F == 0");
+ static_assert(helper::getShift(0x800Fu, 0x2u, 0u, 0, 0x8002u) == 2,
+ "(0x800F << 2) & 0x8002 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x1u, 0u, 15, 0x8001u) == 1,
+ "(0x800F << 1) & 0x8001 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x800Fu, 0u, 16, 0x800F0000u) == 0,
+ "0x800F & 0x800F0000 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x800F8000u, 0u, 0, 0x800F8000u) == 5,
+ "(0x800F << 5) & 0x800F8000 == 0");
+ static_assert(helper::getShift(0x800Fu, 0xF0000u, 0u, 0, 0x800F8000u) == 5,
+ "(0x800F << 5) & 0x800F8000 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x1Fu, 0u, 15, 0x800F8000u) == 5,
+ "(0x800F << 5) & 0x800F8000 == 0");
+ static_assert(helper::getShift(0xFFu, 0x80808080u, 0u, 0, 0x80808080u) == -1,
+ "0xFF always overlaps with 0x80808080");
+ static_assert(helper::getShift(0xFFu, 0x10001000u, 0u, 3, 0x80808080u) == -1,
+ "0xFF always overlaps with 0x80808080");
+ static_assert(helper::getShift(0xFFu, 0x80808040u, 0u, 0, 0x80808040u) == 7,
+ "(0xFF << 7) & 0x 80808040 == 0");
+
+ // verify min_shift (mask must be positive or no shift can be required)
+ static_assert(helper::getShift(0xFF, 0x40808040, 0, 0, 0x40808040) == 7, "");
+ static_assert(helper::getShift((i32)0x800000FF, 0x40808040, 0, 0, 0x40808040) == -1, "");
+ static_assert(helper::getShift(0x100000FF, 0x40808040, 0, 0, 0x40808040) == -1, "");
+ static_assert(helper::getShift(0xFF, (i32)0x80808040, 0, 0, (i32)0x80808040) == 7, "");
+ static_assert(helper::getShift((i32)0x80007F80, 0x40808040, 0, 0, 0x40808040) == 0, "");
+
+ // shared mask can also be negative (but then no shift can be required)
+ static_assert(helper::getShift((i32)0x80007F80, (i32)0xC0808040, (i32)0x80000000,
+ 0, (i32)0xC0808040) == 0, "");
+ static_assert(helper::getShift((i32)0x80007F80, (i32)0xC0808040, (i32)0xC0000000,
+ 0, (i32)0xC0808040) == -1, "");
+ static_assert(helper::getShift((i32)0x80007F80, (i32)0x60404020, (i32)0x60000000,
+ 1, (i32)0xC0808040) == -1, "");
+
+ // min_shift
+ typedef Flagged<i32, u32, 0u> i32_0_0;
+ typedef Flagged<i32, u32, 1u> i32_1_0;
+ typedef Flagged<i32, u32, 1u, 0u, 1> i32_1_1;
+
+ // this is a wrapper over getShift, so same test cases apply when T is flagged
+ static_assert(helper::min_shift<i32_0_0, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32_0_0, u32, 0u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 0u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32_0_0, u32, 1u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 1u, 0u>::value == 1, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 1u, 1u>::value == 0, "");
+ static_assert(helper::min_shift<i32_1_1, u32, 1u, 1u>::value == 1, "");
+ static_assert(helper::min_shift<i32_1_1, u32, 3u, 0u>::value == 2, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 5u>, u32, 3u, 1u>::value == 0, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 7u>, u32, 3u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32_0_0, u32, 1u, 1u>::value == -1, "");
+
+ static_assert(helper::min_shift<i32_800f_0, u32, 0x800Fu, 0u>::value == 4, "");
+ static_assert(helper::min_shift<i32_800f_4, u32, 0x1800Fu, 0x800Fu>::value == 4, "");
+ static_assert(helper::min_shift<i32_800f_4, u32, 0x800Fu, 0u>::value == 0, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 0x8002u>, u32, 0x800Fu, 0u>::value == 2, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 0x8001u>, u32, 0x800Fu, 0u>::value == 1, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x800Fu, 0u, 16>, u32, 0x800Fu, 0u>::value == 0, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x800F8000u>, u32, 0x800Fu, 0u>::value == 5, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x80808080u>, u32, 0xFFu, 0u>::value == -1, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x80808040u>, u32, 0xFFu, 0u>::value == 7, "");
+
+ // for min_shift, non-tagged type behaves as if having base mask of 0
+ static_assert(helper::min_shift<i32, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<u32, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32, u32, 0u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32, u32, 1u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32, u32, 1u, 1u>::value == -1, "");
+
+ // verify min_shift (mask must be positive or no shift can be required)
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>, i32, 0xFF, 0>::value == 7, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x800000FF, 0>::value == -1, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+ i32, 0x100000FF, 0>::value == -1, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x80808040>,
+ i32, 0xFF, 0>::value == 7, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x80007F80, 0>::value == 0, "");
+
+ static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x80808040>,
+ i32, (i32)0x80007F80, (i32)0x80000000>::value == 0, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, (i32)0xC0808040>,
+ i32, (i32)0x80007F80, (i32)0xC0000000>::value == -1, "");
+ // note: cannot create a flagged type with signed flag and shift
+ // static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x60404020, 0, 1>,
+ // i32, (i32)0x40003FC0, (i32)0x40000000>::value == -1, "");
+
+ typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 16 /* shift */> i32_800f_16;
+ static_assert_equals(sizeof(i32_800f_16), sizeof(i32_800f_0), "");
+ // shifted mask overflows!
+ // typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 17 /* shift */> i32_800f_17;
+ // static_assert(sizeof(i32_800f_17) == sizeof(i32_800f_0), "");
+ typedef Flagged<i32, i32, 0x800F /* mask */, 0 /* shared mask */, 15 /* shift */> i32_800f_15i;
+ static_assert_equals(sizeof(i32_800f_15i), sizeof(i32_800f_0), "");
+ // shifted mask overflows!
+ // typedef Flagged<i32, i32, 0x800F /* mask */, 0 /* shared mask */, 16 /* shift */> i32_800f_16i;
+ // static_assert(sizeof(i32_800f_16i) == sizeof(i32_800f_0), "");
+
+ // canCombine(mask, base, shared, shift, base-shift, base-effective)
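+ // canCombine asks whether the given shift is usable: (mask << shift) must avoid
+ // base's effective mask except for the shared bits, which must line up with the
+ // base shift.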
+ static_assert(helper::canCombine(0u, 0u, 0u, 0, 0, 0u), "using no mask is valid");
+ static_assert(helper::canCombine(0u, 0u, 0u, 0, 0, 0u), "");
+ static_assert(helper::canCombine(0u, 0u, 0u, 4, 0, 0u), "");
+ static_assert(!helper::canCombine(0u, 0u, 1u, 0, 0, 0u),
+ "shared mask must be the overlap of masks");
+ static_assert(helper::canCombine(1u, 0u, 0u, 0, 0, 0u), "");
+ static_assert(helper::canCombine(1u, 0u, 0u, 4, 0, 0u), "");
+ static_assert(helper::canCombine(3u, 5u, 1u, 0, 0, 5u), "");
+ static_assert(!helper::canCombine(3u, 3u, 3u, 1, 0, 3u), "shift must match when sharing mask");
+ static_assert(helper::canCombine(3u, 3u, 3u, 1, 1, 6u), "");
+ static_assert(!helper::canCombine(3u, 3u, 3u, 1, 2, 12u), "shift must match when sharing mask");
+ static_assert(!helper::canCombine(3u, 7u, 1u, 0, 0, 7u), "");
+ static_assert(!helper::canCombine(1u, 0u, 1u, 0, 0, 0u), "");
+
+ static_assert(!helper::canCombine(0u, 1u, 1u, 0, 0, 1u),
+ "shared mask must be the overlap of masks");
+ static_assert(helper::canCombine(0u, 1u, 0u, 0, 0, 1u), "");
+ static_assert(helper::canCombine(0u, 1u, 0u, 4, 0, 1u), "");
+ static_assert(helper::canCombine(1u, 1u, 0u, 1, 0, 1u), "");
+ static_assert(!helper::canCombine(1u, 1u, 0u, 0, 0, 1u), "");
+ static_assert(helper::canCombine(1u, 1u, 0u, 1, 0, 1u), "");
+ static_assert(helper::canCombine(1u, 1u, 1u, 0, 0, 1u), "");
+ static_assert(!helper::canCombine(1u, 1u, 1u, 1, 0, 1u), "shift must match when sharing mask");
+
+ static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0u, 4, 0, 0x800Fu), "");
+ static_assert(!helper::canCombine(0x800Fu, 0x800Fu, 0u, 1, 0, 0x800Fu), "");
+ static_assert(helper::canCombine(0x800Fu, 0x8002u, 0u, 2, 0, 0x8002u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x8001u, 0u, 1, 0, 0x8001u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0u, 0, 16, 0x800F0000u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0x800Fu, 16, 16, 0x800F0000u), "");
+ static_assert(!helper::canCombine(0x1800Fu, 0x800Fu, 0u, 0, 16, 0x800F0000u), "");
+ static_assert(!helper::canCombine(0x1800Fu, 0x800Fu, 0x800Fu, 16, 16, 0x800F0000u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x800F8000u, 0u, 8, 0, 0x800F8000u), "");
+ static_assert(!helper::canCombine(0xFFu, 0x80808080u, 0u, -1, 0, 0x80808080u), "");
+ static_assert(helper::canCombine(0xFFu, 0x80808040u, 0u, 7, 0, 0x80808040u), "");
+ static_assert(helper::canCombine(0xFFu, 0x8000u, 0u, 7, 0, 0x80808040u), "");
+ static_assert(helper::canCombine(0xFFu, 0x101u, 0u, 7, 15, 0x80808040u), "");
+
+ // can combine signed-flagged types only if mask is positive or no shift is required
+ static_assert(!helper::canCombine(0xFF, 0x40808040, 0, 0, 0, 0x40808040), "");
+ static_assert(helper::canCombine(0xFF, 0x40808040, 0, 7, 0, 0x40808040), "");
+ static_assert(!helper::canCombine((i32)0x800000FF, 0x40808040, 0, 0, 0, 0x40808040), "");
+ static_assert(!helper::canCombine((i32)0x800000FF, 0x40808040, 0, 7, 0, 0x40808040), "");
+ static_assert(!helper::canCombine(0x100000FF, 0x40808040, 0, 0, 0, 0x40808040), "");
+ static_assert(!helper::canCombine(0x100000FF, 0x40808040, 0, 7, 0, 0x40808040), "");
+ static_assert(!helper::canCombine(0xFF, (i32)0x80808040, 0, 0, 0, (i32)0x80808040), "");
+ static_assert(helper::canCombine(0xFF, (i32)0x80808040, 0, 7, 0, (i32)0x80808040), "");
+ static_assert(helper::canCombine((i32)0x80007F80, 0x40808040, 0, 0, 0, 0x40808040), "");
+
+ static_assert(helper::canCombine((i32)0x80007F80, (i32)0x80808040, (i32)0x80000000, 0, 0, (i32)0x80808040), "");
+ static_assert(!helper::canCombine((i32)0xC0007F80, (i32)0x80808040, (i32)0xC0000000, 0, 0, (i32)0x80808040), "");
+ static_assert(!helper::canCombine((i32)0x80007F80, (i32)0x80808040, (i32)0x80000000, 1, 0, (i32)0x80808040), "");
+ static_assert(!helper::canCombine((i32)0xC0007F80, (i32)0x80808040, (i32)0xC0000000, 1, 0, (i32)0x80808040), "");
+
+ // can_combine<T, Flag, MASK, [SHARED_MASK], [SHIFT]
+ static_assert(helper::can_combine<i32_0_0, u32, 0u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 0u, 0u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 0u, 0u, 4>::value, "");
+ static_assert(!helper::can_combine<i32_0_0, u32, 0u, 1u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 1u, 0u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 1u, 0u, 4>::value, "");
+ static_assert(!helper::can_combine<i32_0_0, u32, 1u, 1u>::value, "");
+
+ static_assert(!helper::can_combine<i32_1_0, u32, 0u, 1u>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 0u, 0u>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 0u, 0u, 4>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32_1_0, u32, 1u, 0u, 0>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 0u, 1>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 1u>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 1u, 0>::value, "");
+ static_assert(!helper::can_combine<i32_1_0, u32, 1u, 1u, 1>::value,
+ "shouldn't be able to use SHIFT with SHARED_MASK");
+
+ static_assert(helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u, 4>::value, "");
+ static_assert(!helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u, 1>::value, "");
+ static_assert(helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x8002u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x8001u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x800F0000u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x800F8000u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, u32, 0x80808080u>, u32, 0xFFu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x80808040u>, u32, 0xFFu, 0u>::value, "");
+
+ // can combine signed-flagged types only if mask is positive or no shift is required
+ static_assert(helper::can_combine<Flagged<i32, i32, 0x40808040>, i32, 0xFF, 0>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x800000FF, 0>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, i32, 0x40808040>,
+ i32, 0x100000FF, 0>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, i32, (i32)0x80808040>, i32, 0xFF, 0>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x80007F80, 0>::value, "");
+
+ static_assert(helper::can_combine<Flagged<i32, i32, (i32)0x80808040>,
+ i32, (i32)0x80007F80, (i32)0x80000000>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, i32, (i32)0xC0808040>,
+ i32, (i32)0x80007F80, (i32)0xC0000000>::value, "");
+
+ static_assert(helper::min_shift<Flagged<i32, FLAG, (FLAG)0x80808040>,
+ FLAG, (FLAG)0x80007F80, (FLAG)0x80000000>::value == 0, "");
+ static_assert(helper::can_combine<Flagged<i32, FLAG, (FLAG)0x80808040>,
+ FLAG, (FLAG)0x80007F80, (FLAG)0x80000000>::value, "");
+
+ // cannot combine non-tagged types
+ static_assert(!helper::can_combine<i32, u32, 0u, 0u>::value, "");
+ static_assert(!helper::can_combine<u32, u32, 0u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 0u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 0u, 1u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 1u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 1u, 1u>::value, "");
+
+ typedef Flagged<i32_800f_0, u32, 0x800F /* mask */, 0 /* shared mask */> i32_800f_800f;
+ static_assert(i32_800f_800f::sFlagMask == 0x800F, "");
+ static_assert(i32_800f_800f::sFlagShift == 4, "");
+ static_assert(i32_800f_800f::sEffectiveMask == 0x880FF, "");
+ static_assert(!i32_800f_0::sFlagCombined, "");
+ static_assert(!i32_800f_4::sFlagCombined, "");
+
+ static_assert(i32_800f_800f::sFlagCombined, "");
+ static_assert_equals(sizeof(i32_800f_800f), sizeof(i32_800f_0), "");
+
+ typedef Flagged<i32_800f_0, u32, 0x1FFFF /* mask */> i32_800f_1ffff;
+ static_assert(i32_800f_1ffff::sFlagMask == 0x1FFFF, "");
+ static_assert(i32_800f_1ffff::sFlagShift == 0, "");
+ static_assert(i32_800f_1ffff::sEffectiveMask == 0x1FFFF, "");
+ static_assert(!i32_800f_1ffff::sFlagCombined, "");
+
+ // operational tests
+ i32_800f_800f val(0x8000, 0x1234, 56);
+ EXPECT_EQ(val.get().get(), 56);
+ EXPECT_EQ(val.flags(), 0x8000u);
+ EXPECT_EQ(val.get().flags(), 0x1234u & 0x800F);
+ val.setFlags(0x12345);
+ EXPECT_EQ(val.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(val.get().flags(), 0x1234u & 0x800F);
+ val.get().setFlags(0x54321);
+ EXPECT_EQ(val.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(val.get().flags(), 0x54321u & 0x800F);
+ EXPECT_EQ(val.get().get(), 56);
+
+ typedef Flagged<i32_800f_4, u32, 0x800F /* mask */, 0 /* shared mask */> i32_800f_800f_B;
+ static_assert(i32_800f_800f_B::sFlagMask == 0x800F, "");
+ static_assert(i32_800f_800f_B::sFlagShift == 0, "");
+ static_assert(i32_800f_800f_B::sEffectiveMask == 0x880FF, "");
+
+ i32_800f_800f_B valB(0x8000, 0x1234, -987);
+ EXPECT_EQ(valB.get().get(), -987);
+ EXPECT_EQ(valB.flags(), 0x8000u);
+ EXPECT_EQ(valB.get().flags(), 0x1234u & 0x800F);
+ valB.setFlags(0x12345);
+ EXPECT_EQ(valB.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(valB.get().flags(), 0x1234u & 0x800F);
+ valB.get().setFlags(0x5C321);
+ EXPECT_EQ(valB.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(valB.get().flags(), 0x5C321u & 0x800F);
+ EXPECT_EQ(valB.get().get(), -987);
+
+ typedef Flagged<Flagged<i32, u32, 0xFF>, u32, 0xFF0, 0xF0> i32_ff_ff0;
+ i32_ff_ff0 valC(0xABCD, 0x1234, 101);
+ EXPECT_EQ(valC.get().get(), 101);
+ EXPECT_EQ(valC.flags(), 0xBC0u);
+ EXPECT_EQ(valC.get().flags(), 0xC4u);
+ valC.setFlags(0x12345);
+ EXPECT_EQ(valC.flags(), 0x340u);
+ EXPECT_EQ(valC.get().flags(), 0x44u);
+ valC.get().setFlags(0x54321);
+ EXPECT_EQ(valC.flags(), 0x320u);
+ EXPECT_EQ(valC.get().flags(), 0x21u);
+ EXPECT_EQ(valC.get().get(), 101);
+
+ // when combining flags (with no shift), it should work with signed flags
+ typedef Flagged<Flagged<i32, FLAG, kMask0>, FLAG, kMask1, kMaskCommon> i32_F_ff_ff0;
+ static_assert(i32_F_ff_ff0::sFlagCombined, "flags should be combined");
+
+ i32_F_ff_ff0 valD(kFlag1_A, kFlag0_A, 1023);
+ EXPECT_EQ(valD.get().get(), 1023);
+ EXPECT_EQ(valD.flags(), kFlag1_A);
+ EXPECT_EQ(valD.get().flags(), kFlag0_A);
+ valD.setFlags(kFlag1_B);
+ EXPECT_EQ(valD.flags(), kFlag1_B);
+ EXPECT_EQ(valD.get().flags(), FLAG(0x0BA));
+ valD.get().setFlags(kFlag0_C);
+ EXPECT_EQ(valD.flags(), FLAG(0xBC0));
+ EXPECT_EQ(valD.get().flags(), kFlag0_C);
+ EXPECT_EQ(valD.get().get(), 1023);
+}
+
+} // namespace android
diff --git a/media/libstagefright/foundation/tests/TypeTraits_test.cpp b/media/libstagefright/foundation/tests/TypeTraits_test.cpp
new file mode 100644
index 0000000..9fba435
--- /dev/null
+++ b/media/libstagefright/foundation/tests/TypeTraits_test.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TypeTraits_test"
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/foundation/TypeTraits.h>
+
+namespace android {
+
+class TypeTraitsTest : public ::testing::Test {
+protected:
+ enum A { };
+ enum UA : uint32_t { };
+ enum IA : int32_t { };
+};
+
+// =========== basic sanity tests for type-support templates
+TEST_F(TypeTraitsTest, StaticTests) {
+ static_assert(!std::is_integral<A>::value, "enums should not be integral");
+ static_assert(!std::is_integral<UA>::value, "enums should not be integral");
+ static_assert(!std::is_integral<IA>::value, "enums should not be integral");
+ static_assert(is_integral_or_enum<A>::value, "enums should be integral_or_enum");
+ static_assert(is_integral_or_enum<UA>::value, "enums should be integral_or_enum");
+ static_assert(is_integral_or_enum<IA>::value, "enums should be integral_or_enum");
+ static_assert(is_integral_or_enum<int>::value, "ints should be integral_or_enum");
+ static_assert(is_integral_or_enum<unsigned>::value, "unsigned ints should be integral_or_enum");
+ static_assert(!is_integral_or_enum<float>::value, "floats should not be integral_or_enum");
+
+ static_assert(!std::is_unsigned<UA>::value,
+ "unsigned enums should not be unsigned");
+ static_assert(!std::is_unsigned<IA>::value,
+ "unsigned enums should not be unsigned");
+ static_assert(std::is_unsigned<typename std::underlying_type<UA>::type>::value,
+ "underlying type of unsigned enums should be unsigned");
+ static_assert(!std::is_unsigned<typename std::underlying_type<IA>::type>::value,
+ "underlying type of unsigned enums should be unsigned");
+ static_assert(is_unsigned_integral<UA>::value,
+ "unsigned enums should be unsigned_integral");
+ static_assert(!is_unsigned_integral<IA>::value,
+ "signed enums should not be unsigned_integral");
+ static_assert(is_unsigned_integral<unsigned>::value,
+ "unsigned ints should be unsigned_integral");
+ static_assert(!is_unsigned_integral<int>::value,
+ "ints should not be unsigned_integral");
+ static_assert(!is_unsigned_integral<float>::value,
+ "floats should not be unsigned_integral");
+
+ static_assert(!std::is_signed<UA>::value,
+ "unsigned enums should not be signed");
+ static_assert(!std::is_signed<IA>::value,
+ "unsigned enums should not be signed");
+ static_assert(!std::is_signed<typename std::underlying_type<UA>::type>::value,
+ "underlying type of unsigned enums should be signed");
+ static_assert(std::is_signed<typename std::underlying_type<IA>::type>::value,
+ "underlying type of unsigned enums should be signed");
+ static_assert(!is_signed_integral<UA>::value,
+ "unsigned enums should not be signed_integral");
+ static_assert(is_signed_integral<IA>::value,
+ "signed enums should be signed_integral");
+ static_assert(!is_signed_integral<unsigned>::value,
+ "unsigned ints should not be signed_integral");
+ static_assert(is_signed_integral<int>::value,
+ "ints should be signed_integral");
+ static_assert(!is_signed_integral<float>::value,
+ "floats should not be signed_integral");
+
+ static_assert(std::is_same<uint64_t, typename underlying_integral_type<uint64_t>::type>::value,
+ "underlying integral type of uint64_t should be uint64_t");
+ static_assert(std::is_same<uint32_t, typename underlying_integral_type<UA>::type>::value,
+ "underlying integral type of uint32_t based enums should be uint32_t");
+ static_assert(std::is_same<int64_t, typename underlying_integral_type<int64_t>::type>::value,
+ "underlying integral type of int64_t should be int64_t");
+ static_assert(std::is_same<int32_t, typename underlying_integral_type<IA>::type>::value,
+ "underlying integral type of int32_t based enums should be int32_t");
+ //typedef underlying_integral_type<float>::type no_type;
+ static_assert(std::is_same<void, typename underlying_integral_type<float, void>::type>::value,
+ "underlying integral type of float cannot be specified");
+}
+
+} // namespace android
diff --git a/media/libstagefright/tests/Utils_test.cpp b/media/libstagefright/foundation/tests/Utils_test.cpp
similarity index 98%
rename from media/libstagefright/tests/Utils_test.cpp
rename to media/libstagefright/foundation/tests/Utils_test.cpp
index d736501..0439d5c 100644
--- a/media/libstagefright/tests/Utils_test.cpp
+++ b/media/libstagefright/foundation/tests/Utils_test.cpp
@@ -18,15 +18,11 @@
#define LOG_TAG "Utils_test"
#include <gtest/gtest.h>
-#include <utils/String8.h>
-#include <utils/Errors.h>
-#include <fcntl.h>
-#include <unistd.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AStringUtils.h>
#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/Utils.h> // for FOURCC
namespace android {
diff --git a/media/libstagefright/http/Android.mk b/media/libstagefright/http/Android.mk
index b3ca6d5..a7bd6a2 100644
--- a/media/libstagefright/http/Android.mk
+++ b/media/libstagefright/http/Android.mk
@@ -13,9 +13,9 @@
$(TOP)/frameworks/base/core/jni \
LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libstagefright_foundation \
- libandroid_runtime \
- libmedia
+ liblog libutils libbinder \
+ libandroid_runtime \
+ libmedia
LOCAL_MODULE:= libstagefright_http_support
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 8b9472e..477280a 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -178,7 +178,7 @@
*shortTermBps = mShortTermEstimate;
}
- int32_t minEstimate = -1, maxEstimate = -1;
+ int64_t minEstimate = -1, maxEstimate = -1;
List<int32_t>::iterator it;
for (it = mPrevEstimates.begin(); it != mPrevEstimates.end(); it++) {
int32_t estimate = *it;
@@ -518,9 +518,10 @@
return err;
}
-status_t LiveSession::seekTo(int64_t timeUs) {
+status_t LiveSession::seekTo(int64_t timeUs, MediaPlayerSeekMode mode) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("timeUs", timeUs);
+ msg->setInt32("mode", mode);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
@@ -1441,8 +1442,11 @@
void LiveSession::onSeek(const sp<AMessage> &msg) {
int64_t timeUs;
+ int32_t mode;
CHECK(msg->findInt64("timeUs", &timeUs));
- changeConfiguration(timeUs);
+ CHECK(msg->findInt32("mode", &mode));
+ // TODO: add "mode" to changeConfiguration.
+ changeConfiguration(timeUs/* , (MediaPlayerSeekMode)mode */);
}
status_t LiveSession::getDuration(int64_t *durationUs) const {
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 65a824e..a0138be 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -86,7 +86,7 @@
status_t disconnect();
// Blocks until seek is complete.
- status_t seekTo(int64_t timeUs);
+ status_t seekTo(int64_t timeUs, MediaPlayerSeekMode mode);
status_t getDuration(int64_t *durationUs) const;
size_t getTrackCount() const;
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index bffed52..9105084 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -839,20 +839,21 @@
}
}
-static size_t StringSize(const uint8_t *start, uint8_t encoding) {
+// return includes terminator; if unterminated, returns > limit
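+// e.g. with encoding 0x00, "ab\0" and limit 8 yields 3, while an unterminated "abc"
+// with limit 3 yields 4 (> limit), so callers must range-check the result.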
+static size_t StringSize(const uint8_t *start, size_t limit, uint8_t encoding) {
+
if (encoding == 0x00 || encoding == 0x03) {
// ISO 8859-1 or UTF-8
- return strlen((const char *)start) + 1;
+ return strnlen((const char *)start, limit) + 1;
}
// UCS-2
size_t n = 0;
- while (start[n] != '\0' || start[n + 1] != '\0') {
+ while ((n+1 < limit) && (start[n] != '\0' || start[n + 1] != '\0')) {
n += 2;
}
-
- // Add size of null termination.
- return n + 2;
+ n += 2;
+ return n;
}
const void *
@@ -873,11 +874,19 @@
if (mVersion == ID3_V2_3 || mVersion == ID3_V2_4) {
uint8_t encoding = data[0];
- mime->setTo((const char *)&data[1]);
- size_t mimeLen = strlen((const char *)&data[1]) + 1;
+ size_t consumed = 1;
+
+ // *always* in an 8-bit encoding
+ size_t mimeLen = StringSize(&data[consumed], size - consumed, 0x00);
+ if (mimeLen > size - consumed) {
+ ALOGW("bogus album art size: mime");
+ return NULL;
+ }
+ mime->setTo((const char *)&data[consumed]);
+ consumed += mimeLen;
#if 0
- uint8_t picType = data[1 + mimeLen];
+ uint8_t picType = data[consumed];
if (picType != 0x03) {
// Front Cover Art
it.next();
@@ -885,20 +894,30 @@
}
#endif
- size_t descLen = StringSize(&data[2 + mimeLen], encoding);
-
- if (size < 2 ||
- size - 2 < mimeLen ||
- size - 2 - mimeLen < descLen) {
- ALOGW("bogus album art sizes");
+ consumed++;
+ if (consumed >= size) {
+ ALOGW("bogus album art size: pic type");
return NULL;
}
- *length = size - 2 - mimeLen - descLen;
- return &data[2 + mimeLen + descLen];
+ size_t descLen = StringSize(&data[consumed], size - consumed, encoding);
+ consumed += descLen;
+
+ if (consumed >= size) {
+ ALOGW("bogus album art size: description");
+ return NULL;
+ }
+
+ *length = size - consumed;
+
+ return &data[consumed];
} else {
uint8_t encoding = data[0];
+ if (size <= 5) {
+ return NULL;
+ }
+
if (!memcmp(&data[1], "PNG", 3)) {
mime->setTo("image/png");
} else if (!memcmp(&data[1], "JPG", 3)) {
@@ -918,7 +937,10 @@
}
#endif
- size_t descLen = StringSize(&data[5], encoding);
+ size_t descLen = StringSize(&data[5], size - 5, encoding);
+ if (descLen > size - 5) {
+ return NULL;
+ }
*length = size - 5 - descLen;
diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp
index b2f4188..442a3ff 100644
--- a/media/libstagefright/id3/testid3.cpp
+++ b/media/libstagefright/id3/testid3.cpp
@@ -154,8 +154,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
for (int i = 1; i < argc; ++i) {
scan(argv[i]);
}
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 85ee4ee..ab12a86 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -50,13 +50,10 @@
status_t getConfig(OMX_INDEXTYPE index, void *params, size_t size);
status_t setConfig(OMX_INDEXTYPE index, const void *params, size_t size);
- status_t enableNativeBuffers(OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable);
+ status_t setPortMode(OMX_U32 port_index, IOMX::PortMode mode);
status_t getGraphicBufferUsage(OMX_U32 portIndex, OMX_U32* usage);
- status_t storeMetaDataInBuffers(
- OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
-
status_t prepareForAdaptivePlayback(
OMX_U32 portIndex, OMX_BOOL enable,
OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight);
@@ -137,6 +134,8 @@
KeyedVector<OMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
KeyedVector<OMX_BUFFERHEADERTYPE *, OMX::buffer_id> mBufferHeaderToBufferID;
+ bool mLegacyAdaptiveExperiment;
+ IOMX::PortMode mPortMode[2];
// metadata and secure buffer type tracking
MetadataBufferType mMetadataType[2];
enum SecureBufferType {
@@ -180,15 +179,11 @@
bool isProhibitedIndex_l(OMX_INDEXTYPE index);
- status_t useBuffer(
- OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer, OMX_U32 allottedSize);
-
status_t useBuffer_l(
OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer, OMX_U32 allottedSize);
+ OMX::buffer_id *buffer);
- status_t useGraphicBuffer(
+ status_t useGraphicBuffer_l(
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
OMX::buffer_id *buffer);
@@ -200,16 +195,16 @@
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
OMX::buffer_id *buffer);
- status_t emptyBuffer(
+ status_t emptyBuffer_l(
OMX::buffer_id buffer,
OMX_U32 rangeOffset, OMX_U32 rangeLength,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
- status_t emptyGraphicBuffer(
+ status_t emptyGraphicBuffer_l(
OMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
- status_t emptyNativeHandleBuffer(
+ status_t emptyNativeHandleBuffer_l(
OMX::buffer_id buffer, const sp<NativeHandle> &nativeHandle,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
@@ -235,6 +230,9 @@
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
+ status_t enableNativeBuffers_l(
+ OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable);
+
status_t storeMetaDataInBuffers_l(
OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
diff --git a/media/libstagefright/include/SecureBuffer.h b/media/libstagefright/include/SecureBuffer.h
index ac7399a..cf7933a 100644
--- a/media/libstagefright/include/SecureBuffer.h
+++ b/media/libstagefright/include/SecureBuffer.h
@@ -42,8 +42,6 @@
void *getDestinationPointer();
ICrypto::DestinationType getDestinationType();
- virtual sp<MediaCodecBuffer> clone(const sp<AMessage> &format) override;
-
private:
SecureBuffer() = delete;
diff --git a/media/libstagefright/include/SharedMemoryBuffer.h b/media/libstagefright/include/SharedMemoryBuffer.h
index c52e5c5..1d7f7a6 100644
--- a/media/libstagefright/include/SharedMemoryBuffer.h
+++ b/media/libstagefright/include/SharedMemoryBuffer.h
@@ -34,8 +34,6 @@
virtual ~SharedMemoryBuffer() = default;
- virtual sp<MediaCodecBuffer> clone(const sp<AMessage> &format) override;
-
private:
SharedMemoryBuffer() = delete;
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h
index 591b38e..1d1f2bd 100644
--- a/media/libstagefright/include/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/include/SimpleSoftOMXComponent.h
@@ -29,6 +29,11 @@
struct ALooper;
+struct CodecProfileLevel {
+ OMX_U32 mProfile;
+ OMX_U32 mLevel;
+};
+
struct SimpleSoftOMXComponent : public SoftOMXComponent {
SimpleSoftOMXComponent(
const char *name,
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
deleted file mode 100644
index 65cb99a..0000000
--- a/media/libstagefright/include/WVMExtractor.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef WVM_EXTRACTOR_H_
-
-#define WVM_EXTRACTOR_H_
-
-#include <media/stagefright/MediaExtractor.h>
-#include <utils/Errors.h>
-
-namespace android {
-
-struct AMessage;
-class String8;
-class DataSource;
-
-class WVMLoadableExtractor : public MediaExtractor {
-public:
- WVMLoadableExtractor() {}
- virtual ~WVMLoadableExtractor() {}
-
- virtual int64_t getCachedDurationUs(status_t *finalStatus) = 0;
- virtual status_t getError() = 0;
- virtual status_t getEstimatedBandwidthKbps(int32_t *kbps) = 0;
- virtual void setAdaptiveStreamingMode(bool adaptive) = 0;
- virtual void setCryptoPluginMode(bool cryptoPluginMode) = 0;
- virtual void setError(status_t err) = 0;
- virtual void setUID(uid_t uid) = 0;
-};
-
-class WVMExtractor : public MediaExtractor {
-public:
- explicit WVMExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
- virtual sp<MetaData> getMetaData();
- virtual void setUID(uid_t uid);
-
- // Return the amount of data cached from the current
- // playback positiion (in us).
- // While more data is still being fetched *finalStatus == OK,
- // Once fetching is completed (no more data available), *finalStatus != OK
- // If fetching completed normally (i.e. reached EOS instead of IO error)
- // *finalStatus == ERROR_END_OF_STREAM
- int64_t getCachedDurationUs(status_t *finalStatus);
-
- // Return the current estimated bandwidth
- status_t getEstimatedBandwidthKbps(int32_t *kbps);
-
- // Set to use adaptive streaming mode by the WV component.
- // If adaptive == true, adaptive streaming mode will be used.
- // Default mode is non-adaptive streaming mode.
- // Should set to use adaptive streaming mode only if widevine:// protocol
- // is used.
- void setAdaptiveStreamingMode(bool adaptive);
-
- // setCryptoPluginMode(true) to select crypto plugin mode.
- // In this mode, the extractor returns encrypted data for use
- // with the MediaCodec model, which handles the decryption in the
- // codec.
- void setCryptoPluginMode(bool cryptoPluginMode);
-
- static bool getVendorLibHandle();
-
- status_t getError();
-
- void setError(status_t err);
-
-protected:
- virtual ~WVMExtractor();
-
-private:
- sp<DataSource> mDataSource;
- sp<WVMLoadableExtractor> mImpl;
-
- WVMExtractor(const WVMExtractor &);
- WVMExtractor &operator=(const WVMExtractor &);
-};
-
-bool SniffWVM(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // DRM_EXTRACTOR_H_
-
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index b060628..a974671 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -377,6 +377,16 @@
*actualFrameTimeUs = -1ll;
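+ // Reject seek positions whose conversion to ns (us * 1000) or pre-roll
+ // adjustment would overflow int64_t before seekTimeNs is computed below.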
+ if (seekTimeUs > INT64_MAX / 1000ll ||
+ seekTimeUs < INT64_MIN / 1000ll ||
+ (mExtractor->mSeekPreRollNs > 0 &&
+ (seekTimeUs * 1000ll) < INT64_MIN + mExtractor->mSeekPreRollNs) ||
+ (mExtractor->mSeekPreRollNs < 0 &&
+ (seekTimeUs * 1000ll) > INT64_MAX + mExtractor->mSeekPreRollNs)) {
+ ALOGE("cannot seek to %lld", (long long) seekTimeUs);
+ return;
+ }
+
const int64_t seekTimeNs = seekTimeUs * 1000ll - mExtractor->mSeekPreRollNs;
mkvparser::Segment* const pSegment = mExtractor->mSegment;
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index fdc9d7f..be4a932 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -356,9 +356,12 @@
mDebugLevelBumpPendingBuffers[1] = 0;
mMetadataType[0] = kMetadataBufferTypeInvalid;
mMetadataType[1] = kMetadataBufferTypeInvalid;
+ mPortMode[0] = IOMX::kPortModePresetByteBuffer;
+ mPortMode[1] = IOMX::kPortModePresetByteBuffer;
mSecureBufferType[0] = kSecureBufferTypeUnknown;
mSecureBufferType[1] = kSecureBufferTypeUnknown;
mIsSecure = AString(name).endsWith(".secure");
+ mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled("legacy-adaptive");
}
OMXNodeInstance::~OMXNodeInstance() {
@@ -650,7 +653,114 @@
return StatusFromOMXError(err);
}
-status_t OMXNodeInstance::enableNativeBuffers(
+status_t OMXNodeInstance::setPortMode(OMX_U32 portIndex, IOMX::PortMode mode) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (portIndex >= NELEM(mPortMode)) {
+ ALOGE("b/31385713, portIndex(%u)", portIndex);
+ android_errorWriteLog(0x534e4554, "31385713");
+ return BAD_VALUE;
+ }
+
+ CLOG_CONFIG(setPortMode, "%s(%d), port %d", asString(mode), mode, portIndex);
+
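+ // Each mode below maps onto the existing native-buffer and metadata switches;
+ // results from the "don't care" toggles are deliberately ignored via (void).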
+ switch (mode) {
+ case IOMX::kPortModeDynamicANWBuffer:
+ {
+ if (portIndex == kPortIndexOutput) {
+ if (mLegacyAdaptiveExperiment) {
+ CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+ "not setting port mode to %s(%d) on output",
+ asString(mode), mode);
+ return StatusFromOMXError(OMX_ErrorUnsupportedIndex);
+ }
+
+ status_t err = enableNativeBuffers_l(
+ portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+ if (err != OK) {
+ return err;
+ }
+ }
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+ return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL);
+ }
+
+ case IOMX::kPortModeDynamicNativeHandle:
+ {
+ if (portIndex != kPortIndexInput) {
+ CLOG_ERROR(setPortMode, BAD_VALUE,
+ "%s(%d) mode is only supported on input port", asString(mode), mode);
+ return BAD_VALUE;
+ }
+ (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+
+ MetadataBufferType metaType = kMetadataBufferTypeNativeHandleSource;
+ return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, &metaType);
+ }
+
+ case IOMX::kPortModePresetSecureBuffer:
+ {
+ // Allow on both input and output.
+ (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+ (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+ return enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_TRUE);
+ }
+
+ case IOMX::kPortModePresetANWBuffer:
+ {
+ if (portIndex != kPortIndexOutput) {
+ CLOG_ERROR(setPortMode, BAD_VALUE,
+ "%s(%d) mode is only supported on output port", asString(mode), mode);
+ return BAD_VALUE;
+ }
+
+ // Check if we're simulating legacy mode with metadata mode,
+ // if so, enable metadata mode.
+ if (mLegacyAdaptiveExperiment) {
+ if (storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL) == OK) {
+ CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+ "metdata mode enabled successfully");
+ return OK;
+ }
+
+ CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+ "unable to enable metadata mode on output");
+
+ mLegacyAdaptiveExperiment = false;
+ }
+
+ // Disable secure buffer and enable graphic buffer
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+ status_t err = enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+ if (err != OK) {
+ return err;
+ }
+
+ // Not running experiment, or metadata is not supported.
+ // Disable metadata mode and use legacy mode.
+ (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+ return OK;
+ }
+
+ case IOMX::kPortModePresetByteBuffer:
+ {
+ // Disable secure buffer, native buffer and metadata.
+ (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+ (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+ return OK;
+ }
+
+ default:
+ break;
+ }
+
+ CLOG_ERROR(setPortMode, BAD_VALUE, "invalid port mode %d", mode);
+ return BAD_VALUE;
+}
+
+status_t OMXNodeInstance::enableNativeBuffers_l(
OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable) {
if (portIndex >= NELEM(mSecureBufferType)) {
ALOGE("b/31385713, portIndex(%u)", portIndex);
@@ -658,7 +768,6 @@
return BAD_VALUE;
}
- Mutex::Autolock autoLock(mLock);
CLOG_CONFIG(enableNativeBuffers, "%s:%u%s, %d", portString(portIndex), portIndex,
graphic ? ", graphic" : "", enable);
OMX_STRING name = const_cast<OMX_STRING>(
@@ -735,13 +844,6 @@
return OK;
}
-status_t OMXNodeInstance::storeMetaDataInBuffers(
- OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
- Mutex::Autolock autolock(mLock);
- CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u en:%d", portString(portIndex), portIndex, enable);
- return storeMetaDataInBuffers_l(portIndex, enable, type);
-}
-
status_t OMXNodeInstance::storeMetaDataInBuffers_l(
OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
if (mSailed) {
@@ -832,6 +934,12 @@
CLOG_CONFIG(prepareForAdaptivePlayback, "%s:%u en=%d max=%ux%u",
portString(portIndex), portIndex, enable, maxFrameWidth, maxFrameHeight);
+ if (mLegacyAdaptiveExperiment) {
+ CLOG_INTERNAL(prepareForAdaptivePlayback,
+ "Legacy adaptive experiment: reporting success");
+ return OK;
+ }
+
OMX_INDEXTYPE index;
OMX_STRING name = const_cast<OMX_STRING>(
"OMX.google.android.index.prepareForAdaptivePlayback");
@@ -902,43 +1010,62 @@
}
status_t OMXNodeInstance::useBuffer(
- OMX_U32 portIndex,
- const OMXBuffer &omxBuffer, OMX::buffer_id *buffer) {
- // TODO: the allotted size is probably no longer needed.
- if (omxBuffer.mBufferType == OMXBuffer::kBufferTypeSharedMem) {
- return useBuffer(portIndex, omxBuffer.mMem, buffer, omxBuffer.mAllottedSize);
+ OMX_U32 portIndex, const OMXBuffer &omxBuffer, OMX::buffer_id *buffer) {
+ if (buffer == NULL) {
+ ALOGE("b/25884056");
+ return BAD_VALUE;
}
- if (omxBuffer.mBufferType == OMXBuffer::kBufferTypeANWBuffer) {
- return useGraphicBuffer(portIndex, omxBuffer.mGraphicBuffer, buffer);
+ if (portIndex >= NELEM(mNumPortBuffers)) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock autoLock(mLock);
+
+ switch (omxBuffer.mBufferType) {
+ case OMXBuffer::kBufferTypePreset:
+ return useBuffer_l(portIndex, NULL, buffer);
+
+ case OMXBuffer::kBufferTypeSharedMem:
+ return useBuffer_l(portIndex, omxBuffer.mMem, buffer);
+
+ case OMXBuffer::kBufferTypeANWBuffer:
+ return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
+
+ default:
+ break;
}
return BAD_VALUE;
}
-status_t OMXNodeInstance::useBuffer(
- OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer, OMX_U32 allottedSize) {
- if (params == NULL || buffer == NULL) {
- ALOGE("b/25884056");
- return BAD_VALUE;
- }
-
- Mutex::Autolock autoLock(mLock);
- if (allottedSize > params->size() || portIndex >= NELEM(mNumPortBuffers)) {
- return BAD_VALUE;
- }
-
- return useBuffer_l(portIndex, params, buffer, allottedSize);
-}
-
status_t OMXNodeInstance::useBuffer_l(
- OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer, OMX_U32 allottedSize) {
+ OMX_U32 portIndex, const sp<IMemory> &params, OMX::buffer_id *buffer) {
BufferMeta *buffer_meta;
OMX_BUFFERHEADERTYPE *header;
OMX_ERRORTYPE err = OMX_ErrorNone;
bool isMetadata = mMetadataType[portIndex] != kMetadataBufferTypeInvalid;
+
+ OMX_U32 allottedSize;
+ if (isMetadata) {
+ if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource) {
+ allottedSize = sizeof(VideoGrallocMetadata);
+ } else if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer) {
+ allottedSize = sizeof(VideoNativeMetadata);
+ } else if (mMetadataType[portIndex] == kMetadataBufferTypeNativeHandleSource) {
+ allottedSize = sizeof(VideoNativeHandleMetadata);
+ } else {
+ return BAD_VALUE;
+ }
+ } else {
+ // NULL params is allowed only in metadata mode.
+ if (params == NULL) {
+ ALOGE("b/25884056");
+ return BAD_VALUE;
+ }
+ allottedSize = params->size();
+ }
+
bool isOutputGraphicMetadata = (portIndex == kPortIndexOutput) &&
(mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource ||
mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer);
@@ -968,13 +1095,6 @@
// metadata buffers are not connected cross process
// use a backup buffer instead of the actual buffer
if (isMetadata) {
- // TODO: this logic is very fishy, should it be removed?
- // if we are not connecting the buffers, the sizes must match
- if (params != NULL && allottedSize != params->size()) {
- CLOG_ERROR(useBuffer, BAD_VALUE, SIMPLE_BUFFER(portIndex, (size_t)allottedSize, data));
- return BAD_VALUE;
- }
-
data = new (std::nothrow) OMX_U8[allottedSize];
if (data == NULL) {
return NO_MEMORY;
@@ -984,8 +1104,6 @@
buffer_meta = new BufferMeta(
params, portIndex, false /* copy */, data);
} else {
- // NULL params is allowed only in metadata mode.
- CHECK(params != NULL);
data = static_cast<OMX_U8 *>(params->pointer());
buffer_meta = new BufferMeta(
@@ -1083,14 +1201,13 @@
// XXX: This function is here for backwards compatibility. Once the OMX
// implementations have been updated this can be removed and useGraphicBuffer2
// can be renamed to useGraphicBuffer.
-status_t OMXNodeInstance::useGraphicBuffer(
+status_t OMXNodeInstance::useGraphicBuffer_l(
OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
OMX::buffer_id *buffer) {
if (graphicBuffer == NULL || buffer == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
}
- Mutex::Autolock autoLock(mLock);
// First, see if we're in metadata mode. We could be running an experiment to simulate
// legacy behavior (preallocated buffers) on devices that support meta.
@@ -1161,16 +1278,12 @@
return BAD_VALUE;
}
- OMX_U32 allottedSize = 0;
- if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource) {
- allottedSize = sizeof(VideoGrallocMetadata);
- } else if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer) {
- allottedSize = sizeof(VideoNativeMetadata);
- } else {
+ if (mMetadataType[portIndex] != kMetadataBufferTypeGrallocSource &&
+ mMetadataType[portIndex] != kMetadataBufferTypeANWBuffer) {
return BAD_VALUE;
}
- status_t err = useBuffer_l(portIndex, NULL, buffer, allottedSize);
+ status_t err = useBuffer_l(portIndex, NULL, buffer);
if (err != OK) {
return err;
}
@@ -1443,29 +1556,32 @@
status_t OMXNodeInstance::emptyBuffer(
buffer_id buffer, const OMXBuffer &omxBuffer,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
- if (omxBuffer.mBufferType == OMXBuffer::kBufferTypePreset) {
- return emptyBuffer(
+ Mutex::Autolock autoLock(mLock);
+
+ switch (omxBuffer.mBufferType) {
+ case OMXBuffer::kBufferTypePreset:
+ return emptyBuffer_l(
buffer, 0, omxBuffer.mRangeLength, flags, timestamp, fenceFd);
- }
- if (omxBuffer.mBufferType == OMXBuffer::kBufferTypeANWBuffer) {
- return emptyGraphicBuffer(
+ case OMXBuffer::kBufferTypeANWBuffer:
+ return emptyGraphicBuffer_l(
buffer, omxBuffer.mGraphicBuffer, flags, timestamp, fenceFd);
- }
- if (omxBuffer.mBufferType == OMXBuffer::kBufferTypeNativeHandle) {
- return emptyNativeHandleBuffer(
+ case OMXBuffer::kBufferTypeNativeHandle:
+ return emptyNativeHandleBuffer_l(
buffer, omxBuffer.mNativeHandle, flags, timestamp, fenceFd);
+
+ default:
+ break;
}
return BAD_VALUE;
}
-status_t OMXNodeInstance::emptyBuffer(
+status_t OMXNodeInstance::emptyBuffer_l(
OMX::buffer_id buffer,
OMX_U32 rangeOffset, OMX_U32 rangeLength,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
- Mutex::Autolock autoLock(mLock);
// no emptybuffer if using input surface
if (getBufferSource() != NULL) {
@@ -1614,11 +1730,9 @@
}
// like emptyBuffer, but the data is already in header->pBuffer
-status_t OMXNodeInstance::emptyGraphicBuffer(
+status_t OMXNodeInstance::emptyGraphicBuffer_l(
OMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
- Mutex::Autolock autoLock(mLock);
-
OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
if (header == NULL) {
ALOGE("b/25884056");
@@ -1688,11 +1802,9 @@
return timestamp;
}
-status_t OMXNodeInstance::emptyNativeHandleBuffer(
+status_t OMXNodeInstance::emptyNativeHandleBuffer_l(
OMX::buffer_id buffer, const sp<NativeHandle> &nativeHandle,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
- Mutex::Autolock autoLock(mLock);
-
OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
if (header == NULL) {
ALOGE("b/25884056");
@@ -1702,7 +1814,7 @@
status_t err = updateNativeHandleInMeta_l(
kPortIndexInput, nativeHandle, buffer, header);
if (err != OK) {
- CLOG_ERROR(emptyNativeHandleBuffer, err, FULL_BUFFER(
+ CLOG_ERROR(emptyNativeHandleBuffer_l, err, FULL_BUFFER(
(intptr_t)header->pBuffer, header, fenceFd));
return err;
}
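The hunks above hoist the Mutex::Autolock out of the internal helpers and into the public entry points (useBuffer, emptyBuffer), which now take the lock once and dispatch on OMXBuffer::mBufferType to the "_l" variants. A minimal sketch of that lock-at-the-boundary pattern follows; the class, method names, and buffer-type codes are illustrative, not the actual OMXNodeInstance API.

    #include <mutex>

    struct Node {
        std::mutex mLock;

        // Public entry point: acquire the lock once, then dispatch to helpers
        // that assume it is already held (the "_l" suffix convention).
        int useBuffer(int bufferType) {
            std::lock_guard<std::mutex> guard(mLock);
            switch (bufferType) {
                case 0:  return useBuffer_l();         // preset / shared-memory case
                case 1:  return useGraphicBuffer_l();  // ANW graphic-buffer case
                default: return -1;                    // reject unknown buffer types
            }
        }

    private:
        int useBuffer_l()        { return 0; }  // caller must hold mLock
        int useGraphicBuffer_l() { return 0; }  // caller must hold mLock
    };

This keeps every path under exactly one lock acquisition, so compound operations such as setPortMode can call several "_l" helpers without re-acquiring the lock.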
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/OMXUtils.h
index 3f533ff..401d64b 100644
--- a/media/libstagefright/omx/OMXUtils.h
+++ b/media/libstagefright/omx/OMXUtils.h
@@ -51,6 +51,30 @@
const sp<IOMXNode> &omxNode,
DescribeColorFormat2Params &describeParams);
+inline static const char *asString(MetadataBufferType i, const char *def = "??") {
+ using namespace android;
+ switch (i) {
+ case kMetadataBufferTypeCameraSource: return "CameraSource";
+ case kMetadataBufferTypeGrallocSource: return "GrallocSource";
+ case kMetadataBufferTypeANWBuffer: return "ANWBuffer";
+ case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
+ case kMetadataBufferTypeInvalid: return "Invalid";
+ default: return def;
+ }
+}
+
+inline static const char *asString(IOMX::PortMode mode, const char *def = "??") {
+ using namespace android;
+ switch (mode) {
+ case IOMX::kPortModePresetByteBuffer: return "PresetByteBuffer";
+ case IOMX::kPortModePresetANWBuffer: return "PresetANWBuffer";
+ case IOMX::kPortModePresetSecureBuffer: return "PresetSecureBuffer";
+ case IOMX::kPortModeDynamicANWBuffer: return "DynamicANWBuffer";
+ case IOMX::kPortModeDynamicNativeHandle:return "DynamicNativeHandle";
+ default: return def;
+ }
+}
+
} // namespace android
#endif
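These switch-based helpers give log statements a printable name with a safe fallback for values outside the known set (setPortMode already uses asString(mode) next to the numeric value in its CLOG_ERROR). A self-contained sketch of the same idiom and its typical call site, using an illustrative enum and printf in place of the logging macros:

    #include <cstdio>

    enum class PortMode { PresetByteBuffer, PresetANWBuffer, DynamicANWBuffer };

    static const char *asString(PortMode m, const char *def = "??") {
        switch (m) {
            case PortMode::PresetByteBuffer: return "PresetByteBuffer";
            case PortMode::PresetANWBuffer:  return "PresetANWBuffer";
            case PortMode::DynamicANWBuffer: return "DynamicANWBuffer";
            default:                         return def;  // unknown or future value
        }
    }

    int main() {
        PortMode mode = PortMode::PresetANWBuffer;
        // Stands in for the ALOGE/CLOG_ERROR call sites above.
        std::printf("port mode %s(%d)\n", asString(mode), static_cast<int>(mode));
        return 0;
    }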
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 935d7bf..1ce5d1a 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -800,7 +800,6 @@
using namespace android;
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
const char *me = argv[0];
diff --git a/media/libstagefright/rtsp/rtp_test.cpp b/media/libstagefright/rtsp/rtp_test.cpp
index 24f529b..e612a8d 100644
--- a/media/libstagefright/rtsp/rtp_test.cpp
+++ b/media/libstagefright/rtsp/rtp_test.cpp
@@ -37,8 +37,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
const char *rtpFilename = NULL;
const char *rtcpFilename = NULL;
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index a93770a..c6963b1 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -38,35 +38,6 @@
include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_MODULE := Utils_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- Utils_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- liblog \
- libmedia \
- libstagefright \
- libstagefright_foundation \
- libstagefright_omx \
-
-LOCAL_C_INCLUDES := \
- frameworks/av/include \
- frameworks/av/media/libstagefright \
- frameworks/av/media/libstagefright/include \
- $(TOP)/frameworks/native/include/media/openmax \
-
-LOCAL_CFLAGS += -Werror -Wall
-
-include $(BUILD_NATIVE_TEST)
-
include $(CLEAR_VARS)
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index 6f0087f..ae507fc 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -423,9 +423,11 @@
CHECK_GE(accessUnit->size(), rangeLength);
sp<GraphicBuffer> grbuf(new GraphicBuffer(
- rangeOffset + rangeLength, 1, HAL_PIXEL_FORMAT_Y8,
- GRALLOC_USAGE_HW_VIDEO_ENCODER, rangeOffset + rangeLength,
- handle, false));
+ rangeOffset + rangeLength /* width */, 1 /* height */,
+ HAL_PIXEL_FORMAT_Y8, 1 /* layerCount */,
+ GRALLOC_USAGE_HW_VIDEO_ENCODER,
+ rangeOffset + rangeLength /* stride */, handle,
+ false /* keepOwnership */));
err = mHDCP->encryptNative(
grbuf, rangeOffset, rangeLength,
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 3587cb9..f1ecca0 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -36,7 +36,6 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/AudioSource.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaSource.h>
@@ -748,8 +747,6 @@
status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
bool enableAudio, bool enableVideo) {
- DataSource::RegisterDefaultSniffers();
-
mExtractor = new NuMediaExtractor;
status_t err = mExtractor->setDataSource(
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index a4f999f..74729e4 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -45,6 +45,7 @@
LOCAL_SHARED_LIBRARIES := \
libbinder \
libmedia \
+ libmediadrm \
libstagefright \
libstagefright_foundation \
liblog \
diff --git a/media/utils/Android.mk b/media/utils/Android.mk
index 54d22b1..21d1b5b 100644
--- a/media/utils/Android.mk
+++ b/media/utils/Android.mk
@@ -19,6 +19,8 @@
LOCAL_SRC_FILES := \
BatteryNotifier.cpp \
ISchedulingPolicyService.cpp \
+ MemoryLeakTrackUtil.cpp \
+ ProcessInfo.cpp \
SchedulingPolicyService.cpp
LOCAL_SHARED_LIBRARIES := \
@@ -26,6 +28,7 @@
libcutils \
liblog \
libutils \
+ libmemunreachable \
LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
diff --git a/media/utils/BatteryNotifier.cpp b/media/utils/BatteryNotifier.cpp
index 341d391..7a7321f 100644
--- a/media/utils/BatteryNotifier.cpp
+++ b/media/utils/BatteryNotifier.cpp
@@ -29,7 +29,7 @@
BatteryNotifier::getInstance().onBatteryStatServiceDied();
}
-BatteryNotifier::BatteryNotifier() : mVideoRefCount(0), mAudioRefCount(0) {}
+BatteryNotifier::BatteryNotifier() {}
BatteryNotifier::~BatteryNotifier() {
Mutex::Autolock _l(mLock);
@@ -38,67 +38,73 @@
}
}
-void BatteryNotifier::noteStartVideo() {
+void BatteryNotifier::noteStartVideo(int uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- if (mVideoRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStartVideo(AID_MEDIA);
+ if (mVideoRefCounts[uid] == 0 && batteryService != nullptr) {
+ batteryService->noteStartVideo(uid);
}
- mVideoRefCount++;
+ mVideoRefCounts[uid]++;
}
-void BatteryNotifier::noteStopVideo() {
+void BatteryNotifier::noteStopVideo(int uid) {
Mutex::Autolock _l(mLock);
- if (mVideoRefCount == 0) {
- ALOGW("%s: video refcount is broken.", __FUNCTION__);
+ if (mVideoRefCounts.find(uid) == mVideoRefCounts.end()) {
+ ALOGW("%s: video refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
return;
}
sp<IBatteryStats> batteryService = getBatteryService_l();
- mVideoRefCount--;
- if (mVideoRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStopVideo(AID_MEDIA);
+ mVideoRefCounts[uid]--;
+ if (mVideoRefCounts[uid] == 0) {
+ if (batteryService != nullptr) {
+ batteryService->noteStopVideo(uid);
+ }
+ mVideoRefCounts.erase(uid);
}
}
void BatteryNotifier::noteResetVideo() {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- mVideoRefCount = 0;
+ mVideoRefCounts.clear();
if (batteryService != nullptr) {
batteryService->noteResetVideo();
}
}
-void BatteryNotifier::noteStartAudio() {
+void BatteryNotifier::noteStartAudio(int uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- if (mAudioRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStartAudio(AID_AUDIOSERVER);
+ if (mAudioRefCounts[uid] == 0 && batteryService != nullptr) {
+ batteryService->noteStartAudio(uid);
}
- mAudioRefCount++;
+ mAudioRefCounts[uid]++;
}
-void BatteryNotifier::noteStopAudio() {
+void BatteryNotifier::noteStopAudio(int uid) {
Mutex::Autolock _l(mLock);
- if (mAudioRefCount == 0) {
- ALOGW("%s: audio refcount is broken.", __FUNCTION__);
+ if (mAudioRefCounts.find(uid) == mAudioRefCounts.end()) {
+ ALOGW("%s: audio refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
return;
}
sp<IBatteryStats> batteryService = getBatteryService_l();
- mAudioRefCount--;
- if (mAudioRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStopAudio(AID_AUDIOSERVER);
+ mAudioRefCounts[uid]--;
+ if (mAudioRefCounts[uid] == 0) {
+ if (batteryService != nullptr) {
+ batteryService->noteStopAudio(uid);
+ }
+ mAudioRefCounts.erase(uid);
}
}
void BatteryNotifier::noteResetAudio() {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- mAudioRefCount = 0;
+ mAudioRefCounts.clear();
if (batteryService != nullptr) {
batteryService->noteResetAudio();
}
@@ -176,7 +182,7 @@
Mutex::Autolock _l(mLock);
mBatteryStatService.clear();
mDeathNotifier.clear();
- // Do not reset mVideoRefCount and mAudioRefCount here. The ref
+ // Do not reset mVideoRefCounts and mAudioRefCounts here. The ref
// counting is independent of the battery service availability.
// We need this if battery service becomes available after media
// started.
@@ -205,11 +211,13 @@
// Notify start now if mediaserver or audioserver is already started.
// 1) mediaserver and audioserver are started before batterystats service
// 2) batterystats server may have crashed.
- if (mVideoRefCount > 0) {
- mBatteryStatService->noteStartVideo(AID_MEDIA);
+ std::map<int, int>::iterator it = mVideoRefCounts.begin();
+ for (; it != mVideoRefCounts.end(); ++it) {
+ mBatteryStatService->noteStartVideo(it->first);
}
- if (mAudioRefCount > 0) {
- mBatteryStatService->noteStartAudio(AID_AUDIOSERVER);
+ it = mAudioRefCounts.begin();
+ for (; it != mAudioRefCounts.end(); ++it) {
+ mBatteryStatService->noteStartAudio(it->first);
}
// TODO: Notify for camera and flashlight state as well?
}
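The BatteryNotifier changes above replace the single mVideoRefCount/mAudioRefCount counters with per-uid maps: the battery service is notified only when a uid's count goes from 0 to 1, and again when it returns to 0, at which point the entry is erased. A minimal sketch of that per-key refcount pattern, with printf standing in for the IBatteryStats calls:

    #include <cstdio>
    #include <map>

    class PerUidCounter {
    public:
        void acquire(int uid) {
            if (mCounts[uid]++ == 0) {
                std::printf("noteStart(uid=%d)\n", uid);   // first reference for this uid
            }
        }
        void release(int uid) {
            auto it = mCounts.find(uid);
            if (it == mCounts.end()) {
                std::printf("refcount broken for uid %d\n", uid);
                return;
            }
            if (--it->second == 0) {
                std::printf("noteStop(uid=%d)\n", uid);    // last reference released
                mCounts.erase(it);                         // drop the entry entirely
            }
        }
    private:
        std::map<int, int> mCounts;
    };

Erasing the entry on the last release is what lets the replay loop in the reconnect path walk only uids that are still active, while the maps themselves are intentionally left intact when the battery service dies.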
diff --git a/media/libmedia/MemoryLeakTrackUtil.cpp b/media/utils/MemoryLeakTrackUtil.cpp
similarity index 100%
rename from media/libmedia/MemoryLeakTrackUtil.cpp
rename to media/utils/MemoryLeakTrackUtil.cpp
diff --git a/media/libstagefright/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
similarity index 100%
rename from media/libstagefright/ProcessInfo.cpp
rename to media/utils/ProcessInfo.cpp
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
index 49048042..2ba4c76 100644
--- a/media/utils/include/mediautils/BatteryNotifier.h
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -37,11 +37,11 @@
public:
~BatteryNotifier();
- void noteStartVideo();
- void noteStopVideo();
+ void noteStartVideo(int uid);
+ void noteStopVideo(int uid);
void noteResetVideo();
- void noteStartAudio();
- void noteStopAudio();
+ void noteStartAudio(int uid);
+ void noteStopAudio(int uid);
void noteResetAudio();
void noteFlashlightOn(const String8& id, int uid);
void noteFlashlightOff(const String8& id, int uid);
@@ -58,8 +58,8 @@
};
Mutex mLock;
- int mVideoRefCount;
- int mAudioRefCount;
+ std::map<int, int> mVideoRefCounts;
+ std::map<int, int> mAudioRefCounts;
std::map<std::pair<String8, int>, bool> mFlashlightState;
std::map<std::pair<String8, int>, bool> mCameraState;
sp<IBatteryStats> mBatteryStatService;
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 1657c08..e2a93ad 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -36,6 +36,7 @@
LOCAL_C_INCLUDES := \
$(TOPDIR)frameworks/av/services/audiopolicy \
+ $(TOPDIR)frameworks/av/services/medialog \
$(TOPDIR)external/sonic \
$(call include-path-for, audio-utils)
@@ -48,7 +49,8 @@
libutils \
liblog \
libbinder \
- libmedia \
+ libaudioclient \
+ libmedialogservice \
libmediautils \
libnbaio \
libpowermanager \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7e10e48..4d2049e 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -35,6 +35,7 @@
#include <media/audiohal/DevicesFactoryHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <media/AudioParameter.h>
+#include <media/TypeConverter.h>
#include <memunreachable/memunreachable.h>
#include <utils/String16.h>
#include <utils/threads.h>
@@ -113,38 +114,10 @@
// ----------------------------------------------------------------------------
-const char *formatToString(audio_format_t format) {
- switch (audio_get_main_format(format)) {
- case AUDIO_FORMAT_PCM:
- switch (format) {
- case AUDIO_FORMAT_PCM_16_BIT: return "pcm16";
- case AUDIO_FORMAT_PCM_8_BIT: return "pcm8";
- case AUDIO_FORMAT_PCM_32_BIT: return "pcm32";
- case AUDIO_FORMAT_PCM_8_24_BIT: return "pcm8.24";
- case AUDIO_FORMAT_PCM_FLOAT: return "pcmfloat";
- case AUDIO_FORMAT_PCM_24_BIT_PACKED: return "pcm24";
- default:
- break;
- }
- break;
- case AUDIO_FORMAT_MP3: return "mp3";
- case AUDIO_FORMAT_AMR_NB: return "amr-nb";
- case AUDIO_FORMAT_AMR_WB: return "amr-wb";
- case AUDIO_FORMAT_AAC: return "aac";
- case AUDIO_FORMAT_HE_AAC_V1: return "he-aac-v1";
- case AUDIO_FORMAT_HE_AAC_V2: return "he-aac-v2";
- case AUDIO_FORMAT_VORBIS: return "vorbis";
- case AUDIO_FORMAT_OPUS: return "opus";
- case AUDIO_FORMAT_AC3: return "ac-3";
- case AUDIO_FORMAT_E_AC3: return "e-ac-3";
- case AUDIO_FORMAT_IEC61937: return "iec61937";
- case AUDIO_FORMAT_DTS: return "dts";
- case AUDIO_FORMAT_DTS_HD: return "dts-hd";
- case AUDIO_FORMAT_DOLBY_TRUEHD: return "dolby-truehd";
- default:
- break;
- }
- return "unknown";
+std::string formatToString(audio_format_t format) {
+ std::string result;
+ FormatConverter::toString(format, result);
+ return result;
}
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 6c58613..e9c0f93 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -773,10 +773,10 @@
#undef INCLUDING_FROM_AUDIOFLINGER_H
-const char *formatToString(audio_format_t format);
-String8 inputFlagsToString(audio_input_flags_t flags);
-String8 outputFlagsToString(audio_output_flags_t flags);
-String8 devicesToString(audio_devices_t devices);
+std::string formatToString(audio_format_t format);
+std::string inputFlagsToString(audio_input_flags_t flags);
+std::string outputFlagsToString(audio_output_flags_t flags);
+std::string devicesToString(audio_devices_t devices);
const char *sourceToString(audio_source_t source);
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 582a5e8..945f4b3 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -1611,8 +1611,13 @@
// in == NULL can happen if the track was flushed just after having
// been enabled for mixing.
if (in == NULL || (((uintptr_t)in) & 3)) {
- memset(out, 0, numFrames
- * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
+ if (AUDIO_FORMAT_PCM_FLOAT == t.mMixerFormat) {
+ memset((char*)fout, 0, numFrames
+ * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
+ } else {
+ memset((char*)out, 0, numFrames
+ * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
+ }
ALOGE_IF((((uintptr_t)in) & 3),
"process__OneTrack16BitsStereoNoResampling: misaligned buffer"
" %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 96d9f97..022bf59 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -365,6 +365,8 @@
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
+ mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ ALOGV("Overriding auxiliary effect input as MONO and output as STEREO");
} else {
mConfig.inputCfg.channels = channelMask;
// TODO: Update this logic when multichannel effects are implemented.
@@ -597,11 +599,35 @@
return mStatus;
}
if (cmdCode == EFFECT_CMD_GET_PARAM &&
+ (sizeof(effect_param_t) > cmdSize ||
+ ((effect_param_t *)pCmdData)->psize > cmdSize
+ - sizeof(effect_param_t))) {
+ android_errorWriteLog(0x534e4554, "32438594");
+ android_errorWriteLog(0x534e4554, "33003822");
+ return -EINVAL;
+ }
+ if (cmdCode == EFFECT_CMD_GET_PARAM &&
(*replySize < sizeof(effect_param_t) ||
((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) {
android_errorWriteLog(0x534e4554, "29251553");
return -EINVAL;
}
+ if (cmdCode == EFFECT_CMD_GET_PARAM &&
+ (sizeof(effect_param_t) > *replySize
+ || ((effect_param_t *)pCmdData)->psize > *replySize
+ - sizeof(effect_param_t)
+ || ((effect_param_t *)pCmdData)->vsize > *replySize
+ - sizeof(effect_param_t)
+ - ((effect_param_t *)pCmdData)->psize
+ || roundUpDelta(((effect_param_t *)pCmdData)->psize, (uint32_t)sizeof(int)) >
+ *replySize
+ - sizeof(effect_param_t)
+ - ((effect_param_t *)pCmdData)->psize
+ - ((effect_param_t *)pCmdData)->vsize)) {
+ ALOGV("\tLVM_ERROR : EFFECT_CMD_GET_PARAM: reply size inconsistent");
+ android_errorWriteLog(0x534e4554, "32705438");
+ return -EINVAL;
+ }
if ((cmdCode == EFFECT_CMD_SET_PARAM
|| cmdCode == EFFECT_CMD_SET_PARAM_DEFERRED) && // DEFERRED not generally used
(sizeof(effect_param_t) > cmdSize
@@ -1047,7 +1073,7 @@
mConfig.inputCfg.samplingRate,
mConfig.inputCfg.channels,
mConfig.inputCfg.format,
- formatToString((audio_format_t)mConfig.inputCfg.format),
+ formatToString((audio_format_t)mConfig.inputCfg.format).c_str(),
mConfig.inputCfg.buffer.raw);
result.append(buffer);
@@ -1059,7 +1085,7 @@
mConfig.outputCfg.samplingRate,
mConfig.outputCfg.channels,
mConfig.outputCfg.format,
- formatToString((audio_format_t)mConfig.outputCfg.format));
+ formatToString((audio_format_t)mConfig.outputCfg.format).c_str());
result.append(buffer);
snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -1278,36 +1304,54 @@
// particular client process: no risk to block the whole media server process or mixer
// threads if we are stuck here
Mutex::Autolock _l(mCblk->lock);
- if (mCblk->clientIndex > EFFECT_PARAM_BUFFER_SIZE ||
- mCblk->serverIndex > EFFECT_PARAM_BUFFER_SIZE) {
+
+ // keep local copy of index in case of client corruption b/32220769
+ const uint32_t clientIndex = mCblk->clientIndex;
+ const uint32_t serverIndex = mCblk->serverIndex;
+ if (clientIndex > EFFECT_PARAM_BUFFER_SIZE ||
+ serverIndex > EFFECT_PARAM_BUFFER_SIZE) {
mCblk->serverIndex = 0;
mCblk->clientIndex = 0;
return BAD_VALUE;
}
status_t status = NO_ERROR;
- while (mCblk->serverIndex < mCblk->clientIndex) {
- int reply;
- uint32_t rsize = sizeof(int);
- int *p = (int *)(mBuffer + mCblk->serverIndex);
- int size = *p++;
- if (((uint8_t *)p + size) > mBuffer + mCblk->clientIndex) {
+ effect_param_t *param = NULL;
+ for (uint32_t index = serverIndex; index < clientIndex;) {
+ int *p = (int *)(mBuffer + index);
+ const int size = *p++;
+ if (size < 0
+ || size > EFFECT_PARAM_BUFFER_SIZE
+ || ((uint8_t *)p + size) > mBuffer + clientIndex) {
ALOGW("command(): invalid parameter block size");
+ status = BAD_VALUE;
break;
}
- effect_param_t *param = (effect_param_t *)p;
- if (param->psize == 0 || param->vsize == 0) {
- ALOGW("command(): null parameter or value size");
- mCblk->serverIndex += size;
- continue;
+
+ // copy to local memory in case of client corruption b/32220769
+ param = (effect_param_t *)realloc(param, size);
+ if (param == NULL) {
+ ALOGW("command(): out of memory");
+ status = NO_MEMORY;
+ break;
}
- uint32_t psize = sizeof(effect_param_t) +
- ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) +
- param->vsize;
+ memcpy(param, p, size);
+
+ int reply = 0;
+ uint32_t rsize = sizeof(reply);
status_t ret = mEffect->command(EFFECT_CMD_SET_PARAM,
- psize,
- p,
+ size,
+ param,
&rsize,
&reply);
+
+ // verify shared memory: server index shouldn't change; client index can't go back.
+ if (serverIndex != mCblk->serverIndex
+ || clientIndex > mCblk->clientIndex) {
+ android_errorWriteLog(0x534e4554, "32220769");
+ status = BAD_VALUE;
+ break;
+ }
+
// stop at first error encountered
if (ret != NO_ERROR) {
status = ret;
@@ -1317,8 +1361,9 @@
*(int *)pReplyData = reply;
break;
}
- mCblk->serverIndex += size;
+ index += size;
}
+ free(param);
mCblk->serverIndex = 0;
mCblk->clientIndex = 0;
return status;
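The rewritten command loop above treats the shared control block as untrusted: it snapshots clientIndex/serverIndex up front, bounds-checks each length-prefixed parameter block, copies the block into local memory before passing it to the effect, and re-verifies the shared indices after the call. A simplified sketch of that validate-then-copy pattern over a shared region; the layout here is plain length-prefixed blobs, not the real effect_param_t format:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Returns 0 on success, -1 on any malformed block.
    int consumeBlocks(const uint8_t *shared, uint32_t regionSize, uint32_t clientIndex) {
        if (clientIndex > regionSize) {
            return -1;                                   // untrusted index out of range
        }
        for (uint32_t index = 0; index < clientIndex; ) {
            int32_t size;
            if (clientIndex - index < sizeof(size)) {
                return -1;                               // no room for the length field
            }
            std::memcpy(&size, shared + index, sizeof(size));
            index += sizeof(size);
            if (size <= 0 || static_cast<uint32_t>(size) > clientIndex - index) {
                return -1;                               // block does not fit the region
            }
            void *local = std::malloc(size);             // operate on a private copy,
            if (local == nullptr) {                      // never on the shared bytes
                return -1;
            }
            std::memcpy(local, shared + index, size);
            // ... hand `local` to the effect command here ...
            std::free(local);
            index += size;
        }
        return 0;
    }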
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index cfa3e1a..0bcb9a0 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -32,7 +32,7 @@
void *buffer,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_output_flags_t flags,
track_type type);
virtual ~Track();
@@ -188,7 +188,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- int uid);
+ uid_t uid);
virtual ~OutputTrack();
virtual status_t start(AudioSystem::sync_event_t event =
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 123e033..883ff6b 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -30,7 +30,7 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_input_flags_t flags,
track_type type);
virtual ~RecordTrack();
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 6b23e56..1d7b946 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -29,10 +29,12 @@
#include <cutils/properties.h>
#include <media/AudioParameter.h>
#include <media/AudioResamplerPublic.h>
+#include <media/TypeConverter.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <private/media/AudioTrackShared.h>
+#include <private/android_filesystem_config.h>
#include <audio_utils/conversion.h>
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
@@ -448,168 +450,28 @@
}
}
-String8 devicesToString(audio_devices_t devices)
+std::string devicesToString(audio_devices_t devices)
{
- static const struct mapping {
- audio_devices_t mDevices;
- const char * mString;
- } mappingsOut[] = {
- {AUDIO_DEVICE_OUT_EARPIECE, "EARPIECE"},
- {AUDIO_DEVICE_OUT_SPEAKER, "SPEAKER"},
- {AUDIO_DEVICE_OUT_WIRED_HEADSET, "WIRED_HEADSET"},
- {AUDIO_DEVICE_OUT_WIRED_HEADPHONE, "WIRED_HEADPHONE"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_SCO, "BLUETOOTH_SCO"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, "BLUETOOTH_SCO_CARKIT"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, "BLUETOOTH_A2DP"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,"BLUETOOTH_A2DP_HEADPHONES"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, "BLUETOOTH_A2DP_SPEAKER"},
- {AUDIO_DEVICE_OUT_AUX_DIGITAL, "AUX_DIGITAL"},
- {AUDIO_DEVICE_OUT_HDMI, "HDMI"},
- {AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,"ANLG_DOCK_HEADSET"},
- {AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,"DGTL_DOCK_HEADSET"},
- {AUDIO_DEVICE_OUT_USB_ACCESSORY, "USB_ACCESSORY"},
- {AUDIO_DEVICE_OUT_USB_DEVICE, "USB_DEVICE"},
- {AUDIO_DEVICE_OUT_TELEPHONY_TX, "TELEPHONY_TX"},
- {AUDIO_DEVICE_OUT_LINE, "LINE"},
- {AUDIO_DEVICE_OUT_HDMI_ARC, "HDMI_ARC"},
- {AUDIO_DEVICE_OUT_SPDIF, "SPDIF"},
- {AUDIO_DEVICE_OUT_FM, "FM"},
- {AUDIO_DEVICE_OUT_AUX_LINE, "AUX_LINE"},
- {AUDIO_DEVICE_OUT_SPEAKER_SAFE, "SPEAKER_SAFE"},
- {AUDIO_DEVICE_OUT_IP, "IP"},
- {AUDIO_DEVICE_OUT_BUS, "BUS"},
- {AUDIO_DEVICE_NONE, "NONE"}, // must be last
- }, mappingsIn[] = {
- {AUDIO_DEVICE_IN_COMMUNICATION, "COMMUNICATION"},
- {AUDIO_DEVICE_IN_AMBIENT, "AMBIENT"},
- {AUDIO_DEVICE_IN_BUILTIN_MIC, "BUILTIN_MIC"},
- {AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
- {AUDIO_DEVICE_IN_WIRED_HEADSET, "WIRED_HEADSET"},
- {AUDIO_DEVICE_IN_AUX_DIGITAL, "AUX_DIGITAL"},
- {AUDIO_DEVICE_IN_VOICE_CALL, "VOICE_CALL"},
- {AUDIO_DEVICE_IN_TELEPHONY_RX, "TELEPHONY_RX"},
- {AUDIO_DEVICE_IN_BACK_MIC, "BACK_MIC"},
- {AUDIO_DEVICE_IN_REMOTE_SUBMIX, "REMOTE_SUBMIX"},
- {AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET"},
- {AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET"},
- {AUDIO_DEVICE_IN_USB_ACCESSORY, "USB_ACCESSORY"},
- {AUDIO_DEVICE_IN_USB_DEVICE, "USB_DEVICE"},
- {AUDIO_DEVICE_IN_FM_TUNER, "FM_TUNER"},
- {AUDIO_DEVICE_IN_TV_TUNER, "TV_TUNER"},
- {AUDIO_DEVICE_IN_LINE, "LINE"},
- {AUDIO_DEVICE_IN_SPDIF, "SPDIF"},
- {AUDIO_DEVICE_IN_BLUETOOTH_A2DP, "BLUETOOTH_A2DP"},
- {AUDIO_DEVICE_IN_LOOPBACK, "LOOPBACK"},
- {AUDIO_DEVICE_IN_IP, "IP"},
- {AUDIO_DEVICE_IN_BUS, "BUS"},
- {AUDIO_DEVICE_NONE, "NONE"}, // must be last
- };
- String8 result;
- audio_devices_t allDevices = AUDIO_DEVICE_NONE;
- const mapping *entry;
+ std::string result;
if (devices & AUDIO_DEVICE_BIT_IN) {
- devices &= ~AUDIO_DEVICE_BIT_IN;
- entry = mappingsIn;
+ InputDeviceConverter::maskToString(devices, result);
} else {
- entry = mappingsOut;
- }
- for ( ; entry->mDevices != AUDIO_DEVICE_NONE; entry++) {
- allDevices = (audio_devices_t) (allDevices | entry->mDevices);
- if (devices & entry->mDevices) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.append(entry->mString);
- }
- }
- if (devices & ~allDevices) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.appendFormat("0x%X", devices & ~allDevices);
- }
- if (result.isEmpty()) {
- result.append(entry->mString);
+ OutputDeviceConverter::maskToString(devices, result);
}
return result;
}
-String8 inputFlagsToString(audio_input_flags_t flags)
+std::string inputFlagsToString(audio_input_flags_t flags)
{
- static const struct mapping {
- audio_input_flags_t mFlag;
- const char * mString;
- } mappings[] = {
- {AUDIO_INPUT_FLAG_FAST, "FAST"},
- {AUDIO_INPUT_FLAG_HW_HOTWORD, "HW_HOTWORD"},
- {AUDIO_INPUT_FLAG_RAW, "RAW"},
- {AUDIO_INPUT_FLAG_SYNC, "SYNC"},
- {AUDIO_INPUT_FLAG_NONE, "NONE"}, // must be last
- };
- String8 result;
- audio_input_flags_t allFlags = AUDIO_INPUT_FLAG_NONE;
- const mapping *entry;
- for (entry = mappings; entry->mFlag != AUDIO_INPUT_FLAG_NONE; entry++) {
- allFlags = (audio_input_flags_t) (allFlags | entry->mFlag);
- if (flags & entry->mFlag) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.append(entry->mString);
- }
- }
- if (flags & ~allFlags) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.appendFormat("0x%X", flags & ~allFlags);
- }
- if (result.isEmpty()) {
- result.append(entry->mString);
- }
+ std::string result;
+ InputFlagConverter::maskToString(flags, result);
return result;
}
-String8 outputFlagsToString(audio_output_flags_t flags)
+std::string outputFlagsToString(audio_output_flags_t flags)
{
- static const struct mapping {
- audio_output_flags_t mFlag;
- const char * mString;
- } mappings[] = {
- {AUDIO_OUTPUT_FLAG_DIRECT, "DIRECT"},
- {AUDIO_OUTPUT_FLAG_PRIMARY, "PRIMARY"},
- {AUDIO_OUTPUT_FLAG_FAST, "FAST"},
- {AUDIO_OUTPUT_FLAG_DEEP_BUFFER, "DEEP_BUFFER"},
- {AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,"COMPRESS_OFFLOAD"},
- {AUDIO_OUTPUT_FLAG_NON_BLOCKING, "NON_BLOCKING"},
- {AUDIO_OUTPUT_FLAG_HW_AV_SYNC, "HW_AV_SYNC"},
- {AUDIO_OUTPUT_FLAG_RAW, "RAW"},
- {AUDIO_OUTPUT_FLAG_SYNC, "SYNC"},
- {AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO, "IEC958_NONAUDIO"},
- {AUDIO_OUTPUT_FLAG_NONE, "NONE"}, // must be last
- };
- String8 result;
- audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
- const mapping *entry;
- for (entry = mappings; entry->mFlag != AUDIO_OUTPUT_FLAG_NONE; entry++) {
- allFlags = (audio_output_flags_t) (allFlags | entry->mFlag);
- if (flags & entry->mFlag) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.append(entry->mString);
- }
- }
- if (flags & ~allFlags) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.appendFormat("0x%X", flags & ~allFlags);
- }
- if (result.isEmpty()) {
- result.append(entry->mString);
- }
+ std::string result;
+ OutputFlagConverter::maskToString(flags, result);
return result;
}
@@ -943,12 +805,12 @@
dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
- dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
+ dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat).c_str());
dprintf(fd, " HAL buffer size: %zu bytes\n", mBufferSize);
dprintf(fd, " Channel count: %u\n", mChannelCount);
dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
channelMaskToString(mChannelMask, mType != RECORD).string());
- dprintf(fd, " Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+ dprintf(fd, " Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat).c_str());
dprintf(fd, " Processing frame size: %zu bytes\n", mFrameSize);
dprintf(fd, " Pending config events:");
size_t numConfig = mConfigEvents.size();
@@ -961,8 +823,8 @@
} else {
dprintf(fd, " none\n");
}
- dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).string());
- dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).string());
+ dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).c_str());
+ dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
if (locked) {
@@ -1040,7 +902,8 @@
}
if (!mNotifiedBatteryStart) {
- BatteryNotifier::getInstance().noteStartAudio();
+ // TODO: call this function for each track when it becomes active.
+ BatteryNotifier::getInstance().noteStartAudio(AID_AUDIOSERVER);
mNotifiedBatteryStart = true;
}
gBoottime.acquire(mWakeLockToken);
@@ -1067,16 +930,12 @@
}
if (mNotifiedBatteryStart) {
- BatteryNotifier::getInstance().noteStopAudio();
+ // TODO: call this function for each track when it becomes inactive.
+ BatteryNotifier::getInstance().noteStopAudio(AID_AUDIOSERVER);
mNotifiedBatteryStart = false;
}
}
-void AudioFlinger::ThreadBase::updateWakeLockUids(const SortedVector<int> &uids) {
- Mutex::Autolock _l(mLock);
- updateWakeLockUids_l(uids);
-}
-
void AudioFlinger::ThreadBase::getPowerManager_l() {
if (mSystemReady && mPowerManager == 0) {
// use checkService() to avoid blocking if power service is not up yet
@@ -1102,10 +961,10 @@
return;
}
if (mPowerManager != 0) {
- sp<IBinder> binder = new BBinder();
- status_t status;
- status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
- true /* FIXME force oneway contrary to .aidl */);
+ std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints
+ status_t status = mPowerManager->updateWakeLockUids(
+ mWakeLockToken, uidsAsInt.size(), uidsAsInt.data(),
+ true /* FIXME force oneway contrary to .aidl */);
ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status);
}
}
@@ -1830,8 +1689,8 @@
dprintf(fd, " Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
AudioStreamOut *output = mOutput;
audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
- String8 flagsAsString = outputFlagsToString(flags);
- dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
+ dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n",
+ output, flags, outputFlagsToString(flags).c_str());
dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
if (mPipeSink.get() != nullptr) {
@@ -1872,7 +1731,7 @@
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t tid,
- int uid,
+ uid_t uid,
status_t *status)
{
size_t frameCount = *pFrameCount;
@@ -4569,7 +4428,7 @@
{
uint32_t trackCount = 0;
for (size_t i = 0; i < mTracks.size() ; i++) {
- if (mTracks[i]->uid() == (int)uid) {
+ if (mTracks[i]->uid() == uid) {
trackCount++;
}
}
@@ -6545,7 +6404,7 @@
size_t *pFrameCount,
audio_session_t sessionId,
size_t *notificationFrames,
- int uid,
+ uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
status_t *status)
@@ -6876,6 +6735,10 @@
dumpBase(fd, args);
+ AudioStreamIn *input = mInput;
+ audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
+ dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
+ input, flags, inputFlagsToString(flags).c_str());
if (mActiveTracks.size() == 0) {
dprintf(fd, " No active record clients\n");
}
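devicesToString, inputFlagsToString and outputFlagsToString above now delegate to the shared TypeConverter maskToString helpers instead of carrying their own mapping tables. For reference, the behavior the removed loops implemented (and which the converters are expected to reproduce) is the usual mask-to-string idiom: join the names of known bits with '|' and append any leftover bits in hex. A small, self-contained sketch with illustrative flag values:

    #include <cstdio>
    #include <string>

    static std::string flagsToString(unsigned flags) {
        static const struct { unsigned bit; const char *name; } kFlags[] = {
            {0x1, "FAST"}, {0x2, "RAW"}, {0x4, "SYNC"},
        };
        std::string result;
        unsigned known = 0;
        for (const auto &f : kFlags) {
            known |= f.bit;
            if (flags & f.bit) {
                if (!result.empty()) result += "|";
                result += f.name;
            }
        }
        if (flags & ~known) {                    // bits with no known name
            char buf[16];
            std::snprintf(buf, sizeof(buf), "0x%X", flags & ~known);
            if (!result.empty()) result += "|";
            result += buf;
        }
        if (result.empty()) result = "NONE";     // no bits set at all
        return result;
    }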
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index da0c705..5235cde 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -404,7 +404,6 @@
virtual void acquireWakeLock_l(int uid = -1);
void releaseWakeLock();
void releaseWakeLock_l();
- void updateWakeLockUids(const SortedVector<int> &uids);
void updateWakeLockUids_l(const SortedVector<int> &uids);
void getPowerManager_l();
void setEffectSuspended_l(const effect_uuid_t *type,
@@ -588,7 +587,7 @@
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t tid,
- int uid,
+ uid_t uid,
status_t *status /*non-NULL*/);
AudioStreamOut* getOutput() const;
@@ -1286,7 +1285,7 @@
size_t *pFrameCount,
audio_session_t sessionId,
size_t *notificationFrames,
- int uid,
+ uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
status_t *status /*non-NULL*/);
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 7c48375..4fcb596 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -62,7 +62,7 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
bool isOut,
alloc_type alloc = ALLOC_CBLK,
track_type type = TYPE_DEFAULT);
@@ -75,7 +75,7 @@
sp<IMemory> getCblk() const { return mCblkMemory; }
audio_track_cblk_t* cblk() const { return mCblk; }
audio_session_t sessionId() const { return mSessionId; }
- int uid() const { return mUid; }
+ uid_t uid() const { return mUid; }
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
sp<IMemory> getBuffers() const { return mBufferMemory; }
@@ -153,7 +153,7 @@
// openRecord(), and then adjusted as needed
const audio_session_t mSessionId;
- int mUid;
+ uid_t mUid;
Vector < sp<SyncEvent> >mSyncEvents;
const bool mIsOut;
sp<ServerProxy> mServerProxy;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index e1fe7e2..8f134c1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -72,7 +72,7 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int clientUid,
+ uid_t clientUid,
bool isOut,
alloc_type alloc,
track_type type)
@@ -99,10 +99,10 @@
mThreadIoHandle(thread->id())
{
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid) || clientUid == -1) {
- ALOGW_IF(clientUid != -1 && clientUid != (int)callingUid,
+ if (!isTrustedCallingUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
+ ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
- clientUid = (int)callingUid;
+ clientUid = callingUid;
}
// clientUid contains the uid of the app that is responsible for this track, so we can blame
// battery usage on it.
@@ -341,7 +341,7 @@
void *buffer,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_output_flags_t flags,
track_type type)
: TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
@@ -1138,7 +1138,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- int uid)
+ uid_t uid)
: Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
sampleRate, format, channelMask, frameCount,
NULL, 0, AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
@@ -1474,7 +1474,7 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_input_flags_t flags,
track_type type)
: TrackBase(thread, client, sampleRate, format,
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 91cc3d2..9f3488d 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -4,19 +4,9 @@
LOCAL_SRC_FILES:= \
service/AudioPolicyService.cpp \
- service/AudioPolicyEffects.cpp
-
-ifeq ($(USE_LEGACY_AUDIO_POLICY), 1)
-LOCAL_SRC_FILES += \
- service/AudioPolicyInterfaceImplLegacy.cpp \
- service/AudioPolicyClientImplLegacy.cpp
-
- LOCAL_CFLAGS += -DUSE_LEGACY_AUDIO_POLICY
-else
-LOCAL_SRC_FILES += \
+ service/AudioPolicyEffects.cpp \
service/AudioPolicyInterfaceImpl.cpp \
service/AudioPolicyClientImpl.cpp
-endif
LOCAL_C_INCLUDES := \
$(TOPDIR)frameworks/av/services/audioflinger \
@@ -30,14 +20,10 @@
libutils \
liblog \
libbinder \
- libmedia \
+ libaudioclient \
libhardware_legacy \
- libserviceutility
-
-ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
-LOCAL_SHARED_LIBRARIES += \
+ libserviceutility \
libaudiopolicymanager
-endif
LOCAL_STATIC_LIBRARIES := \
libmedia_helper \
@@ -52,8 +38,6 @@
include $(BUILD_SHARED_LIBRARY)
-ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
-
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= managerdefault/AudioPolicyManager.cpp
@@ -90,9 +74,10 @@
$(TOPDIR)frameworks/av/services/audiopolicy/utilities
LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
libaudiopolicycomponents
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
+
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
LOCAL_STATIC_LIBRARIES += libxml2
@@ -135,7 +120,6 @@
include $(BUILD_SHARED_LIBRARY)
endif
-endif
#######################################################################
# Recursive call sub-folder Android.mk
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 71d70de..31f0550 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -47,16 +47,6 @@
#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_BUS)
/**
- * Stub audio output device. Used in policy configuration file on platforms without audio outputs.
- * This alias value to AUDIO_DEVICE_OUT_DEFAULT is only used in the audio policy context.
- */
-#define AUDIO_DEVICE_OUT_STUB AUDIO_DEVICE_OUT_DEFAULT
-/**
- * Stub audio input device. Used in policy configuration file on platforms without audio inputs.
- * This alias value to AUDIO_DEVICE_IN_DEFAULT is only used in the audio policy context.
- */
-#define AUDIO_DEVICE_IN_STUB AUDIO_DEVICE_IN_DEFAULT
-/**
* Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume
* control APIs (e.g. setStreamVolumeIndex()).
*/
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index 8e3dbad..84e3a36 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -16,201 +16,19 @@
#pragma once
+#include <media/TypeConverter.h>
+
#include "policy.h"
#include <Volume.h>
-#include <media/AudioParameter.h>
-#include <system/audio.h>
-#include <convert/convert.h>
-#include <utils/Log.h>
-#include <string>
-#include <utils/Vector.h>
-#include <utils/SortedVector.h>
namespace android {
-struct SampleRateTraits
-{
- typedef uint32_t Type;
- typedef SortedVector<Type> Collection;
-};
-struct DeviceTraits
-{
- typedef audio_devices_t Type;
- typedef Vector<Type> Collection;
-};
-struct OutputFlagTraits
-{
- typedef audio_output_flags_t Type;
- typedef Vector<Type> Collection;
-};
-struct InputFlagTraits
-{
- typedef audio_input_flags_t Type;
- typedef Vector<Type> Collection;
-};
-struct FormatTraits
-{
- typedef audio_format_t Type;
- typedef Vector<Type> Collection;
-};
-struct ChannelTraits
-{
- typedef audio_channel_mask_t Type;
- typedef SortedVector<Type> Collection;
-};
-struct OutputChannelTraits : public ChannelTraits {};
-struct InputChannelTraits : public ChannelTraits {};
-struct ChannelIndexTraits : public ChannelTraits {};
-struct GainModeTraits
-{
- typedef audio_gain_mode_t Type;
- typedef Vector<Type> Collection;
-};
-struct StreamTraits
-{
- typedef audio_stream_type_t Type;
- typedef Vector<Type> Collection;
-};
struct DeviceCategoryTraits
{
typedef device_category Type;
typedef Vector<Type> Collection;
};
-struct AudioModeTraits
-{
- typedef audio_mode_t Type;
- typedef Vector<Type> Collection;
-};
-template <typename T>
-struct DefaultTraits
-{
- typedef T Type;
- typedef Vector<Type> Collection;
-};
-template <class Traits>
-static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
- const char *del = AudioParameter::valueListSeparator)
-{
- char *literal = strdup(str.c_str());
- for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
- typename Traits::Type value;
- if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
- collection.add(value);
- }
- }
- free(literal);
-}
-
-template <class Traits>
-class TypeConverter
-{
-public:
- static bool toString(const typename Traits::Type &value, std::string &str);
-
- static bool fromString(const std::string &str, typename Traits::Type &result);
-
- static void collectionFromString(const std::string &str,
- typename Traits::Collection &collection,
- const char *del = AudioParameter::valueListSeparator);
-
- static uint32_t maskFromString(
- const std::string &str, const char *del = AudioParameter::valueListSeparator);
-
- static void maskToString(
- uint32_t mask, std::string &str, const char *del = AudioParameter::valueListSeparator);
-
-protected:
- struct Table {
- const char *literal;
- typename Traits::Type value;
- };
-
- static const Table mTable[];
-};
-
-typedef TypeConverter<DeviceTraits> DeviceConverter;
-typedef TypeConverter<OutputFlagTraits> OutputFlagConverter;
-typedef TypeConverter<InputFlagTraits> InputFlagConverter;
-typedef TypeConverter<FormatTraits> FormatConverter;
-typedef TypeConverter<OutputChannelTraits> OutputChannelConverter;
-typedef TypeConverter<InputChannelTraits> InputChannelConverter;
-typedef TypeConverter<ChannelIndexTraits> ChannelIndexConverter;
-typedef TypeConverter<GainModeTraits> GainModeConverter;
-typedef TypeConverter<StreamTraits> StreamTypeConverter;
typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
-typedef TypeConverter<AudioModeTraits> AudioModeConverter;
-
-inline
-static SampleRateTraits::Collection samplingRatesFromString(
- const std::string &samplingRates, const char *del = AudioParameter::valueListSeparator)
-{
- SampleRateTraits::Collection samplingRateCollection;
- collectionFromString<SampleRateTraits>(samplingRates, samplingRateCollection, del);
- return samplingRateCollection;
-}
-
-inline
-static FormatTraits::Collection formatsFromString(
- const std::string &formats, const char *del = AudioParameter::valueListSeparator)
-{
- FormatTraits::Collection formatCollection;
- FormatConverter::collectionFromString(formats, formatCollection, del);
- return formatCollection;
-}
-
-inline
-static audio_format_t formatFromString(const std::string &literalFormat)
-{
- audio_format_t format;
- if (literalFormat.empty()) {
- return gDynamicFormat;
- }
- FormatConverter::fromString(literalFormat, format);
- return format;
-}
-
-inline
-static audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
-{
- audio_channel_mask_t channels;
- if (!OutputChannelConverter::fromString(literalChannels, channels) ||
- !InputChannelConverter::fromString(literalChannels, channels)) {
- return AUDIO_CHANNEL_INVALID;
- }
- return channels;
-}
-
-inline
-static ChannelTraits::Collection channelMasksFromString(
- const std::string &channels, const char *del = AudioParameter::valueListSeparator)
-{
- ChannelTraits::Collection channelMaskCollection;
- OutputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
- InputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
- ChannelIndexConverter::collectionFromString(channels, channelMaskCollection, del);
- return channelMaskCollection;
-}
-
-inline
-static InputChannelTraits::Collection inputChannelMasksFromString(
- const std::string &inChannels, const char *del = AudioParameter::valueListSeparator)
-{
- InputChannelTraits::Collection inputChannelMaskCollection;
- InputChannelConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
- ChannelIndexConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
- return inputChannelMaskCollection;
-}
-
-inline
-static OutputChannelTraits::Collection outputChannelMasksFromString(
- const std::string &outChannels, const char *del = AudioParameter::valueListSeparator)
-{
- OutputChannelTraits::Collection outputChannelMaskCollection;
- OutputChannelConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
- ChannelIndexConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
- return outputChannelMaskCollection;
-}
}; // namespace android
-
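After this change the policy-local header keeps only the DeviceCategory traits and converter and pulls everything else from media/TypeConverter.h. The code being removed shows the table-driven converter pattern the shared header now provides: a static table of {literal, value} pairs plus toString/fromString lookups. A reduced sketch of that pattern with an illustrative enum (no collection or mask helpers):

    #include <string>

    enum Category { CAT_HEADSET, CAT_SPEAKER, CAT_EARPIECE };

    struct Table { const char *literal; Category value; };

    static const Table kTable[] = {
        {"HEADSET",  CAT_HEADSET},
        {"SPEAKER",  CAT_SPEAKER},
        {"EARPIECE", CAT_EARPIECE},
    };

    static bool toString(Category value, std::string &str) {
        for (const Table &e : kTable) {
            if (e.value == value) { str = e.literal; return true; }
        }
        return false;                       // value not present in the table
    }

    static bool fromString(const std::string &str, Category &value) {
        for (const Table &e : kTable) {
            if (str == e.literal) { value = e.value; return true; }
        }
        return false;                       // literal not present in the table
    }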
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index f382dec..0daae6c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -54,7 +54,7 @@
for (size_t i = 0; i < mPatch.num_sources; i++) {
if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
std::string device;
- DeviceConverter::toString(mPatch.sources[i].ext.device.type, device);
+ deviceToString(mPatch.sources[i].ext.device.type, device);
snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
mPatch.sources[i].id,
device.c_str());
@@ -69,7 +69,7 @@
for (size_t i = 0; i < mPatch.num_sinks; i++) {
if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
std::string device;
- DeviceConverter::toString(mPatch.sinks[i].ext.device.type, device);
+ deviceToString(mPatch.sinks[i].ext.device.type, device);
snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
mPatch.sinks[i].id,
device.c_str());
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index d751f07..e5888e2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -18,12 +18,11 @@
//#define LOG_NDEBUG 0
#include "ConfigParsingUtils.h"
-#include <convert/convert.h>
#include "AudioGain.h"
#include "IOProfile.h"
-#include "TypeConverter.h"
#include <system/audio.h>
#include <media/AudioParameter.h>
+#include <media/TypeConverter.h>
#include <utils/Log.h>
#include <cutils/misc.h>
@@ -106,7 +105,7 @@
audio_devices_t type = AUDIO_DEVICE_NONE;
while (node) {
if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
- DeviceConverter::fromString(node->value, type);
+ deviceFromString(node->value, type);
break;
}
node = node->next;
@@ -294,7 +293,7 @@
while (devTag != NULL) {
if (strlen(devTag) != 0) {
audio_devices_t type;
- if (DeviceConverter::fromString(devTag, type)) {
+ if (deviceFromString(devTag, type)) {
uint32_t inBit = type & AUDIO_DEVICE_BIT_IN;
type &= ~AUDIO_DEVICE_BIT_IN;
while (type) {
@@ -341,7 +340,7 @@
config.addAvailableOutputDevices(availableOutputDevices);
} else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
audio_devices_t device = AUDIO_DEVICE_NONE;
- DeviceConverter::fromString(node->value, device);
+ deviceFromString(node->value, device);
if (device != AUDIO_DEVICE_NONE) {
sp<DeviceDescriptor> defaultOutputDevice = new DeviceDescriptor(device);
config.setDefaultOutputDevice(defaultOutputDevice);
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index ba2b9e3..f0e48b6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -285,7 +285,7 @@
result.append(buffer);
}
std::string deviceLiteral;
- if (DeviceConverter::toString(mDeviceType, deviceLiteral)) {
+ if (deviceToString(mDeviceType, deviceLiteral)) {
snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "", deviceLiteral.c_str());
result.append(buffer);
}
@@ -302,7 +302,7 @@
void DeviceDescriptor::log() const
{
std::string device;
- DeviceConverter::toString(mDeviceType, device);
+ deviceToString(mDeviceType, device);
ALOGI("Device id:%d type:0x%X:%s, addr:%s", mId, mDeviceType, device.c_str(),
mAddress.string());
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 44f382b..a224004 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -18,7 +18,7 @@
//#define LOG_NDEBUG 0
#include "Serializer.h"
-#include <convert/convert.h>
+#include <media/convert.h>
#include "TypeConverter.h"
#include <libxml/parser.h>
#include <libxml/xinclude.h>
@@ -199,7 +199,8 @@
string format = getXmlAttribute(root, Attributes::format);
string channels = getXmlAttribute(root, Attributes::channelMasks);
- profile = new Element(formatFromString(format), channelMasksFromString(channels, ","),
+ profile = new Element(formatFromString(format, gDynamicFormat),
+ channelMasksFromString(channels, ","),
samplingRatesFromString(samplingRates, ","));
profile->setDynamicFormat(profile->getFormat() == gDynamicFormat);
@@ -300,7 +301,7 @@
AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
audio_devices_t type = AUDIO_DEVICE_NONE;
- if (!DeviceConverter::fromString(typeName, type) ||
+ if (!deviceFromString(typeName, type) ||
(!audio_is_input_device(type) && portRole == AUDIO_PORT_ROLE_SOURCE) ||
(!audio_is_output_devices(type) && portRole == AUDIO_PORT_ROLE_SINK)) {
ALOGW("%s: bad type %08x", __FUNCTION__, type);
@@ -383,6 +384,7 @@
sp<AudioPort> source = ctx->findPortByTagName(String8(devTag));
if (source == NULL) {
ALOGE("%s: no source found with name=%s", __FUNCTION__, devTag);
+ free(sourcesLiteral);
return BAD_VALUE;
}
sources.add(source);
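
Note: the added free(sourcesLiteral) plugs a leak on the early-return path — the string duplicated for strtok() was previously released only on the success path. A minimal sketch of the same pattern with a scoped owner, so no exit path can leak the copy (helper and callback names here are illustrative, not part of the Serializer API):

    #include <cstdlib>
    #include <cstring>
    #include <memory>

    // Hypothetical helper: the strdup()'d buffer is owned by a unique_ptr with
    // std::free as its deleter, so every return path releases it.
    static bool forEachToken(const char *csv, const char *delim,
                             bool (*visit)(const char *token)) {
        std::unique_ptr<char, decltype(&std::free)> literal(strdup(csv), std::free);
        if (!literal) return false;
        for (char *tok = strtok(literal.get(), delim); tok != nullptr;
             tok = strtok(nullptr, delim)) {
            if (!visit(tok)) {
                return false;   // early return no longer leaks the copy
            }
        }
        return true;
    }
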
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index cfc0985..4839683 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -19,282 +19,17 @@
namespace android {
#define MAKE_STRING_FROM_ENUM(string) { #string, string }
-
-template <>
-const DeviceConverter::Table DeviceConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPDIF),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_SPDIF),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
-};
-
-
-template <>
-const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_FAST),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_TTS),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
-};
-
-
-template <>
-const InputFlagConverter::Table InputFlagConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_FAST),
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
-};
-
-
-template <>
-const FormatConverter::Table FormatConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_16_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_32_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_FLOAT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP3),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_MAIN),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LC),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SSR),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LTP),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V1),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ERLC),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_OPUS),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC3),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
-};
-
-
-template <>
-const OutputChannelConverter::Table OutputChannelConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
-};
-
-
-template <>
-const InputChannelConverter::Table InputChannelConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
-};
-
-template <>
-const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
- {"AUDIO_CHANNEL_INDEX_MASK_1", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_1)},
- {"AUDIO_CHANNEL_INDEX_MASK_2", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_2)},
- {"AUDIO_CHANNEL_INDEX_MASK_3", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_3)},
- {"AUDIO_CHANNEL_INDEX_MASK_4", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_4)},
- {"AUDIO_CHANNEL_INDEX_MASK_5", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_5)},
- {"AUDIO_CHANNEL_INDEX_MASK_6", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_6)},
- {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
- {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
-};
-
-
-template <>
-const GainModeConverter::Table GainModeConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_JOINT),
- MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_CHANNELS),
- MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
-};
-
+#define TERMINATOR { .literal = nullptr }
template <>
const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[] = {
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_HEADSET),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_SPEAKER),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EARPIECE),
- MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA)
+ MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA),
+ TERMINATOR
};
-
-template <>
-const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_MUSIC),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ALARM),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_NOTIFICATION),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_BLUETOOTH_SCO ),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ENFORCED_AUDIBLE),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
-};
-
-
-template<>
-const AudioModeConverter::Table AudioModeConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_MODE_INVALID),
- MAKE_STRING_FROM_ENUM(AUDIO_MODE_CURRENT),
- MAKE_STRING_FROM_ENUM(AUDIO_MODE_NORMAL),
- MAKE_STRING_FROM_ENUM(AUDIO_MODE_RINGTONE),
- MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
- MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
-};
-
-
-template <class Traits>
-bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
-{
- for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
- if (mTable[i].value == value) {
- str = mTable[i].literal;
- return true;
- }
- }
- char result[64];
- snprintf(result, sizeof(result), "Unknown enum value %d", value);
- str = result;
- return false;
-}
-
-template <class Traits>
-bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
-{
- for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
- if (strcmp(mTable[i].literal, str.c_str()) == 0) {
- ALOGV("stringToEnum() found %s", mTable[i].literal);
- result = mTable[i].value;
- return true;
- }
- }
- return false;
-}
-
-template <class Traits>
-void TypeConverter<Traits>::collectionFromString(const std::string &str,
- typename Traits::Collection &collection,
- const char *del)
-{
- char *literal = strdup(str.c_str());
-
- for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
- typename Traits::Type value;
- if (fromString(cstr, value)) {
- collection.add(value);
- }
- }
- free(literal);
-}
-
-template <class Traits>
-uint32_t TypeConverter<Traits>::maskFromString(const std::string &str, const char *del)
-{
- char *literal = strdup(str.c_str());
- uint32_t value = 0;
- for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
- typename Traits::Type type;
- if (fromString(cstr, type)) {
- value |= static_cast<uint32_t>(type);
- }
- }
- free(literal);
- return value;
-}
-
-template <class Traits>
-void TypeConverter<Traits>::maskToString(uint32_t mask, std::string &str, const char *del)
-{
- bool first_flag = true;
- for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
- if ((mask & mTable[i].value) == mTable[i].value) {
- if (!first_flag) str += del;
- first_flag = false;
- str += mTable[i].literal;
- }
- }
-}
-
-template class TypeConverter<DeviceTraits>;
-template class TypeConverter<OutputFlagTraits>;
-template class TypeConverter<InputFlagTraits>;
-template class TypeConverter<FormatTraits>;
-template class TypeConverter<OutputChannelTraits>;
-template class TypeConverter<InputChannelTraits>;
-template class TypeConverter<ChannelIndexTraits>;
-template class TypeConverter<GainModeTraits>;
-template class TypeConverter<StreamTraits>;
template class TypeConverter<DeviceCategoryTraits>;
-template class TypeConverter<AudioModeTraits>;
}; // namespace android
-
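
Note: with the shared converter tables moving into libmedia_helper, the remaining DeviceCategoryConverter table gains a null TERMINATOR entry, so iteration can stop at the sentinel instead of relying on sizeof(mTable)/sizeof(mTable[0]). A hedged sketch of that lookup style (struct and function names are illustrative, not the libmedia TypeConverter API):

    #include <string>

    struct Entry { const char *literal; int value; };

    // Illustrative sentinel-terminated table: the {nullptr, ...} entry marks the end.
    static const Entry kTable[] = {
        { "DEVICE_CATEGORY_HEADSET", 0 },
        { "DEVICE_CATEGORY_SPEAKER", 1 },
        { nullptr,                   0 },   // TERMINATOR
    };

    static bool lookup(const std::string &name, int &out) {
        for (const Entry *e = kTable; e->literal != nullptr; ++e) {
            if (name == e->literal) {
                out = e->value;
                return true;
            }
        }
        return false;
    }
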
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index 6dba75b..08c677e 100644
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -39,15 +39,19 @@
LOCAL_MODULE := libaudiopolicyengineconfigurable
LOCAL_MODULE_TAGS := optional
+
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
+
LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
libaudiopolicypfwwrapper \
libaudiopolicycomponents \
libxml2
LOCAL_SHARED_LIBRARIES := \
+ liblog \
libcutils \
libutils \
+ liblog \
libaudioutils \
libparameter
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index f4283a8..d031bc3 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -8,15 +8,16 @@
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include \
- $(TARGET_OUT_HEADERS)/parameter \
$(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
$(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/interface \
$(TOPDIR)frameworks/av/services/audiopolicy/utilities/convert \
LOCAL_SRC_FILES:= ParameterManagerWrapper.cpp
-LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
+
+LOCAL_SHARED_LIBRARIES := \
+ libparameter \
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index 6872e52..8d51293 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -22,7 +22,7 @@
#include <ParameterMgrPlatformConnector.h>
#include <SelectionCriterionTypeInterface.h>
#include <SelectionCriterionInterface.h>
-#include <convert.h>
+#include <media/convert.h>
#include <algorithm>
#include <cutils/config_utils.h>
#include <cutils/misc.h>
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index e6de8ae..c1bb3fb 100644
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -34,8 +34,9 @@
LOCAL_MODULE := libaudiopolicyenginedefault
LOCAL_MODULE_TAGS := optional
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
+
LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
libaudiopolicycomponents \
libxml2
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 2f01b02..a714041 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1754,7 +1754,7 @@
audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
- mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
+ mInputs.activeInputsCountOnDevices(primaryInputDevices) == 1) {
SoundTrigger::setCaptureState(true);
}
@@ -4477,9 +4477,7 @@
bool AudioPolicyManager::streamsMatchForvolume(audio_stream_type_t stream1,
audio_stream_type_t stream2) {
- return ((stream1 == stream2) ||
- ((stream1 == AUDIO_STREAM_ACCESSIBILITY) && (stream2 == AUDIO_STREAM_MUSIC)) ||
- ((stream1 == AUDIO_STREAM_MUSIC) && (stream2 == AUDIO_STREAM_ACCESSIBILITY)));
+ return (stream1 == stream2);
}
uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
diff --git a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
deleted file mode 100644
index aa228aa..0000000
--- a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyService"
-//#define LOG_NDEBUG 0
-
-#include "Configuration.h"
-#undef __STRICT_ANSI__
-#define __STDINT_LIMITS
-#define __STDC_LIMIT_MACROS
-#include <stdint.h>
-
-#include <sys/time.h>
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
-#include <cutils/properties.h>
-#include <binder/IPCThreadState.h>
-#include <utils/String16.h>
-#include <utils/threads.h>
-#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
-#include <hardware_legacy/power.h>
-#include <media/AudioEffect.h>
-//#include <media/IAudioFlinger.h>
-
-#include <hardware/hardware.h>
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <audio_effects/audio_effects_conf.h>
-#include <media/AudioParameter.h>
-
-
-namespace android {
-
-/* implementation of the interface to the policy manager */
-extern "C" {
-
-audio_module_handle_t aps_load_hw_module(void *service __unused,
- const char *name)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return AUDIO_MODULE_HANDLE_NONE;
- }
-
- return af->loadHwModule(name);
-}
-
-static audio_io_handle_t open_output(audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- uint32_t *pLatencyMs,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return AUDIO_IO_HANDLE_NONE;
- }
-
- if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL ||
- pDevices == NULL || pLatencyMs == NULL) {
- return AUDIO_IO_HANDLE_NONE;
- }
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = *pSamplingRate;
- config.format = *pFormat;
- config.channel_mask = *pChannelMask;
- if (offloadInfo != NULL) {
- config.offload_info = *offloadInfo;
- }
- audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = af->openOutput(module, &output, &config, pDevices,
- String8(""), pLatencyMs, flags);
- if (status == NO_ERROR) {
- *pSamplingRate = config.sample_rate;
- *pFormat = config.format;
- *pChannelMask = config.channel_mask;
- if (offloadInfo != NULL) {
- *((audio_offload_info_t *)offloadInfo) = config.offload_info;
- }
- }
- return output;
-}
-
-// deprecated: replaced by aps_open_output_on_module()
-audio_io_handle_t aps_open_output(void *service __unused,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- uint32_t *pLatencyMs,
- audio_output_flags_t flags)
-{
- return open_output(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask,
- pLatencyMs, flags, NULL);
-}
-
-audio_io_handle_t aps_open_output_on_module(void *service __unused,
- audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- uint32_t *pLatencyMs,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
-{
- return open_output(module, pDevices, pSamplingRate, pFormat, pChannelMask,
- pLatencyMs, flags, offloadInfo);
-}
-
-audio_io_handle_t aps_open_dup_output(void *service __unused,
- audio_io_handle_t output1,
- audio_io_handle_t output2)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return 0;
- }
- return af->openDuplicateOutput(output1, output2);
-}
-
-int aps_close_output(void *service __unused, audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->closeOutput(output);
-}
-
-int aps_suspend_output(void *service __unused, audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return PERMISSION_DENIED;
- }
-
- return af->suspendOutput(output);
-}
-
-int aps_restore_output(void *service __unused, audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return PERMISSION_DENIED;
- }
-
- return af->restoreOutput(output);
-}
-
-static audio_io_handle_t open_input(audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return AUDIO_IO_HANDLE_NONE;
- }
-
- if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL || pDevices == NULL) {
- return AUDIO_IO_HANDLE_NONE;
- }
-
- if (((*pDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) == AUDIO_DEVICE_IN_REMOTE_SUBMIX)
- && !captureAudioOutputAllowed(IPCThreadState::self()->getCallingPid(),
- IPCThreadState::self()->getCallingUid())) {
- ALOGE("open_input() permission denied: capture not allowed");
- return AUDIO_IO_HANDLE_NONE;
- }
-
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;;
- config.sample_rate = *pSamplingRate;
- config.format = *pFormat;
- config.channel_mask = *pChannelMask;
- audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- status_t status = af->openInput(module, &input, &config, pDevices,
- String8(""), AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_FAST /*FIXME*/);
- if (status == NO_ERROR) {
- *pSamplingRate = config.sample_rate;
- *pFormat = config.format;
- *pChannelMask = config.channel_mask;
- }
- return input;
-}
-
-
-// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
-audio_io_handle_t aps_open_input(void *service __unused,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- audio_in_acoustics_t acoustics __unused)
-{
- return open_input(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-audio_io_handle_t aps_open_input_on_module(void *service __unused,
- audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask)
-{
- return open_input(module, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-int aps_close_input(void *service __unused, audio_io_handle_t input)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->closeInput(input);
-}
-
-int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->invalidateStream(stream);
-}
-
-int aps_move_effects(void *service __unused, audio_session_t session,
- audio_io_handle_t src_output,
- audio_io_handle_t dst_output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->moveEffects(session, src_output, dst_output);
-}
-
-char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
- const char *keys)
-{
- String8 result = AudioSystem::getParameters(io_handle, String8(keys));
- return strdup(result.string());
-}
-
-void aps_set_parameters(void *service, audio_io_handle_t io_handle,
- const char *kv_pairs, int delay_ms)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
-}
-
-int aps_set_stream_volume(void *service, audio_stream_type_t stream,
- float volume, audio_io_handle_t output,
- int delay_ms)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->setStreamVolume(stream, volume, output,
- delay_ms);
-}
-
-int aps_start_tone(void *service, audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->startTone(tone, stream);
-}
-
-int aps_stop_tone(void *service)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->stopTone();
-}
-
-int aps_set_voice_volume(void *service, float volume, int delay_ms)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->setVoiceVolume(volume, delay_ms);
-}
-
-}; // extern "C"
-
-}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
deleted file mode 100644
index a5b96fe..0000000
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyService"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
-
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <media/AudioPolicyHelper.h>
-
-namespace android {
-
-
-// ----------------------------------------------------------------------------
-
-status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
- audio_policy_dev_state_t state,
- const char *device_address,
- const char *device_name __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
- return BAD_VALUE;
- }
- if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
- state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
- return BAD_VALUE;
- }
-
- ALOGV("setDeviceConnectionState()");
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
- state, device_address);
-}
-
-audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
- audio_devices_t device,
- const char *device_address)
-{
- if (mpAudioPolicy == NULL) {
- return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
- }
- return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
- device_address);
-}
-
-status_t AudioPolicyService::setPhoneState(audio_mode_t state)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (uint32_t(state) >= AUDIO_MODE_CNT) {
- return BAD_VALUE;
- }
-
- ALOGV("setPhoneState()");
-
- // TODO: check if it is more appropriate to do it in platform specific policy manager
- AudioSystem::setMode(state);
-
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
- mPhoneState = state;
- return NO_ERROR;
-}
-
-audio_mode_t AudioPolicyService::getPhoneState()
-{
- Mutex::Autolock _l(mLock);
- return mPhoneState;
-}
-
-status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
- audio_policy_forced_cfg_t config)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
- return BAD_VALUE;
- }
- if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
- return BAD_VALUE;
- }
- ALOGV("setForceUse()");
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
- return NO_ERROR;
-}
-
-audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
-{
- if (mpAudioPolicy == NULL) {
- return AUDIO_POLICY_FORCE_NONE;
- }
- if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
- return AUDIO_POLICY_FORCE_NONE;
- }
- return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
-}
-
-audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return AUDIO_IO_HANDLE_NONE;
- }
- if (mpAudioPolicy == NULL) {
- return AUDIO_IO_HANDLE_NONE;
- }
- ALOGV("getOutput()");
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
- format, channelMask, flags, offloadInfo);
-}
-
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- ALOGV("startOutput()");
- // create audio processors according to stream
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects != 0) {
- status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
- if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to add effects on session %d", session);
- }
- }
-
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
-}
-
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- ALOGV("stopOutput()");
- mOutputCommandThread->stopOutputCommand(output, stream, session);
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::doStopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- ALOGV("doStopOutput from tid %d", gettid());
- // release audio processors from the stream
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects != 0) {
- status_t status = audioPolicyEffects->releaseOutputSessionEffects(output, stream, session);
- if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to release effects on session %d", session);
- }
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
-}
-
-void AudioPolicyService::releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- if (mpAudioPolicy == NULL) {
- return;
- }
- ALOGV("releaseOutput()");
- mOutputCommandThread->releaseOutputCommand(output, stream, session);
-}
-
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream __unused,
- audio_session_t session __unused)
-{
- ALOGV("doReleaseOutput from tid %d", gettid());
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->release_output(mpAudioPolicy, output);
-}
-
-status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *input,
- audio_session_t session,
- pid_t pid __unused,
- uid_t uid __unused,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_input_flags_t flags __unused,
- audio_port_handle_t selectedDeviceId __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
-
- audio_source_t inputSource = attr->source;
-
- // already checked by client, but double-check in case the client wrapper is bypassed
- if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD &&
- inputSource != AUDIO_SOURCE_FM_TUNER) {
- return BAD_VALUE;
- }
-
- if (inputSource == AUDIO_SOURCE_DEFAULT) {
- inputSource = AUDIO_SOURCE_MIC;
- }
-
- if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
- return BAD_VALUE;
- }
-
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- // the audio_in_acoustics_t parameter is ignored by get_input()
- *input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
- format, channelMask, (audio_in_acoustics_t) 0);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (*input == AUDIO_IO_HANDLE_NONE) {
- return INVALID_OPERATION;
- }
-
- if (audioPolicyEffects != 0) {
- // create audio pre processors according to input source
- status_t status = audioPolicyEffects->addInputEffects(*input, inputSource, session);
- if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to add effects on input %d", input);
- }
- }
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::startInput(audio_io_handle_t input,
- audio_session_t session __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- Mutex::Autolock _l(mLock);
-
- return mpAudioPolicy->start_input(mpAudioPolicy, input);
-}
-
-status_t AudioPolicyService::stopInput(audio_io_handle_t input,
- audio_session_t session __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- Mutex::Autolock _l(mLock);
-
- return mpAudioPolicy->stop_input(mpAudioPolicy, input);
-}
-
-void AudioPolicyService::releaseInput(audio_io_handle_t input,
- audio_session_t session)
-{
- if (mpAudioPolicy == NULL) {
- return;
- }
-
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->release_input(mpAudioPolicy, input);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects != 0) {
- // release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(input, session);
- if(status != NO_ERROR) {
- ALOGW("Failed to release effects on input %d", input);
- }
- }
-}
-
-status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
- int index,
- audio_devices_t device)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- if (mpAudioPolicy->set_stream_volume_index_for_device) {
- return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
- stream,
- index,
- device);
- } else {
- return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
- }
-}
-
-status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
- int *index,
- audio_devices_t device)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- if (mpAudioPolicy->get_stream_volume_index_for_device) {
- return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
- stream,
- index,
- device);
- } else {
- return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
- }
-}
-
-uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return 0;
- }
- if (mpAudioPolicy == NULL) {
- return 0;
- }
- return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
-}
-
-//audio policy: use audio_device_t appropriately
-
-audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return AUDIO_DEVICE_NONE;
- }
- if (mpAudioPolicy == NULL) {
- return AUDIO_DEVICE_NONE;
- }
- return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
-}
-
-audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
-{
- // FIXME change return type to status_t, and return NO_INIT here
- if (mpAudioPolicy == NULL) {
- return 0;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
-}
-
-status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
- audio_io_handle_t io,
- uint32_t strategy,
- audio_session_t session,
- int id)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
-}
-
-status_t AudioPolicyService::unregisterEffect(int id)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
-}
-
-status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
-}
-
-bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return false;
- }
- if (mpAudioPolicy == NULL) {
- return false;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return false;
- }
- if (mpAudioPolicy == NULL) {
- return false;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isSourceActive(audio_source_t source) const
-{
- if (mpAudioPolicy == NULL) {
- return false;
- }
- if (mpAudioPolicy->is_source_active == 0) {
- return false;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
-}
-
-status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
- effect_descriptor_t *descriptors,
- uint32_t *count)
-{
- if (mpAudioPolicy == NULL) {
- *count = 0;
- return NO_INIT;
- }
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects == 0) {
- *count = 0;
- return NO_INIT;
- }
- return audioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count);
-}
-
-bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
-{
- if (mpAudioPolicy == NULL) {
- ALOGV("mpAudioPolicy == NULL");
- return false;
- }
-
- if (mpAudioPolicy->is_offload_supported == NULL) {
- ALOGV("HAL does not implement is_offload_supported");
- return false;
- }
-
- return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
-}
-
-status_t AudioPolicyService::listAudioPorts(audio_port_role_t role __unused,
- audio_port_type_t type __unused,
- unsigned int *num_ports,
- struct audio_port *ports __unused,
- unsigned int *generation __unused)
-{
- *num_ports = 0;
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getAudioPort(struct audio_port *port __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::createAudioPatch(const struct audio_patch *patch __unused,
- audio_patch_handle_t *handle __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::releaseAudioPatch(audio_patch_handle_t handle __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::listAudioPatches(unsigned int *num_patches,
- struct audio_patch *patches __unused,
- unsigned int *generation __unused)
-{
- *num_patches = 0;
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::setAudioPortConfig(const struct audio_port_config *config __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session __unused,
- audio_stream_type_t *stream,
- uid_t uid __unused,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- audio_port_handle_t selectedDeviceId __unused,
- const audio_offload_info_t *offloadInfo)
-{
- if (attr != NULL) {
- *stream = audio_attributes_to_stream_type(attr);
- } else {
- if (*stream == AUDIO_STREAM_DEFAULT) {
- return BAD_VALUE;
- }
- }
- *output = getOutput(*stream, samplingRate, format, channelMask,
- flags, offloadInfo);
- if (*output == AUDIO_IO_HANDLE_NONE) {
- return INVALID_OPERATION;
- }
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::acquireSoundTriggerSession(audio_session_t *session __unused,
- audio_io_handle_t *ioHandle __unused,
- audio_devices_t *device __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::releaseSoundTriggerSession(audio_session_t session __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::registerPolicyMixes(const Vector<AudioMix>& mixes __unused,
- bool registration __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
- const audio_attributes_t *attributes,
- audio_patch_handle_t *handle)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::stopAudioSource(audio_patch_handle_t handle)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::setMasterMono(bool mono)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getMasterMono(bool *mono)
-{
- return INVALID_OPERATION;
-}
-
-}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index b6b6116..c4f6367 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -39,11 +39,6 @@
#include <system/audio.h>
#include <system/audio_policy.h>
-#ifdef USE_LEGACY_AUDIO_POLICY
-#include <hardware/hardware.h>
-#include <hardware/audio_policy.h>
-#endif
-
namespace android {
static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n";
@@ -54,11 +49,6 @@
static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
-#ifdef USE_LEGACY_AUDIO_POLICY
-namespace {
- extern struct audio_policy_service_ops aps_ops;
-};
-#endif
// ----------------------------------------------------------------------------
@@ -80,40 +70,8 @@
// start output activity command thread
mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
-#ifdef USE_LEGACY_AUDIO_POLICY
- ALOGI("AudioPolicyService CSTOR in legacy mode");
-
- /* instantiate the audio policy manager */
- const struct hw_module_t *module;
- int rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
- if (rc) {
- return;
- }
- rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
- ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
- if (rc) {
- return;
- }
-
- rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
- &mpAudioPolicy);
- ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc));
- if (rc) {
- return;
- }
-
- rc = mpAudioPolicy->init_check(mpAudioPolicy);
- ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc));
- if (rc) {
- return;
- }
- ALOGI("Loaded audio policy from %s (%s)", module->name, module->id);
-#else
- ALOGI("AudioPolicyService CSTOR in new mode");
-
mAudioPolicyClient = new AudioPolicyClient(this);
mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
-#endif
}
// load audio processing modules
sp<AudioPolicyEffects>audioPolicyEffects = new AudioPolicyEffects();
@@ -129,17 +87,8 @@
mAudioCommandThread->exit();
mOutputCommandThread->exit();
-#ifdef USE_LEGACY_AUDIO_POLICY
- if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) {
- mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
- }
- if (mpAudioPolicyDev != NULL) {
- audio_policy_dev_close(mpAudioPolicyDev);
- }
-#else
destroyAudioPolicyManager(mAudioPolicyManager);
delete mAudioPolicyClient;
-#endif
mNotificationClients.clear();
mAudioPolicyEffects.clear();
@@ -187,14 +136,12 @@
Mutex::Autolock _l(mNotificationClientsLock);
mNotificationClients.removeItem(uid);
}
-#ifndef USE_LEGACY_AUDIO_POLICY
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager) {
mAudioPolicyManager->releaseResourcesForUid(uid);
}
}
-#endif
}
void AudioPolicyService::onAudioPortListUpdate()
@@ -360,11 +307,7 @@
char buffer[SIZE];
String8 result;
-#ifdef USE_LEGACY_AUDIO_POLICY
- snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy);
-#else
snprintf(buffer, SIZE, "AudioPolicyManager: %p\n", mAudioPolicyManager);
-#endif
result.append(buffer);
snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
result.append(buffer);
@@ -394,15 +337,9 @@
mTonePlaybackThread->dump(fd);
}
-#ifdef USE_LEGACY_AUDIO_POLICY
- if (mpAudioPolicy) {
- mpAudioPolicy->dump(mpAudioPolicy, fd);
- }
-#else
if (mAudioPolicyManager) {
mAudioPolicyManager->dump(fd);
}
-#endif
if (locked) mLock.unlock();
}
@@ -1210,29 +1147,4 @@
int aps_set_voice_volume(void *service, float volume, int delay_ms);
};
-#ifdef USE_LEGACY_AUDIO_POLICY
-namespace {
- struct audio_policy_service_ops aps_ops = {
- .open_output = aps_open_output,
- .open_duplicate_output = aps_open_dup_output,
- .close_output = aps_close_output,
- .suspend_output = aps_suspend_output,
- .restore_output = aps_restore_output,
- .open_input = aps_open_input,
- .close_input = aps_close_input,
- .set_stream_volume = aps_set_stream_volume,
- .invalidate_stream = aps_invalidate_stream,
- .set_parameters = aps_set_parameters,
- .get_parameters = aps_get_parameters,
- .start_tone = aps_start_tone,
- .stop_tone = aps_stop_tone,
- .set_voice_volume = aps_set_voice_volume,
- .move_effects = aps_move_effects,
- .load_hw_module = aps_load_hw_module,
- .open_output_on_module = aps_open_output_on_module,
- .open_input_on_module = aps_open_input_on_module,
- };
-}; // namespace <unnamed>
-#endif
-
}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index a310735..8eb4f2d 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -30,10 +30,6 @@
#include <media/ToneGenerator.h>
#include <media/AudioEffect.h>
#include <media/AudioPolicy.h>
-#ifdef USE_LEGACY_AUDIO_POLICY
-#include <hardware/audio_policy.h>
-#include <hardware_legacy/AudioPolicyInterface.h>
-#endif
#include "AudioPolicyEffects.h"
#include "managerdefault/AudioPolicyManager.h"
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index dd9029e..85faac6 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -470,8 +470,8 @@
// CameraInfo is for android.hardware.Camera which does not
// support external camera facing. The closest approximation would be
// front camera.
- if (cameraInfo->orientation == CAMERA_FACING_EXTERNAL) {
- cameraInfo->orientation = CAMERA_FACING_FRONT;
+ if (cameraInfo->facing == CAMERA_FACING_EXTERNAL) {
+ cameraInfo->facing = CAMERA_FACING_FRONT;
}
}
return rc;
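
Note: the old code clobbered cameraInfo->orientation when approximating an external camera; per the comment above, the intent is to report an external camera as front-facing to the legacy android.hardware.Camera API while leaving its orientation untouched. A tiny sketch of the corrected mapping (enum values assumed for illustration):

    enum Facing { FACING_BACK = 0, FACING_FRONT = 1, FACING_EXTERNAL = 2 };  // values assumed

    // Only the facing field changes; orientation stays as reported by the HAL.
    static Facing legacyFacing(Facing halFacing) {
        return (halFacing == FACING_EXTERNAL) ? FACING_FRONT : halFacing;
    }
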
@@ -2556,7 +2556,8 @@
write(fd, result.string(), result.size());
} else {
result.appendFormat(" Facing: %s\n",
- info.facing == CAMERA_FACING_BACK ? "BACK" : "FRONT");
+ info.facing == CAMERA_FACING_BACK ? "BACK" :
+ info.facing == CAMERA_FACING_FRONT ? "FRONT" : "EXTERNAL");
result.appendFormat(" Orientation: %d\n", info.orientation);
int deviceVersion;
if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0) {
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index bcd62d6..bfbf640 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -993,7 +993,7 @@
}
status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
- status_t res;
+ status_t res = OK;
ALOGV("%s: state == %d, restart = %d", __FUNCTION__, params.state, restart);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 8ce84ae..9a7839b 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -958,21 +958,40 @@
return NO_INIT;
}
+ // Get supported preview fps ranges.
+ Vector<Size> supportedPreviewSizes;
+ Vector<FpsRange> supportedPreviewFpsRanges;
+ const Size PREVIEW_SIZE_BOUND = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
+ status_t res = getFilteredSizes(PREVIEW_SIZE_BOUND, &supportedPreviewSizes);
+ if (res != OK) return res;
+ for (size_t i=0; i < availableFpsRanges.count; i += 2) {
+ if (!isFpsSupported(supportedPreviewSizes,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1])) {
+ continue;
+ }
+ FpsRange fpsRange = {availableFpsRanges.data.i32[i], availableFpsRanges.data.i32[i+1]};
+ supportedPreviewFpsRanges.add(fpsRange);
+ }
+ if (supportedPreviewFpsRanges.size() == 0) {
+ ALOGE("Supported preview fps range is empty");
+ return NO_INIT;
+ }
+
int32_t bestStillCaptureFpsRange[2] = {
- availableFpsRanges.data.i32[0], availableFpsRanges.data.i32[1]
+ supportedPreviewFpsRanges[0].low, supportedPreviewFpsRanges[0].high
};
int32_t curRange =
bestStillCaptureFpsRange[1] - bestStillCaptureFpsRange[0];
- for (size_t i = 2; i < availableFpsRanges.count; i += 2) {
+ for (size_t i = 1; i < supportedPreviewFpsRanges.size(); i ++) {
int32_t nextRange =
- availableFpsRanges.data.i32[i + 1] -
- availableFpsRanges.data.i32[i];
+ supportedPreviewFpsRanges[i].high -
+ supportedPreviewFpsRanges[i].low;
if ( (nextRange > curRange) || // Maximize size of FPS range first
(nextRange == curRange && // Then minimize low-end FPS
- bestStillCaptureFpsRange[0] > availableFpsRanges.data.i32[i])) {
- bestStillCaptureFpsRange[0] = availableFpsRanges.data.i32[i];
- bestStillCaptureFpsRange[1] = availableFpsRanges.data.i32[i + 1];
+ bestStillCaptureFpsRange[0] > supportedPreviewFpsRanges[i].low)) {
+ bestStillCaptureFpsRange[0] = supportedPreviewFpsRanges[i].low;
+ bestStillCaptureFpsRange[1] = supportedPreviewFpsRanges[i].high;
curRange = nextRange;
}
}
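
Note: the selection now runs only over preview-capable fps ranges, but the rule itself is unchanged: prefer the widest range, and on ties prefer the lower minimum. A small worked sketch of that rule under assumed HAL-advertised ranges:

    #include <cstdint>
    #include <vector>

    struct FpsRange { int32_t low; int32_t high; };

    // Same selection rule as above, shown on hypothetical input {15-30, 30-30, 7-30}.
    static FpsRange pickStillCaptureRange(const std::vector<FpsRange> &ranges) {
        FpsRange best = ranges[0];
        int32_t curWidth = best.high - best.low;
        for (size_t i = 1; i < ranges.size(); i++) {
            int32_t width = ranges[i].high - ranges[i].low;
            if (width > curWidth ||                                    // maximize width first
                (width == curWidth && best.low > ranges[i].low)) {     // then minimize low end
                best = ranges[i];
                curWidth = width;
            }
        }
        return best;   // for {15-30, 30-30, 7-30} this yields 7-30
    }
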
@@ -2900,6 +2919,7 @@
}
// Get min frame duration for each size and check if the given fps range can be supported.
+ const int32_t FPS_MARGIN = 1;
for (size_t i = 0 ; i < sizes.size(); i++) {
int64_t minFrameDuration = getMinFrameDurationNs(sizes[i], format);
if (minFrameDuration <= 0) {
@@ -2908,6 +2928,8 @@
return false;
}
int32_t maxSupportedFps = 1e9 / minFrameDuration;
+ // Add some margin here for the case where the hal supports 29.xxxfps.
+ maxSupportedFps += FPS_MARGIN;
if (fps > maxSupportedFps) {
return false;
}
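
Note: the one-fps margin covers HALs that advertise fractional rates such as 29.97 fps, where integer math floors 1e9 / minFrameDuration to 29 and would wrongly reject a requested 30 fps range. A worked check under an assumed 33,366,667 ns minimum frame duration:

    #include <cstdint>

    // Hypothetical numbers: a 29.97 fps HAL reports ~33.37 ms min frame duration.
    static bool fpsSupported(int32_t requestedFps, int64_t minFrameDurationNs) {
        const int32_t FPS_MARGIN = 1;
        int32_t maxSupportedFps = 1e9 / minFrameDurationNs;   // 33366667 ns -> 29 (floored)
        maxSupportedFps += FPS_MARGIN;                        // 29 + 1 = 30
        return requestedFps <= maxSupportedFps;               // a 30 fps request is accepted
    }
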
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 5ada30c..c8ecbba 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -115,6 +115,11 @@
int32_t height;
};
+ struct FpsRange {
+ int32_t low;
+ int32_t high;
+ };
+
int32_t exposureCompensation;
bool autoExposureLock;
bool autoWhiteBalanceLock;
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
index 1f01144..5a5d7b7 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -221,7 +221,8 @@
status_t res = OK;
buffer.fenceFd = -1;
buffer.graphicBuffer = mAllocator->createGraphicBuffer(
- info.width, info.height, info.format, info.combinedUsage, &res);
+ info.width, info.height, info.format, 1 /* layerCount */,
+ info.combinedUsage, &res);
ALOGV("%s: allocating a new graphic buffer (%dx%d, format 0x%x) %p with handle %p",
__FUNCTION__, info.width, info.height, info.format,
buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 2f3251f..3705e8f 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2226,6 +2226,14 @@
}
}
+void Camera3Device::removeInFlightMapEntryLocked(int idx) {
+ mInFlightMap.removeItemsAt(idx, 1);
+
+ // Indicate idle inFlightMap to the status tracker
+ if (mInFlightMap.size() == 0) {
+ mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
+ }
+}
void Camera3Device::removeInFlightRequestIfReadyLocked(int idx) {
@@ -2261,13 +2269,7 @@
returnOutputBuffers(request.pendingOutputBuffers.array(),
request.pendingOutputBuffers.size(), 0);
- mInFlightMap.removeItemsAt(idx, 1);
-
- // Indicate idle inFlightMap to the status tracker
- if (mInFlightMap.size() == 0) {
- mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
- }
-
+ removeInFlightMapEntryLocked(idx);
ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
}
@@ -2960,6 +2962,8 @@
}
}
mRequestQueue.clear();
+
+ Mutex::Autolock al(mTriggerMutex);
mTriggerMap.clear();
if (lastFrameNumber != NULL) {
*lastFrameNumber = mRepeatingLastFrameNumber;
@@ -3442,6 +3446,20 @@
captureRequest->mResultExtras);
}
}
+
+ // Remove yet-to-be submitted inflight request from inflightMap
+ {
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != NULL) {
+ Mutex::Autolock l(parent->mInFlightLock);
+ ssize_t idx = parent->mInFlightMap.indexOfKey(captureRequest->mResultExtras.frameNumber);
+ if (idx >= 0) {
+ ALOGV("%s: Remove inflight request from queue: frameNumber %" PRId64,
+ __FUNCTION__, captureRequest->mResultExtras.frameNumber);
+ parent->removeInFlightMapEntryLocked(idx);
+ }
+ }
+ }
}
Mutex::Autolock l(mRequestLock);
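
Note: when a queued capture request fails before it reaches the HAL, its provisional in-flight entry would otherwise linger and keep the status tracker from ever reporting the component idle. The invariant the new helper enforces, in simplified sketch form (types and names are illustrative, not the Camera3Device API):

    #include <cstdint>
    #include <map>

    // Every removal path funnels through one helper so the
    // "map just became empty -> signal idle" transition is never missed.
    struct InFlightTracker {
        std::map<int64_t, int /*InFlightRequest*/> inFlight;
        bool idleSignalled = false;

        void removeEntry(int64_t frameNumber) {
            inFlight.erase(frameNumber);
            if (inFlight.empty()) {
                idleSignalled = true;   // stands in for markComponentIdle()
            }
        }
    };
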
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 31901bc..ac9dfc2 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -852,6 +852,9 @@
/**** Scope for mInFlightLock ****/
+ // Remove the in-flight map entry of the given index from mInFlightMap.
+ // It must only be called with mInFlightLock held.
+ void removeInFlightMapEntryLocked(int idx);
// Remove the in-flight request of the given index from mInFlightMap
// if it's no longer needed. It must only be called with mInFlightLock held.
void removeInFlightRequestIfReadyLocked(int idx);
diff --git a/services/mediacodec/minijail/Android.mk b/services/mediacodec/minijail/Android.mk
index d2becb4..de05bc3 100644
--- a/services/mediacodec/minijail/Android.mk
+++ b/services/mediacodec/minijail/Android.mk
@@ -15,9 +15,8 @@
endif
# allow device specific additions to the syscall whitelist
-ifneq (,$(wildcard $(BOARD_SECCOMP_POLICY)/mediacodec-seccomp.policy))
- LOCAL_SRC_FILES += $(BOARD_SECCOMP_POLICY)/mediacodec-seccomp.policy
-endif
+LOCAL_SRC_FILES += $(wildcard $(foreach dir, $(BOARD_SECCOMP_POLICY), \
+ $(dir)/mediacodec-seccomp.policy))
include $(BUILD_SYSTEM)/base_rules.mk
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 4ce5c38..38aa472 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -25,7 +25,6 @@
libbinder \
libcutils \
liblog \
- libmedia \
libmediadrm \
libutils \
diff --git a/services/mediaextractor/minijail/Android.mk b/services/mediaextractor/minijail/Android.mk
index 3a93340..6b01e77 100644
--- a/services/mediaextractor/minijail/Android.mk
+++ b/services/mediaextractor/minijail/Android.mk
@@ -9,9 +9,8 @@
LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
# allow device specific additions to the syscall whitelist
-ifneq (,$(wildcard $(BOARD_SECCOMP_POLICY)/mediaextractor-seccomp.policy))
- LOCAL_SRC_FILES += $(BOARD_SECCOMP_POLICY)/mediaextractor-seccomp.policy
-endif
+LOCAL_SRC_FILES += $(wildcard $(foreach dir, $(BOARD_SECCOMP_POLICY), \
+ $(dir)/mediaextractor-seccomp.policy))
include $(BUILD_SYSTEM)/base_rules.mk
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
index 67976ff..189855c 100644
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
@@ -35,6 +35,7 @@
getgid32: 1
getegid32: 1
getgroups32: 1
+nanosleep: 1
# for attaching to debuggerd on process crash
socketcall: 1
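Editor's note: the added nanosleep entry matters because blocking sleeps inside the sandboxed extractor reduce to that syscall on x86; a hedged illustration, not taken from this change, of code that would otherwise be killed with SIGSYS under the old policy:

    #include <chrono>
    #include <thread>

    // Any sleep like this in the mediaextractor process is implemented via the
    // nanosleep syscall, so the seccomp policy must whitelist it.
    static void backoffBeforeRetry() {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }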
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index 88f98cf..a1da63d 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -2,9 +2,9 @@
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := MediaLogService.cpp
+LOCAL_SRC_FILES := MediaLogService.cpp IMediaLogService.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libnbaio
+LOCAL_SHARED_LIBRARIES := libbinder libutils liblog libnbaio
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/media/libmedia/IMediaLogService.cpp b/services/medialog/IMediaLogService.cpp
similarity index 100%
rename from media/libmedia/IMediaLogService.cpp
rename to services/medialog/IMediaLogService.cpp
diff --git a/services/mediaresourcemanager/Android.mk b/services/mediaresourcemanager/Android.mk
index e9bc955..c9cd8cc 100644
--- a/services/mediaresourcemanager/Android.mk
+++ b/services/mediaresourcemanager/Android.mk
@@ -4,7 +4,7 @@
LOCAL_SRC_FILES := ResourceManagerService.cpp ServiceLog.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
+LOCAL_SHARED_LIBRARIES := libmedia libmediautils libbinder libutils liblog
LOCAL_MODULE:= libresourcemanagerservice
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index 219c372..4344506 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -17,20 +17,39 @@
include $(CLEAR_VARS)
-LOCAL_SRC_FILES:= \
- RadioService.cpp \
- RadioHalLegacy.cpp
+LOCAL_SRC_FILES:= \
+ RadioService.cpp
LOCAL_SHARED_LIBRARIES:= \
liblog \
libutils \
libbinder \
libcutils \
- libmedia \
+ libaudioclient \
libhardware \
libradio \
libradio_metadata
+ifeq ($(ENABLE_TREBLE),true)
+# Treble configuration
+LOCAL_CFLAGS += -DENABLE_TREBLE
+LOCAL_SRC_FILES += \
+ HidlUtils.cpp \
+ RadioHalHidl.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+ libhwbinder \
+ libhidlbase \
+ libhidltransport \
+ libbase \
+ android.hardware.broadcastradio@1.0
+else
+# libhardware configuration
+LOCAL_SRC_FILES += \
+ RadioHalLegacy.cpp
+endif
+
+
LOCAL_CFLAGS += -Wall -Wextra -Werror
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
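Editor's note: since the makefile compiles exactly one of RadioHalHidl.cpp or RadioHalLegacy.cpp, and each presumably provides its own definition of RadioInterface::connectModule (the HIDL one appears later in this change), no runtime switch is needed; the -DENABLE_TREBLE flag is only required where Treble-specific code has to be guarded at the source level, roughly like this:

    // Illustrative only: guarding Treble-only code with the flag defined above.
    #ifdef ENABLE_TREBLE
    #include "RadioHalHidl.h"      // broadcastradio@1.0 HIDL client, added below
    #else
    #include "RadioHalLegacy.h"    // existing libhardware-based backend
    #endif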
diff --git a/services/radio/HidlUtils.cpp b/services/radio/HidlUtils.cpp
new file mode 100644
index 0000000..bfced7a
--- /dev/null
+++ b/services/radio/HidlUtils.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "HidlUtils"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include <system/radio_metadata.h>
+
+#include "HidlUtils.h"
+
+namespace android {
+
+using android::hardware::broadcastradio::V1_0::MetadataType;
+using android::hardware::broadcastradio::V1_0::Band;
+using android::hardware::broadcastradio::V1_0::Deemphasis;
+using android::hardware::broadcastradio::V1_0::Rds;
+
+//static
+int HidlUtils::convertHalResult(Result result)
+{
+ switch (result) {
+ case Result::OK:
+ return 0;
+ case Result::INVALID_ARGUMENTS:
+ return -EINVAL;
+ case Result::INVALID_STATE:
+ return -ENOSYS;
+ case Result::TIMEOUT:
+ return -ETIMEDOUT;
+ case Result::NOT_INITIALIZED:
+ default:
+ return -ENODEV;
+ }
+}
+
+
+//static
+void HidlUtils::convertBandConfigToHal(BandConfig *halConfig,
+ const radio_hal_band_config_t *config)
+{
+ halConfig->type = static_cast<Band>(config->type);
+ halConfig->antennaConnected = config->antenna_connected;
+ halConfig->lowerLimit = config->lower_limit;
+ halConfig->upperLimit = config->upper_limit;
+ halConfig->spacings.setToExternal(const_cast<unsigned int *>(&config->spacings[0]),
+ config->num_spacings * sizeof(uint32_t));
+ // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+ halConfig->spacings.resize(config->num_spacings);
+
+ if (halConfig->type == Band::FM) {
+ halConfig->ext.fm.deemphasis = static_cast<Deemphasis>(config->fm.deemphasis);
+ halConfig->ext.fm.stereo = config->fm.stereo;
+ halConfig->ext.fm.rds = static_cast<Rds>(config->fm.rds);
+ halConfig->ext.fm.ta = config->fm.ta;
+ halConfig->ext.fm.af = config->fm.af;
+ halConfig->ext.fm.ea = config->fm.ea;
+ } else {
+ halConfig->ext.am.stereo = config->am.stereo;
+ }
+}
+
+//static
+void HidlUtils::convertPropertiesFromHal(radio_hal_properties_t *properties,
+ const Properties *halProperties)
+{
+ properties->class_id = static_cast<radio_class_t>(halProperties->classId);
+ strlcpy(properties->implementor, halProperties->implementor.c_str(), RADIO_STRING_LEN_MAX);
+ strlcpy(properties->product, halProperties->product.c_str(), RADIO_STRING_LEN_MAX);
+ strlcpy(properties->version, halProperties->version.c_str(), RADIO_STRING_LEN_MAX);
+ strlcpy(properties->serial, halProperties->serial.c_str(), RADIO_STRING_LEN_MAX);
+ properties->num_tuners = halProperties->numTuners;
+ properties->num_audio_sources = halProperties->numAudioSources;
+ properties->supports_capture = halProperties->supportsCapture;
+ properties->num_bands = halProperties->bands.size();
+
+ for (size_t i = 0; i < halProperties->bands.size(); i++) {
+ convertBandConfigFromHal(&properties->bands[i], &halProperties->bands[i]);
+ }
+}
+
+//static
+void HidlUtils::convertBandConfigFromHal(radio_hal_band_config_t *config,
+ const BandConfig *halConfig)
+{
+ config->type = static_cast<radio_band_t>(halConfig->type);
+ config->antenna_connected = halConfig->antennaConnected;
+ config->lower_limit = halConfig->lowerLimit;
+ config->upper_limit = halConfig->upperLimit;
+ config->num_spacings = halConfig->spacings.size();
+ if (config->num_spacings > RADIO_NUM_SPACINGS_MAX) {
+ config->num_spacings = RADIO_NUM_SPACINGS_MAX;
+ }
+ memcpy(config->spacings, halConfig->spacings.data(),
+ sizeof(uint32_t) * config->num_spacings);
+
+ if (halConfig->type == Band::FM) {
+ config->fm.deemphasis = static_cast<radio_deemphasis_t>(halConfig->ext.fm.deemphasis);
+ config->fm.stereo = halConfig->ext.fm.stereo;
+ config->fm.rds = static_cast<radio_rds_t>(halConfig->ext.fm.rds);
+ config->fm.ta = halConfig->ext.fm.ta;
+ config->fm.af = halConfig->ext.fm.af;
+ config->fm.ea = halConfig->ext.fm.ea;
+ } else {
+ config->am.stereo = halConfig->ext.am.stereo;
+ }
+}
+
+
+//static
+void HidlUtils::convertProgramInfoFromHal(radio_program_info_t *info,
+ const ProgramInfo *halInfo,
+ bool withMetadata)
+{
+ info->channel = halInfo->channel;
+ info->sub_channel = halInfo->subChannel;
+ info->tuned = halInfo->tuned;
+ info->stereo = halInfo->stereo;
+ info->digital = halInfo->digital;
+ info->signal_strength = halInfo->signalStrength;
+ if (withMetadata && halInfo->metadata.size() != 0) {
+ convertMetaDataFromHal(&info->metadata, halInfo->metadata,
+ halInfo->channel, halInfo->subChannel);
+ }
+}
+
+//static
+void HidlUtils::convertMetaDataFromHal(radio_metadata_t **metadata,
+ const hidl_vec<MetaData>& halMetadata,
+ uint32_t channel,
+ uint32_t subChannel)
+{
+
+ radio_metadata_allocate(metadata, channel, subChannel);
+ for (size_t i = 0; i < halMetadata.size(); i++) {
+ radio_metadata_key_t key = static_cast<radio_metadata_key_t>(halMetadata[i].key);
+ radio_metadata_type_t type = static_cast<radio_metadata_type_t>(halMetadata[i].type);
+ radio_metadata_clock_t clock;
+
+ switch (type) {
+ case RADIO_METADATA_TYPE_INT:
+ radio_metadata_add_int(metadata, key, halMetadata[i].intValue);
+ break;
+ case RADIO_METADATA_TYPE_TEXT:
+ radio_metadata_add_text(metadata, key, halMetadata[i].stringValue.c_str());
+ break;
+ case RADIO_METADATA_TYPE_RAW:
+ radio_metadata_add_raw(metadata, key,
+ halMetadata[i].rawValue.data(),
+ halMetadata[i].rawValue.size());
+ break;
+ case RADIO_METADATA_TYPE_CLOCK:
+ clock.utc_seconds_since_epoch =
+ halMetadata[i].clockValue.utcSecondsSinceEpoch;
+ clock.timezone_offset_in_minutes =
+ halMetadata[i].clockValue.timezoneOffsetInMinutes;
+ radio_metadata_add_clock(metadata, key, &clock);
+ break;
+ default:
+ ALOGW("%s invalid metadata type %u",__FUNCTION__, halMetadata[i].type);
+ break;
+ }
+ }
+}
+
+} // namespace android
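Editor's note: a hedged usage sketch of the band-config converters above; the field values are invented for illustration:

    radio_hal_band_config_t legacyConfig;
    memset(&legacyConfig, 0, sizeof(legacyConfig));
    legacyConfig.type = RADIO_BAND_FM;
    legacyConfig.lower_limit = 87500;    // kHz
    legacyConfig.upper_limit = 108000;   // kHz
    legacyConfig.num_spacings = 1;
    legacyConfig.spacings[0] = 100;      // kHz

    BandConfig halConfig;
    HidlUtils::convertBandConfigToHal(&halConfig, &legacyConfig);
    // ... the HIDL config crosses the HAL boundary and a possibly updated one comes back ...
    HidlUtils::convertBandConfigFromHal(&legacyConfig, &halConfig);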
diff --git a/services/radio/HidlUtils.h b/services/radio/HidlUtils.h
new file mode 100644
index 0000000..091abb7
--- /dev/null
+++ b/services/radio/HidlUtils.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
+#define ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
+
+#include <android/hardware/broadcastradio/1.0/types.h>
+#include <hardware/radio.h>
+
+namespace android {
+
+using android::hardware::hidl_vec;
+using android::hardware::broadcastradio::V1_0::Result;
+using android::hardware::broadcastradio::V1_0::Properties;
+using android::hardware::broadcastradio::V1_0::BandConfig;
+using android::hardware::broadcastradio::V1_0::ProgramInfo;
+using android::hardware::broadcastradio::V1_0::MetaData;
+
+class HidlUtils {
+public:
+ static int convertHalResult(Result result);
+ static void convertBandConfigFromHal(radio_hal_band_config_t *config,
+ const BandConfig *halConfig);
+ static void convertPropertiesFromHal(radio_hal_properties_t *properties,
+ const Properties *halProperties);
+ static void convertBandConfigToHal(BandConfig *halConfig,
+ const radio_hal_band_config_t *config);
+ static void convertProgramInfoFromHal(radio_program_info_t *info,
+ const ProgramInfo *halInfo,
+ bool withMetadata);
+ static void convertMetaDataFromHal(radio_metadata_t **metadata,
+ const hidl_vec<MetaData>& halMetadata,
+ uint32_t channel,
+ uint32_t subChannel);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
diff --git a/services/radio/RadioHalHidl.cpp b/services/radio/RadioHalHidl.cpp
new file mode 100644
index 0000000..07cb4d5
--- /dev/null
+++ b/services/radio/RadioHalHidl.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RadioHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include <system/radio_metadata.h>
+#include <android/hardware/broadcastradio/1.0/IBroadcastRadioFactory.h>
+
+#include "RadioHalHidl.h"
+#include "HidlUtils.h"
+
+namespace android {
+
+using android::hardware::broadcastradio::V1_0::IBroadcastRadioFactory;
+using android::hardware::broadcastradio::V1_0::Class;
+using android::hardware::broadcastradio::V1_0::Direction;
+using android::hardware::broadcastradio::V1_0::Properties;
+
+
+/* static */
+sp<RadioInterface> RadioInterface::connectModule(radio_class_t classId)
+{
+ return new RadioHalHidl(classId);
+}
+
+int RadioHalHidl::getProperties(radio_hal_properties_t *properties)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ sp<IBroadcastRadio> module = getService();
+ if (module == 0) {
+ return -ENODEV;
+ }
+ Properties halProperties;
+ Result halResult;
+ Return<void> hidlReturn =
+ module->getProperties([&](Result result, const Properties& properties) {
+ halResult = result;
+ if (result == Result::OK) {
+ halProperties = properties;
+ }
+ });
+
+ if (hidlReturn.getStatus().transactionError() == DEAD_OBJECT) {
+ clearService();
+ return -EPIPE;
+ }
+ if (halResult == Result::OK) {
+ HidlUtils::convertPropertiesFromHal(properties, &halProperties);
+ }
+ return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::openTuner(const radio_hal_band_config_t *config,
+ bool audio,
+ sp<TunerCallbackInterface> callback,
+ sp<TunerInterface>& tuner)
+{
+ sp<IBroadcastRadio> module = getService();
+ if (module == 0) {
+ return -ENODEV;
+ }
+ sp<Tuner> tunerImpl = new Tuner(callback, this);
+
+ BandConfig halConfig;
+ Result halResult;
+ sp<ITuner> halTuner;
+
+ HidlUtils::convertBandConfigToHal(&halConfig, config);
+ Return<void> hidlReturn =
+ module->openTuner(halConfig, audio, tunerImpl,
+ [&](Result result, const sp<ITuner>& tuner) {
+ halResult = result;
+ if (result == Result::OK) {
+ halTuner = tuner;
+ }
+ });
+
+ if (hidlReturn.getStatus().transactionError() == DEAD_OBJECT) {
+ clearService();
+ return -EPIPE;
+ }
+ if (halResult == Result::OK) {
+ tunerImpl->setHalTuner(halTuner);
+ tuner = tunerImpl;
+ }
+
+ return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::closeTuner(sp<TunerInterface>& tuner)
+{
+ sp<Tuner> tunerImpl = static_cast<Tuner *>(tuner.get());
+ sp<ITuner> clearTuner;
+ tunerImpl->setHalTuner(clearTuner);
+ return 0;
+}
+
+RadioHalHidl::RadioHalHidl(radio_class_t classId)
+ : mClassId(classId)
+{
+}
+
+RadioHalHidl::~RadioHalHidl()
+{
+}
+
+sp<IBroadcastRadio> RadioHalHidl::getService()
+{
+ if (mHalModule == 0) {
+ sp<IBroadcastRadioFactory> factory = IBroadcastRadioFactory::getService("broadcastradio");
+ if (factory != 0) {
+ factory->connectModule(static_cast<Class>(mClassId),
+ [&](Result retval, const ::android::sp<IBroadcastRadio>& result) {
+ if (retval == Result::OK) {
+ mHalModule = result;
+ }
+ });
+ }
+ }
+ ALOGV("%s OUT module %p", __FUNCTION__, mHalModule.get());
+ return mHalModule;
+}
+
+void RadioHalHidl::clearService()
+{
+ ALOGV("%s IN module %p", __FUNCTION__, mHalModule.get());
+ mHalModule.clear();
+}
+
+
+int RadioHalHidl::Tuner::setConfiguration(const radio_hal_band_config_t *config)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ BandConfig halConfig;
+ HidlUtils::convertBandConfigToHal(&halConfig, config);
+
+ Return<Result> hidlResult = mHalTuner->setConfiguration(halConfig);
+ checkHidlStatus(hidlResult.getStatus());
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::getConfiguration(radio_hal_band_config_t *config)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ BandConfig halConfig;
+ Result halResult;
+ Return<void> hidlReturn =
+ mHalTuner->getConfiguration([&](Result result, const BandConfig& config) {
+ halResult = result;
+ if (result == Result::OK) {
+ halConfig = config;
+ }
+ });
+ status_t status = checkHidlStatus(hidlReturn.getStatus());
+ if (status == NO_ERROR && halResult == Result::OK) {
+ HidlUtils::convertBandConfigFromHal(config, &halConfig);
+ }
+ return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::Tuner::scan(radio_direction_t direction, bool skip_sub_channel)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult =
+ mHalTuner->scan(static_cast<Direction>(direction), skip_sub_channel);
+ checkHidlStatus(hidlResult.getStatus());
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::step(radio_direction_t direction, bool skip_sub_channel)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult =
+ mHalTuner->step(static_cast<Direction>(direction), skip_sub_channel);
+ checkHidlStatus(hidlResult.getStatus());
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::tune(unsigned int channel, unsigned int sub_channel)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult =
+ mHalTuner->tune(channel, sub_channel);
+ checkHidlStatus(hidlResult.getStatus());
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::cancel()
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult = mHalTuner->cancel();
+ checkHidlStatus(hidlResult.getStatus());
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::getProgramInformation(radio_program_info_t *info)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ ProgramInfo halInfo;
+ Result halResult;
+ bool withMetaData = (info->metadata != NULL);
+ Return<void> hidlReturn = mHalTuner->getProgramInformation(
+ withMetaData, [&](Result result, const ProgramInfo& info) {
+ halResult = result;
+ if (result == Result::OK) {
+ halInfo = info;
+ }
+ });
+ status_t status = checkHidlStatus(hidlReturn.getStatus());
+ if (status == NO_ERROR && halResult == Result::OK) {
+ HidlUtils::convertProgramInfoFromHal(info, &halInfo, withMetaData);
+ }
+ return HidlUtils::convertHalResult(halResult);
+}
+
+Return<void> RadioHalHidl::Tuner::hardwareFailure()
+{
+ ALOGV("%s IN", __FUNCTION__);
+ handleHwFailure();
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::configChange(Result result, const BandConfig& config)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_CONFIG;
+ event.status = HidlUtils::convertHalResult(result);
+ HidlUtils::convertBandConfigFromHal(&event.config, &config);
+ onCallback(&event);
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::tuneComplete(Result result, const ProgramInfo& info)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_TUNED;
+ event.status = HidlUtils::convertHalResult(result);
+ HidlUtils::convertProgramInfoFromHal(&event.info, &info, true);
+ onCallback(&event);
+ if (event.info.metadata != NULL) {
+ radio_metadata_deallocate(event.info.metadata);
+ }
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::afSwitch(const ProgramInfo& info)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_AF_SWITCH;
+ HidlUtils::convertProgramInfoFromHal(&event.info, &info, true);
+ onCallback(&event);
+ if (event.info.metadata != NULL) {
+ radio_metadata_deallocate(event.info.metadata);
+ }
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::antennaStateChange(bool connected)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_ANTENNA;
+ event.on = connected;
+ onCallback(&event);
+ return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::trafficAnnouncement(bool active)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_TA;
+ event.on = active;
+ onCallback(&event);
+ return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::emergencyAnnouncement(bool active)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_EA;
+ event.on = active;
+ onCallback(&event);
+ return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::newMetadata(uint32_t channel, uint32_t subChannel,
+ const ::android::hardware::hidl_vec<MetaData>& metadata)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_METADATA;
+ HidlUtils::convertMetaDataFromHal(&event.metadata, metadata, channel, subChannel);
+ onCallback(&event);
+ if (event.metadata != NULL) {
+ radio_metadata_deallocate(event.metadata);
+ }
+ return Return<void>();
+}
+
+
+RadioHalHidl::Tuner::Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module)
+ : TunerInterface(), mHalTuner(NULL), mCallback(callback), mParentModule(module)
+{
+}
+
+
+RadioHalHidl::Tuner::~Tuner()
+{
+}
+
+void RadioHalHidl::Tuner::handleHwFailure()
+{
+ ALOGV("%s IN", __FUNCTION__);
+ sp<RadioHalHidl> parentModule = mParentModule.promote();
+ if (parentModule != 0) {
+ parentModule->clearService();
+ }
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_HW_FAILURE;
+ onCallback(&event);
+ mHalTuner.clear();
+}
+
+status_t RadioHalHidl::Tuner::checkHidlStatus(Status hidlStatus)
+{
+ status_t status = hidlStatus.transactionError();
+ if (status == DEAD_OBJECT) {
+ handleHwFailure();
+ }
+ return status;
+}
+
+void RadioHalHidl::Tuner::onCallback(radio_hal_event_t *halEvent)
+{
+ if (mCallback != 0) {
+ mCallback->onEvent(halEvent);
+ }
+}
+
+} // namespace android
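Editor's note: every call in this file follows the same two-level error handling: the binder transport status on the returned object is checked first (DEAD_OBJECT drops the cached service), and only then is the HAL-level Result mapped to an errno-style code. A condensed sketch of the pattern, with halResult explicitly initialized in case the callback never runs:

    Properties halProperties;
    Result halResult = Result::NOT_INITIALIZED;
    Return<void> hidlReturn = module->getProperties(
            [&](Result result, const Properties& properties) {
                halResult = result;                 // HAL-level status
                if (result == Result::OK) {
                    halProperties = properties;     // payload only valid on OK
                }
            });

    if (hidlReturn.getStatus().transactionError() == DEAD_OBJECT) {
        clearService();                             // remote process died; forget the cached interface
        return -EPIPE;
    }
    return HidlUtils::convertHalResult(halResult);  // Result::OK -> 0, otherwise a negative errno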
diff --git a/services/radio/RadioHalHidl.h b/services/radio/RadioHalHidl.h
new file mode 100644
index 0000000..5211ee2
--- /dev/null
+++ b/services/radio/RadioHalHidl.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_HAL_HIDL_H
+#define ANDROID_HARDWARE_RADIO_HAL_HIDL_H
+
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include "RadioInterface.h"
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
+#include <android/hardware/broadcastradio/1.0/types.h>
+#include <android/hardware/broadcastradio/1.0/IBroadcastRadio.h>
+#include <android/hardware/broadcastradio/1.0/ITuner.h>
+#include <android/hardware/broadcastradio/1.0/ITunerCallback.h>
+
+namespace android {
+
+using android::hardware::Status;
+using android::hardware::Return;
+using android::hardware::broadcastradio::V1_0::Result;
+using android::hardware::broadcastradio::V1_0::IBroadcastRadio;
+using android::hardware::broadcastradio::V1_0::ITuner;
+using android::hardware::broadcastradio::V1_0::ITunerCallback;
+
+using android::hardware::broadcastradio::V1_0::BandConfig;
+using android::hardware::broadcastradio::V1_0::ProgramInfo;
+using android::hardware::broadcastradio::V1_0::MetaData;
+
+class RadioHalHidl : public RadioInterface
+{
+public:
+ RadioHalHidl(radio_class_t classId);
+
+ // RadioInterface
+ virtual int getProperties(radio_hal_properties_t *properties);
+ virtual int openTuner(const radio_hal_band_config_t *config,
+ bool audio,
+ sp<TunerCallbackInterface> callback,
+ sp<TunerInterface>& tuner);
+ virtual int closeTuner(sp<TunerInterface>& tuner);
+
+ class Tuner : public TunerInterface, public virtual ITunerCallback
+ {
+ public:
+ Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module);
+
+ // TunerInterface
+ virtual int setConfiguration(const radio_hal_band_config_t *config);
+ virtual int getConfiguration(radio_hal_band_config_t *config);
+ virtual int scan(radio_direction_t direction, bool skip_sub_channel);
+ virtual int step(radio_direction_t direction, bool skip_sub_channel);
+ virtual int tune(unsigned int channel, unsigned int sub_channel);
+ virtual int cancel();
+ virtual int getProgramInformation(radio_program_info_t *info);
+
+ // ITunerCallback
+ virtual Return<void> hardwareFailure();
+ virtual Return<void> configChange(Result result, const BandConfig& config);
+ virtual Return<void> tuneComplete(Result result, const ProgramInfo& info);
+ virtual Return<void> afSwitch(const ProgramInfo& info);
+ virtual Return<void> antennaStateChange(bool connected);
+ virtual Return<void> trafficAnnouncement(bool active);
+ virtual Return<void> emergencyAnnouncement(bool active);
+ virtual Return<void> newMetadata(uint32_t channel, uint32_t subChannel,
+ const ::android::hardware::hidl_vec<MetaData>& metadata);
+
+ void setHalTuner(sp<ITuner>& halTuner) { mHalTuner = halTuner; }
+ sp<ITuner> getHalTuner() { return mHalTuner; }
+
+ private:
+ virtual ~Tuner();
+
+ void onCallback(radio_hal_event_t *halEvent);
+ void handleHwFailure();
+ status_t checkHidlStatus(Status hidlStatus);
+
+ sp<ITuner> mHalTuner;
+ sp<TunerCallbackInterface> mCallback;
+ wp<RadioHalHidl> mParentModule;
+ };
+
+ sp<IBroadcastRadio> getService();
+ void clearService();
+
+private:
+ virtual ~RadioHalHidl();
+
+ radio_class_t mClassId;
+ sp<IBroadcastRadio> mHalModule;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_HAL_HIDL_H
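Editor's note: the nested Tuner class serves two roles: the radio service drives it through TunerInterface while the HAL reports events through ITunerCallback. A hedged sketch of the open path from the service side; the callback sink and its onEvent signature are assumptions inferred from the onCallback() calls in RadioHalHidl.cpp:

    // Hypothetical event sink on the service side.
    struct ServiceTunerCallback : public TunerCallbackInterface {
        void onEvent(radio_hal_event_t *event) {
            ALOGI("radio event: type %d status %d", event->type, event->status);
        }
    };

    sp<RadioInterface> hal = RadioInterface::connectModule(RADIO_CLASS_AM_FM);
    sp<TunerCallbackInterface> callback = new ServiceTunerCallback();
    sp<TunerInterface> tuner;

    radio_hal_band_config_t config = {};  // band parameters filled in by the caller
    if (hal != 0 && hal->openTuner(&config, true /*audio*/, callback, tuner) == 0) {
        tuner->tune(99500 /*channel*/, 0 /*sub_channel*/);
        // Tune results arrive asynchronously via ServiceTunerCallback::onEvent().
    }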
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 2533132..e1e1fb1 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -30,7 +30,7 @@
libcutils \
libhardware \
libsoundtrigger \
- libmedia \
+ libaudioclient \
libserviceutility
@@ -42,7 +42,8 @@
LOCAL_SHARED_LIBRARIES += \
libhwbinder \
- libhidl \
+ libhidlbase \
+ libhidltransport \
libbase \
android.hardware.soundtrigger@2.0 \
android.hardware.audio.common@2.0
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
index e71d742..ecbdec4 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.cpp
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -19,7 +19,6 @@
#include <utils/Log.h>
#include "SoundTriggerHalHidl.h"
-#include <hidl/IServiceManager.h>
#include <hwbinder/IPCThreadState.h>
#include <hwbinder/ProcessState.h>
@@ -29,14 +28,6 @@
using android::hardware::ProcessState;
using android::hardware::audio::common::V2_0::AudioDevice;
-pthread_once_t SoundTriggerHalHidl::sOnceControl = PTHREAD_ONCE_INIT;
-
-void SoundTriggerHalHidl::sOnceInit()
-{
- ProcessState::self()->setThreadPoolMaxThreadCount(1);
- ProcessState::self()->startThreadPool();
-}
-
/* static */
sp<SoundTriggerHalInterface> SoundTriggerHalInterface::connectModule(const char *moduleName)
{
@@ -52,26 +43,27 @@
ISoundTriggerHw::Properties halProperties;
Return<void> hidlReturn;
- int32_t halReturn;
+ int ret;
{
AutoMutex lock(mHalLock);
hidlReturn = soundtrigger->getProperties([&](int rc, auto res) {
- halReturn = rc;
+ ret = rc;
halProperties = res;
ALOGI("getProperties res implementor %s", res.implementor.c_str());
});
}
- int ret = 0;
if (hidlReturn.getStatus().isOk()) {
- convertPropertiesFromHal(properties, &halProperties);
+ if (ret == 0) {
+ convertPropertiesFromHal(properties, &halProperties);
+ }
} else {
ret = (int)hidlReturn.getStatus().transactionError();
if (ret == -EPIPE) {
clearService();
}
}
-
+ ALOGI("getProperties ret %d", ret);
return ret;
}
@@ -110,7 +102,7 @@
}
Return<void> hidlReturn;
- int32_t halReturn;
+ int ret;
SoundModelHandle halHandle;
{
AutoMutex lock(mHalLock);
@@ -118,14 +110,14 @@
hidlReturn = soundtrigger->loadPhraseSoundModel(
*(const ISoundTriggerHw::PhraseSoundModel *)halSoundModel,
this, modelId, [&](int32_t retval, auto res) {
- halReturn = retval;
+ ret = retval;
halHandle = res;
});
} else {
hidlReturn = soundtrigger->loadSoundModel(*halSoundModel,
this, modelId, [&](int32_t retval, auto res) {
- halReturn = retval;
+ ret = retval;
halHandle = res;
});
}
@@ -133,12 +125,13 @@
delete halSoundModel;
- int ret = 0;
if (hidlReturn.getStatus().isOk()) {
- AutoMutex lock(mLock);
- *handle = (sound_model_handle_t)modelId;
- sp<SoundModel> model = new SoundModel(*handle, callback, cookie, halHandle);
- mSoundModels.add(*handle, model);
+ if (ret == 0) {
+ AutoMutex lock(mLock);
+ *handle = (sound_model_handle_t)modelId;
+ sp<SoundModel> model = new SoundModel(*handle, callback, cookie, halHandle);
+ mSoundModels.add(*handle, model);
+ }
} else {
ret = (int)hidlReturn.getStatus().transactionError();
ALOGE("loadSoundModel error %d", ret);
@@ -164,18 +157,19 @@
return -EINVAL;
}
- Return<int32_t> halReturn(0);
+ Return<int32_t> hidlReturn(0);
{
AutoMutex lock(mHalLock);
- halReturn = soundtrigger->unloadSoundModel(model->mHalHandle);
+ hidlReturn = soundtrigger->unloadSoundModel(model->mHalHandle);
}
-
- int ret = (int)halReturn.getStatus().transactionError();
+ int ret = (int)hidlReturn.getStatus().transactionError();
ALOGE_IF(ret != 0, "unloadSoundModel error %d", ret);
if (ret == -EPIPE) {
clearService();
}
-
+ if (ret == 0) {
+ ret = hidlReturn;
+ }
return ret;
}
@@ -201,19 +195,22 @@
ISoundTriggerHw::RecognitionConfig *halConfig =
convertRecognitionConfigToHal(config);
- Return<int32_t> halReturn(0);
+ Return<int32_t> hidlReturn(0);
{
AutoMutex lock(mHalLock);
- halReturn = soundtrigger->startRecognition(model->mHalHandle, *halConfig, this, handle);
+ hidlReturn = soundtrigger->startRecognition(model->mHalHandle, *halConfig, this, handle);
}
delete halConfig;
- int ret = (int)halReturn.getStatus().transactionError();
+ int ret = (int)hidlReturn.getStatus().transactionError();
ALOGE_IF(ret != 0, "startRecognition error %d", ret);
if (ret == -EPIPE) {
clearService();
}
+ if (ret == 0) {
+ ret = hidlReturn;
+ }
return ret;
}
@@ -230,17 +227,20 @@
return -EINVAL;
}
- Return<int32_t> halReturn(0);
+ Return<int32_t> hidlReturn(0);
{
AutoMutex lock(mHalLock);
- halReturn = soundtrigger->stopRecognition(model->mHalHandle);
+ hidlReturn = soundtrigger->stopRecognition(model->mHalHandle);
}
- int ret = (int)halReturn.getStatus().transactionError();
+ int ret = (int)hidlReturn.getStatus().transactionError();
ALOGE_IF(ret != 0, "stopRecognition error %d", ret);
if (ret == -EPIPE) {
clearService();
}
+ if (ret == 0) {
+ ret = hidlReturn;
+ }
return ret;
}
@@ -251,17 +251,20 @@
return -ENODEV;
}
- Return<int32_t> halReturn(0);
+ Return<int32_t> hidlReturn(0);
{
AutoMutex lock(mHalLock);
- Return<int32_t> halReturn = soundtrigger->stopAllRecognitions();
+ hidlReturn = soundtrigger->stopAllRecognitions();
}
- int ret = (int)halReturn.getStatus().transactionError();
+ int ret = (int)hidlReturn.getStatus().transactionError();
ALOGE_IF(ret != 0, "stopAllRecognitions error %d", ret);
if (ret == -EPIPE) {
clearService();
}
+ if (ret == 0) {
+ ret = hidlReturn;
+ }
return ret;
}
@@ -270,11 +273,6 @@
{
}
-void SoundTriggerHalHidl::onFirstRef()
-{
- pthread_once(&sOnceControl, &sOnceInit);
-}
-
SoundTriggerHalHidl::~SoundTriggerHalHidl()
{
}
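Editor's note: the soundtrigger hunks all make the same correction: the HAL's own status is no longer discarded once the binder transaction succeeds. For the Return<int32_t> methods the resulting shape is roughly as follows (condensed from unloadSoundModel above):

    Return<int32_t> hidlReturn(0);
    {
        AutoMutex lock(mHalLock);
        hidlReturn = soundtrigger->unloadSoundModel(model->mHalHandle);
    }
    int ret = (int)hidlReturn.getStatus().transactionError();
    if (ret == -EPIPE) {
        clearService();      // transport to the HAL process is gone
    }
    if (ret == 0) {
        ret = hidlReturn;    // transaction succeeded: surface the HAL's int32 status
    }
    return ret;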
diff --git a/services/soundtrigger/SoundTriggerHalHidl.h b/services/soundtrigger/SoundTriggerHalHidl.h
index 60404dc..e578dda 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.h
+++ b/services/soundtrigger/SoundTriggerHalHidl.h
@@ -82,9 +82,6 @@
*/
virtual int stopAllRecognitions();
- // RefBase
- virtual void onFirstRef();
-
// ISoundTriggerHwCallback
virtual ::android::hardware::Return<void> recognitionCallback(
const ISoundTriggerHwCallback::RecognitionEvent& event, CallbackCookie cookie);