Merge "Use ToneGenerator::tone_type consistently"
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 1417416..7b0b443 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -226,8 +226,8 @@
AudioEffect(const effect_uuid_t *type,
const effect_uuid_t *uuid = NULL,
int32_t priority = 0,
- effect_callback_t cbf = 0,
- void* user = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
int sessionId = 0,
audio_io_handle_t io = 0
);
@@ -238,8 +238,8 @@
AudioEffect(const char *typeStr,
const char *uuidStr = NULL,
int32_t priority = 0,
- effect_callback_t cbf = 0,
- void* user = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
int sessionId = 0,
audio_io_handle_t io = 0
);
@@ -260,8 +260,8 @@
status_t set(const effect_uuid_t *type,
const effect_uuid_t *uuid = NULL,
int32_t priority = 0,
- effect_callback_t cbf = 0,
- void* user = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
int sessionId = 0,
audio_io_handle_t io = 0
);
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 756e91d..c8c5dba 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -149,14 +149,14 @@
RECORD_IIR_ENABLE = AUDIO_IN_ACOUSTICS_TX_IIR_ENABLE,
};
- AudioRecord(int inputSource,
+ AudioRecord(audio_source_t inputSource,
uint32_t sampleRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channelMask = AUDIO_CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
- callback_t cbf = 0,
- void* user = 0,
+ callback_t cbf = NULL,
+ void* user = NULL,
int notificationFrames = 0,
int sessionId = 0);
@@ -175,14 +175,14 @@
* - NO_INIT: audio server or audio hardware not initialized
* - PERMISSION_DENIED: recording is not allowed for the requesting process
* */
- status_t set(int inputSource = 0,
+ status_t set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT,
uint32_t sampleRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channelMask = AUDIO_CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
- callback_t cbf = 0,
- void* user = 0,
+ callback_t cbf = NULL,
+ void* user = NULL,
int notificationFrames = 0,
bool threadCanCallJava = false,
int sessionId = 0);
@@ -208,7 +208,7 @@
int channels() const;
uint32_t frameCount() const;
size_t frameSize() const;
- int inputSource() const;
+ audio_source_t inputSource() const;
/* After it's created the track is not active. Call start() to
@@ -367,8 +367,7 @@
audio_track_cblk_t* mCblk;
audio_format_t mFormat;
uint8_t mChannelCount;
- uint8_t mInputSource;
- uint8_t mReserved[2];
+ audio_source_t mInputSource;
status_t mStatus;
uint32_t mLatency;
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index c6368fb..74a1e62 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -163,7 +163,7 @@
audio_stream_type_t stream,
int session = 0);
static void releaseOutput(audio_io_handle_t output);
- static audio_io_handle_t getInput(int inputSource,
+ static audio_io_handle_t getInput(audio_source_t inputSource,
uint32_t samplingRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = AUDIO_CHANNEL_IN_MONO,
@@ -248,7 +248,7 @@
static sp<IAudioPolicyService> gAudioPolicyService;
// mapping between stream types and outputs
- static DefaultKeyedVector<int, audio_io_handle_t> gStreamOutputMap;
+ static DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> gStreamOutputMap;
// list of output descriptors containing cached parameters
// (sampling rate, framecount, channel count...)
static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 98abfbd..02c85cd 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -148,8 +148,8 @@
int channelMask = 0,
int frameCount = 0,
uint32_t flags = 0,
- callback_t cbf = 0,
- void* user = 0,
+ callback_t cbf = NULL,
+ void* user = NULL,
int notificationFrames = 0,
int sessionId = 0);
@@ -180,8 +180,8 @@
int channelMask = 0,
const sp<IMemory>& sharedBuffer = 0,
uint32_t flags = 0,
- callback_t cbf = 0,
- void* user = 0,
+ callback_t cbf = NULL,
+ void* user = NULL,
int notificationFrames = 0,
int sessionId = 0);
@@ -204,8 +204,8 @@
int channelMask = 0,
int frameCount = 0,
uint32_t flags = 0,
- callback_t cbf = 0,
- void* user = 0,
+ callback_t cbf = NULL,
+ void* user = NULL,
int notificationFrames = 0,
const sp<IMemory>& sharedBuffer = 0,
bool threadCanCallJava = false,
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 07d17c5..4d88297 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -60,7 +60,7 @@
audio_stream_type_t stream,
int session = 0) = 0;
virtual void releaseOutput(audio_io_handle_t output) = 0;
- virtual audio_io_handle_t getInput(int inputSource,
+ virtual audio_io_handle_t getInput(audio_source_t inputSource,
uint32_t samplingRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index c4cc947..a295e9a 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -42,10 +42,10 @@
typedef void *buffer_id;
typedef void *node_id;
- // Given the calling process' pid, returns true iff
+ // Given a node_id and the calling process' pid, returns true iff
// the implementation of the OMX interface lives in the same
// process.
- virtual bool livesLocally(pid_t pid) = 0;
+ virtual bool livesLocally(node_id node, pid_t pid) = 0;
struct ComponentInfo {
String8 mName;
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 1a4cbca..60fa15b 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -66,8 +66,8 @@
* See AudioEffect constructor for details on parameters.
*/
Visualizer(int32_t priority = 0,
- effect_callback_t cbf = 0,
- void* user = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
int sessionId = 0);
~Visualizer();
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 19bd31b..2427e2f 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -34,7 +34,7 @@
// Note that the "channels" parameter is _not_ the number of channels,
// but a bitmask of audio_channels_t constants.
AudioSource(
- int inputSource, uint32_t sampleRate,
+ audio_source_t inputSource, uint32_t sampleRate,
uint32_t channels = AUDIO_CHANNEL_IN_MONO);
status_t initCheck() const;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ffc546e..dd97ce4 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -76,7 +76,9 @@
// Left channel is in [0:15], right channel is in [16:31].
// Always read and write the combined pair atomically.
// For AudioTrack only, not used by AudioRecord.
- uint32_t volumeLR;
+private:
+ uint32_t mVolumeLR;
+public:
uint32_t sampleRate;
// NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for
@@ -116,6 +118,17 @@
uint16_t getSendLevel_U4_12() const {
return mSendLevel;
}
+
+ // for AudioTrack client only, caller must limit to 0 <= volumeLR <= 0x10001000
+ void setVolumeLR(uint32_t volumeLR) {
+ mVolumeLR = volumeLR;
+ }
+
+ // for AudioFlinger only; the return value must be validated by the caller
+ uint32_t getVolumeLR() const {
+ return mVolumeLR;
+ }
+
};
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index 77d40b6..7f7c7e1 100755
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -13,7 +13,7 @@
LOCAL_C_INCLUDES += \
external/webrtc/src \
external/webrtc/src/modules/interface \
- external/webrtc/src/modules/audio_processing/main/interface \
+ external/webrtc/src/modules/audio_processing/interface \
system/media/audio_effects/include
LOCAL_C_INCLUDES += $(call include-path-for, speex)
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index e988e06..9fd6764 100755
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -24,8 +24,8 @@
#include <audio_effects/effect_aec.h>
#include <audio_effects/effect_agc.h>
#include <audio_effects/effect_ns.h>
-#include "modules/interface/module_common_types.h"
-#include "modules/audio_processing/main/interface/audio_processing.h"
+#include <module_common_types.h>
+#include <audio_processing.h>
#include "speex/speex_resampler.h"
@@ -220,8 +220,8 @@
// Automatic Gain Control (AGC)
//------------------------------------------------------------------------------
-static const int kAgcDefaultTargetLevel = 0;
-static const int kAgcDefaultCompGain = 90;
+static const int kAgcDefaultTargetLevel = 3;
+static const int kAgcDefaultCompGain = 9;
static const bool kAgcDefaultLimiter = true;
int AgcInit (preproc_effect_t *effect)
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 6639d06..a242846 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -342,7 +342,7 @@
{
ALOGW("IEffect died");
mStatus = NO_INIT;
- if (mCbf) {
+ if (mCbf != NULL) {
status_t status = DEAD_OBJECT;
mCbf(EVENT_ERROR, mUserData, &status);
}
@@ -363,7 +363,7 @@
mStatus = ALREADY_EXISTS;
}
}
- if (mCbf) {
+ if (mCbf != NULL) {
mCbf(EVENT_CONTROL_STATUS_CHANGED, mUserData, &controlGranted);
}
}
@@ -373,7 +373,7 @@
ALOGV("enableStatusChanged %p enabled %d mCbf %p", this, enabled, mCbf);
if (mStatus == ALREADY_EXISTS) {
mEnabled = enabled;
- if (mCbf) {
+ if (mCbf != NULL) {
mCbf(EVENT_ENABLE_STATUS_CHANGED, mUserData, &enabled);
}
}
@@ -389,7 +389,7 @@
return;
}
- if (mCbf && cmdCode == EFFECT_CMD_SET_PARAM) {
+ if (mCbf != NULL && cmdCode == EFFECT_CMD_SET_PARAM) {
effect_param_t *cmd = (effect_param_t *)cmdData;
cmd->status = *(int32_t *)replyData;
mCbf(EVENT_PARAMETER_CHANGED, mUserData, cmd);
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 5b5b076..c96bc76 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -84,7 +84,7 @@
}
AudioRecord::AudioRecord(
- int inputSource,
+ audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
uint32_t channelMask,
@@ -119,7 +119,7 @@
}
status_t AudioRecord::set(
- int inputSource,
+ audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
uint32_t channelMask,
@@ -206,7 +206,7 @@
return status;
}
- if (cbf != 0) {
+ if (cbf != NULL) {
mClientRecordThread = new ClientRecordThread(*this, threadCanCallJava);
}
@@ -228,7 +228,7 @@
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
- mInputSource = (uint8_t)inputSource;
+ mInputSource = inputSource;
mFlags = flags;
mInput = input;
AudioSystem::acquireAudioSessionId(mSessionId);
@@ -272,9 +272,9 @@
}
}
-int AudioRecord::inputSource() const
+audio_source_t AudioRecord::inputSource() const
{
- return (int)mInputSource;
+ return mInputSource;
}
// -------------------------------------------------------------------------
@@ -387,7 +387,7 @@
status_t AudioRecord::setMarkerPosition(uint32_t marker)
{
- if (mCbf == 0) return INVALID_OPERATION;
+ if (mCbf == NULL) return INVALID_OPERATION;
mMarkerPosition = marker;
mMarkerReached = false;
@@ -397,7 +397,7 @@
status_t AudioRecord::getMarkerPosition(uint32_t *marker)
{
- if (marker == 0) return BAD_VALUE;
+ if (marker == NULL) return BAD_VALUE;
*marker = mMarkerPosition;
@@ -406,7 +406,7 @@
status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
{
- if (mCbf == 0) return INVALID_OPERATION;
+ if (mCbf == NULL) return INVALID_OPERATION;
uint32_t curPosition;
getPosition(&curPosition);
@@ -418,7 +418,7 @@
status_t AudioRecord::getPositionUpdatePeriod(uint32_t *updatePeriod)
{
- if (updatePeriod == 0) return BAD_VALUE;
+ if (updatePeriod == NULL) return BAD_VALUE;
*updatePeriod = mUpdatePeriod;
@@ -427,7 +427,7 @@
status_t AudioRecord::getPosition(uint32_t *position)
{
- if (position == 0) return BAD_VALUE;
+ if (position == NULL) return BAD_VALUE;
AutoMutex lock(mLock);
*position = mCblk->user;
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 952d634..110a294 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -35,7 +35,8 @@
sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
// Cached values
-DefaultKeyedVector<int, audio_io_handle_t> AudioSystem::gStreamOutputMap(0);
+
+DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> AudioSystem::gStreamOutputMap(0);
DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0);
// Cached values for recording queries, all protected by gLock
@@ -224,7 +225,7 @@
gLock.lock();
outputDesc = AudioSystem::gOutputs.valueFor(output);
- if (outputDesc == 0) {
+ if (outputDesc == NULL) {
ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
gLock.unlock();
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -262,7 +263,7 @@
gLock.lock();
outputDesc = AudioSystem::gOutputs.valueFor(output);
- if (outputDesc == 0) {
+ if (outputDesc == NULL) {
gLock.unlock();
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
@@ -293,7 +294,7 @@
gLock.lock();
outputDesc = AudioSystem::gOutputs.valueFor(output);
- if (outputDesc == 0) {
+ if (outputDesc == NULL) {
gLock.unlock();
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
@@ -404,7 +405,7 @@
void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, int ioHandle, void *param2) {
ALOGV("ioConfigChanged() event %d", event);
OutputDescriptor *desc;
- uint32_t stream;
+ audio_stream_type_t stream;
if (ioHandle == 0) return;
@@ -412,8 +413,8 @@
switch (event) {
case STREAM_CONFIG_CHANGED:
- if (param2 == 0) break;
- stream = *(uint32_t *)param2;
+ if (param2 == NULL) break;
+ stream = *(audio_stream_type_t *)param2;
ALOGV("ioConfigChanged() STREAM_CONFIG_CHANGED stream %d, output %d", stream, ioHandle);
if (gStreamOutputMap.indexOfKey(stream) >= 0) {
gStreamOutputMap.replaceValueFor(stream, ioHandle);
@@ -424,7 +425,7 @@
ALOGV("ioConfigChanged() opening already existing output! %d", ioHandle);
break;
}
- if (param2 == 0) break;
+ if (param2 == NULL) break;
desc = (OutputDescriptor *)param2;
OutputDescriptor *outputDesc = new OutputDescriptor(*desc);
@@ -453,7 +454,7 @@
ALOGW("ioConfigChanged() modifying unknow output! %d", ioHandle);
break;
}
- if (param2 == 0) break;
+ if (param2 == NULL) break;
desc = (OutputDescriptor *)param2;
ALOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %d frameCount %d latency %d",
@@ -630,7 +631,7 @@
aps->releaseOutput(output);
}
-audio_io_handle_t AudioSystem::getInput(int inputSource,
+audio_io_handle_t AudioSystem::getInput(audio_source_t inputSource,
uint32_t samplingRate,
audio_format_t format,
uint32_t channels,
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 17e3d4b..8c33f41 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -257,7 +257,7 @@
return status;
}
- if (cbf != 0) {
+ if (cbf != NULL) {
mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
}
@@ -501,7 +501,7 @@
mVolume[LEFT] = left;
mVolume[RIGHT] = right;
- mCblk->volumeLR = (uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000);
+ mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
return NO_ERROR;
}
@@ -604,13 +604,13 @@
status_t AudioTrack::getLoop(uint32_t *loopStart, uint32_t *loopEnd, int *loopCount)
{
AutoMutex lock(mLock);
- if (loopStart != 0) {
+ if (loopStart != NULL) {
*loopStart = mCblk->loopStart;
}
- if (loopEnd != 0) {
+ if (loopEnd != NULL) {
*loopEnd = mCblk->loopEnd;
}
- if (loopCount != 0) {
+ if (loopCount != NULL) {
if (mCblk->loopCount < 0) {
*loopCount = -1;
} else {
@@ -623,7 +623,7 @@
status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
- if (mCbf == 0) return INVALID_OPERATION;
+ if (mCbf == NULL) return INVALID_OPERATION;
mMarkerPosition = marker;
mMarkerReached = false;
@@ -633,7 +633,7 @@
status_t AudioTrack::getMarkerPosition(uint32_t *marker)
{
- if (marker == 0) return BAD_VALUE;
+ if (marker == NULL) return BAD_VALUE;
*marker = mMarkerPosition;
@@ -642,7 +642,7 @@
status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
- if (mCbf == 0) return INVALID_OPERATION;
+ if (mCbf == NULL) return INVALID_OPERATION;
uint32_t curPosition;
getPosition(&curPosition);
@@ -654,7 +654,7 @@
status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod)
{
- if (updatePeriod == 0) return BAD_VALUE;
+ if (updatePeriod == NULL) return BAD_VALUE;
*updatePeriod = mUpdatePeriod;
@@ -679,7 +679,7 @@
status_t AudioTrack::getPosition(uint32_t *position)
{
- if (position == 0) return BAD_VALUE;
+ if (position == NULL) return BAD_VALUE;
AutoMutex lock(mLock);
*position = mFlushed ? 0 : mCblk->server;
@@ -837,7 +837,7 @@
mCblk->stepUser(mCblk->frameCount);
}
- mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000);
+ mCblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000));
mCblk->setSendLevel(mSendLevel);
mAudioTrack->attachAuxEffect(mAuxEffectId);
mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
@@ -1319,8 +1319,8 @@
audio_track_cblk_t::audio_track_cblk_t()
: lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0),
- userBase(0), serverBase(0), buffers(0), frameCount(0),
- loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), volumeLR(0),
+ userBase(0), serverBase(0), buffers(NULL), frameCount(0),
+ loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000),
mSendLevel(0), flags(0)
{
}
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 0d442ef..7c5589d 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -640,7 +640,7 @@
*id = tmp;
}
tmp = reply.readInt32();
- if (enabled) {
+ if (enabled != NULL) {
*enabled = tmp;
}
effect = interface_cast<IEffect>(reply.readStrongBinder());
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 5a3f250..9458bc0 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -73,7 +73,7 @@
CHECK_INTERFACE(IAudioFlingerClient, data, reply);
int event = data.readInt32();
int ioHandle = data.readInt32();
- void *param2 = 0;
+ void *param2 = NULL;
AudioSystem::OutputDescriptor desc;
uint32_t stream;
if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index b5c857f..99385aa 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -172,7 +172,7 @@
}
virtual audio_io_handle_t getInput(
- int inputSource,
+ audio_source_t inputSource,
uint32_t samplingRate,
audio_format_t format,
uint32_t channels,
@@ -181,7 +181,7 @@
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(inputSource);
+ data.writeInt32((int32_t) inputSource);
data.writeInt32(samplingRate);
data.writeInt32(static_cast <uint32_t>(format));
data.writeInt32(channels);
@@ -461,7 +461,7 @@
case GET_INPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- int inputSource = data.readInt32();
+ audio_source_t inputSource = (audio_source_t) data.readInt32();
uint32_t samplingRate = data.readInt32();
audio_format_t format = (audio_format_t) data.readInt32();
uint32_t channels = data.readInt32();
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index d2f5f71..27c7e03 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -59,9 +59,10 @@
: BpInterface<IOMX>(impl) {
}
- virtual bool livesLocally(pid_t pid) {
+ virtual bool livesLocally(node_id node, pid_t pid) {
Parcel data, reply;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+ data.writeIntPtr((intptr_t)node);
data.writeInt32(pid);
remote()->transact(LIVES_LOCALLY, data, &reply);
@@ -417,7 +418,9 @@
case LIVES_LOCALLY:
{
CHECK_INTERFACE(IOMX, data, reply);
- reply->writeInt32(livesLocally((pid_t)data.readInt32()));
+ node_id node = (void *)data.readIntPtr();
+ pid_t pid = (pid_t)data.readInt32();
+ reply->writeInt32(livesLocally(node, pid));
return OK;
}
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 549a412..e6e989d 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -811,9 +811,9 @@
mThreadCanCallJava = threadCanCallJava;
mStreamType = streamType;
mVolume = volume;
- mpAudioTrack = 0;
- mpToneDesc = 0;
- mpNewToneDesc = 0;
+ mpAudioTrack = NULL;
+ mpToneDesc = NULL;
+ mpNewToneDesc = NULL;
// Generate tone by chunks of 20 ms to keep cadencing precision
mProcessSize = (mSamplingRate * 20) / 1000;
@@ -855,7 +855,7 @@
ToneGenerator::~ToneGenerator() {
ALOGV("ToneGenerator destructor\n");
- if (mpAudioTrack) {
+ if (mpAudioTrack != NULL) {
stopTone();
ALOGV("Delete Track: %p\n", mpAudioTrack);
delete mpAudioTrack;
@@ -1012,7 +1012,7 @@
if (mpAudioTrack) {
delete mpAudioTrack;
- mpAudioTrack = 0;
+ mpAudioTrack = NULL;
}
// Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
@@ -1048,7 +1048,7 @@
if (mpAudioTrack) {
ALOGV("Delete Track I: %p\n", mpAudioTrack);
delete mpAudioTrack;
- mpAudioTrack = 0;
+ mpAudioTrack = NULL;
}
return false;
@@ -1317,7 +1317,7 @@
bool ToneGenerator::prepareWave() {
unsigned int segmentIdx = 0;
- if (!mpNewToneDesc) {
+ if (mpNewToneDesc == NULL) {
return false;
}
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 4d61067..03e8a06 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -74,11 +74,10 @@
libcrypto \
libssl \
libgui \
+ libstagefright_omx \
LOCAL_STATIC_LIBRARIES := \
libstagefright_color_conversion \
- libstagefright_amrnbenc \
- libstagefright_amrwbenc \
libstagefright_avcenc \
libstagefright_m4vh263enc \
libstagefright_matroska \
@@ -140,7 +139,6 @@
################################################################################
LOCAL_SHARED_LIBRARIES += \
- libstagefright_amrnb_common \
libstagefright_enc_common \
libstagefright_avc_common \
libstagefright_foundation \
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 2172cc0..8bdb7c5 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -47,7 +47,7 @@
}
AudioSource::AudioSource(
- int inputSource, uint32_t sampleRate, uint32_t channels)
+ audio_source_t inputSource, uint32_t sampleRate, uint32_t channels)
: mStarted(false),
mSampleRate(sampleRate),
mPrevSampleTimeUs(0),
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 8480b6d..8073af8 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -30,7 +30,7 @@
#include "include/MPEG2TSExtractor.h"
#include "include/WVMExtractor.h"
-#include "timedtext/TimedTextPlayer.h"
+#include "timedtext/TimedTextDriver.h"
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
@@ -192,7 +192,7 @@
mVideoBuffer(NULL),
mDecryptHandle(NULL),
mLastVideoTimeUs(-1),
- mTextPlayer(NULL) {
+ mTextDriver(NULL) {
CHECK_EQ(mClient.connect(), (status_t)OK);
DataSource::RegisterDefaultSniffers();
@@ -530,9 +530,9 @@
delete mAudioPlayer;
mAudioPlayer = NULL;
- if (mTextPlayer != NULL) {
- delete mTextPlayer;
- mTextPlayer = NULL;
+ if (mTextDriver != NULL) {
+ delete mTextDriver;
+ mTextDriver = NULL;
}
mVideoRenderer.clear();
@@ -1118,7 +1118,7 @@
}
if (mFlags & TEXTPLAYER_STARTED) {
- mTextPlayer->pause();
+ mTextDriver->pause();
modifyFlags(TEXT_RUNNING, CLEAR);
}
@@ -1272,9 +1272,9 @@
}
status_t AwesomePlayer::setTimedTextTrackIndex(int32_t index) {
- if (mTextPlayer != NULL) {
+ if (mTextDriver != NULL) {
if (index >= 0) { // to turn on a text track
- status_t err = mTextPlayer->setTimedTextTrackIndex(index);
+ status_t err = mTextDriver->setTimedTextTrackIndex(index);
if (err != OK) {
return err;
}
@@ -1290,7 +1290,7 @@
modifyFlags(TEXTPLAYER_STARTED, CLEAR);
}
- return mTextPlayer->setTimedTextTrackIndex(index);
+ return mTextDriver->setTimedTextTrackIndex(index);
}
} else {
return INVALID_OPERATION;
@@ -1319,7 +1319,7 @@
seekAudioIfNecessary_l();
if (mFlags & TEXTPLAYER_STARTED) {
- mTextPlayer->seekTo(mSeekTimeUs);
+ mTextDriver->seekToAsync(mSeekTimeUs);
}
if (!(mFlags & PLAYING)) {
@@ -1364,11 +1364,11 @@
Mutex::Autolock autoLock(mTimedTextLock);
CHECK(source != NULL);
- if (mTextPlayer == NULL) {
- mTextPlayer = new TimedTextPlayer(this, mListener, &mQueue);
+ if (mTextDriver == NULL) {
+ mTextDriver = new TimedTextDriver(mListener);
}
- mTextPlayer->addTextSource(source);
+ mTextDriver->addInBandTextSource(source);
}
status_t AwesomePlayer::initAudioDecoder() {
@@ -1695,7 +1695,7 @@
}
if ((mFlags & TEXTPLAYER_STARTED) && !(mFlags & (TEXT_RUNNING | SEEK_PREVIEW))) {
- mTextPlayer->resume();
+ mTextDriver->resume();
modifyFlags(TEXT_RUNNING, SET);
}
@@ -2241,11 +2241,11 @@
case KEY_PARAMETER_TIMED_TEXT_ADD_OUT_OF_BAND_SOURCE:
{
Mutex::Autolock autoLock(mTimedTextLock);
- if (mTextPlayer == NULL) {
- mTextPlayer = new TimedTextPlayer(this, mListener, &mQueue);
+ if (mTextDriver == NULL) {
+ mTextDriver = new TimedTextDriver(mListener);
}
- return mTextPlayer->setParameter(key, request);
+ return mTextDriver->addOutOfBandTextSource(request);
}
case KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS:
{
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index bc88015..6c95d4e 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -20,7 +20,6 @@
#include "include/MPEG4Extractor.h"
#include "include/SampleTable.h"
#include "include/ESDS.h"
-#include "timedtext/TimedTextPlayer.h"
#include <arpa/inet.h>
@@ -2430,4 +2429,3 @@
}
} // namespace android
-
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 9de873e..7a805aa 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -20,11 +20,299 @@
#include <binder/IServiceManager.h>
#include <media/IMediaPlayerService.h>
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/OMXClient.h>
+#include <utils/KeyedVector.h>
+
+#include "include/OMX.h"
namespace android {
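+// MuxOMX dispatches each IOMX call to either a local, in-process OMX
+// instance (used for software components) or to the remote mediaserver
+// OMX, depending on where the node was allocated.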
+struct MuxOMX : public IOMX {
+ MuxOMX(const sp<IOMX> &remoteOMX);
+ virtual ~MuxOMX();
+
+ virtual IBinder *onAsBinder() { return NULL; }
+
+ virtual bool livesLocally(node_id node, pid_t pid);
+
+ virtual status_t listNodes(List<ComponentInfo> *list);
+
+ virtual status_t allocateNode(
+ const char *name, const sp<IOMXObserver> &observer,
+ node_id *node);
+
+ virtual status_t freeNode(node_id node);
+
+ virtual status_t sendCommand(
+ node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param);
+
+ virtual status_t getParameter(
+ node_id node, OMX_INDEXTYPE index,
+ void *params, size_t size);
+
+ virtual status_t setParameter(
+ node_id node, OMX_INDEXTYPE index,
+ const void *params, size_t size);
+
+ virtual status_t getConfig(
+ node_id node, OMX_INDEXTYPE index,
+ void *params, size_t size);
+
+ virtual status_t setConfig(
+ node_id node, OMX_INDEXTYPE index,
+ const void *params, size_t size);
+
+ virtual status_t getState(
+ node_id node, OMX_STATETYPE* state);
+
+ virtual status_t storeMetaDataInBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable);
+
+ virtual status_t enableGraphicBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable);
+
+ virtual status_t getGraphicBufferUsage(
+ node_id node, OMX_U32 port_index, OMX_U32* usage);
+
+ virtual status_t useBuffer(
+ node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+ buffer_id *buffer);
+
+ virtual status_t useGraphicBuffer(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
+
+ virtual status_t allocateBuffer(
+ node_id node, OMX_U32 port_index, size_t size,
+ buffer_id *buffer, void **buffer_data);
+
+ virtual status_t allocateBufferWithBackup(
+ node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+ buffer_id *buffer);
+
+ virtual status_t freeBuffer(
+ node_id node, OMX_U32 port_index, buffer_id buffer);
+
+ virtual status_t fillBuffer(node_id node, buffer_id buffer);
+
+ virtual status_t emptyBuffer(
+ node_id node,
+ buffer_id buffer,
+ OMX_U32 range_offset, OMX_U32 range_length,
+ OMX_U32 flags, OMX_TICKS timestamp);
+
+ virtual status_t getExtensionIndex(
+ node_id node,
+ const char *parameter_name,
+ OMX_INDEXTYPE *index);
+
+private:
+ mutable Mutex mLock;
+
+ sp<IOMX> mRemoteOMX;
+ sp<IOMX> mLocalOMX;
+
+ KeyedVector<node_id, bool> mIsLocalNode;
+
+ bool isLocalNode(node_id node) const;
+ bool isLocalNode_l(node_id node) const;
+ const sp<IOMX> &getOMX(node_id node) const;
+ const sp<IOMX> &getOMX_l(node_id node) const;
+
+ static bool IsSoftwareComponent(const char *name);
+
+ DISALLOW_EVIL_CONSTRUCTORS(MuxOMX);
+};
+
+MuxOMX::MuxOMX(const sp<IOMX> &remoteOMX)
+ : mRemoteOMX(remoteOMX) {
+}
+
+MuxOMX::~MuxOMX() {
+}
+
+bool MuxOMX::isLocalNode(node_id node) const {
+ Mutex::Autolock autoLock(mLock);
+
+ return isLocalNode_l(node);
+}
+
+bool MuxOMX::isLocalNode_l(node_id node) const {
+ return mIsLocalNode.indexOfKey(node) >= 0;
+}
+
+// static
+bool MuxOMX::IsSoftwareComponent(const char *name) {
+ return !strncasecmp(name, "OMX.google.", 11);
+}
+
+const sp<IOMX> &MuxOMX::getOMX(node_id node) const {
+ return isLocalNode(node) ? mLocalOMX : mRemoteOMX;
+}
+
+const sp<IOMX> &MuxOMX::getOMX_l(node_id node) const {
+ return isLocalNode_l(node) ? mLocalOMX : mRemoteOMX;
+}
+
+bool MuxOMX::livesLocally(node_id node, pid_t pid) {
+ return getOMX(node)->livesLocally(node, pid);
+}
+
+status_t MuxOMX::listNodes(List<ComponentInfo> *list) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mLocalOMX == NULL) {
+ mLocalOMX = new OMX;
+ }
+
+ return mLocalOMX->listNodes(list);
+}
+
+status_t MuxOMX::allocateNode(
+ const char *name, const sp<IOMXObserver> &observer,
+ node_id *node) {
+ Mutex::Autolock autoLock(mLock);
+
+ sp<IOMX> omx;
+
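+ // Software (OMX.google.*) components are instantiated on a local,
+ // in-process OMX; all other components stay on the remote mediaserver OMX.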
+ if (IsSoftwareComponent(name)) {
+ if (mLocalOMX == NULL) {
+ mLocalOMX = new OMX;
+ }
+ omx = mLocalOMX;
+ } else {
+ omx = mRemoteOMX;
+ }
+
+ status_t err = omx->allocateNode(name, observer, node);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (omx == mLocalOMX) {
+ mIsLocalNode.add(*node, true);
+ }
+
+ return OK;
+}
+
+status_t MuxOMX::freeNode(node_id node) {
+ Mutex::Autolock autoLock(mLock);
+
+ status_t err = getOMX_l(node)->freeNode(node);
+
+ if (err != OK) {
+ return err;
+ }
+
+ mIsLocalNode.removeItem(node);
+
+ return OK;
+}
+
+status_t MuxOMX::sendCommand(
+ node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
+ return getOMX(node)->sendCommand(node, cmd, param);
+}
+
+status_t MuxOMX::getParameter(
+ node_id node, OMX_INDEXTYPE index,
+ void *params, size_t size) {
+ return getOMX(node)->getParameter(node, index, params, size);
+}
+
+status_t MuxOMX::setParameter(
+ node_id node, OMX_INDEXTYPE index,
+ const void *params, size_t size) {
+ return getOMX(node)->setParameter(node, index, params, size);
+}
+
+status_t MuxOMX::getConfig(
+ node_id node, OMX_INDEXTYPE index,
+ void *params, size_t size) {
+ return getOMX(node)->getConfig(node, index, params, size);
+}
+
+status_t MuxOMX::setConfig(
+ node_id node, OMX_INDEXTYPE index,
+ const void *params, size_t size) {
+ return getOMX(node)->setConfig(node, index, params, size);
+}
+
+status_t MuxOMX::getState(
+ node_id node, OMX_STATETYPE* state) {
+ return getOMX(node)->getState(node, state);
+}
+
+status_t MuxOMX::storeMetaDataInBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+ return getOMX(node)->storeMetaDataInBuffers(node, port_index, enable);
+}
+
+status_t MuxOMX::enableGraphicBuffers(
+ node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+ return getOMX(node)->enableGraphicBuffers(node, port_index, enable);
+}
+
+status_t MuxOMX::getGraphicBufferUsage(
+ node_id node, OMX_U32 port_index, OMX_U32* usage) {
+ return getOMX(node)->getGraphicBufferUsage(node, port_index, usage);
+}
+
+status_t MuxOMX::useBuffer(
+ node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+ buffer_id *buffer) {
+ return getOMX(node)->useBuffer(node, port_index, params, buffer);
+}
+
+status_t MuxOMX::useGraphicBuffer(
+ node_id node, OMX_U32 port_index,
+ const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
+ return getOMX(node)->useGraphicBuffer(
+ node, port_index, graphicBuffer, buffer);
+}
+
+status_t MuxOMX::allocateBuffer(
+ node_id node, OMX_U32 port_index, size_t size,
+ buffer_id *buffer, void **buffer_data) {
+ return getOMX(node)->allocateBuffer(
+ node, port_index, size, buffer, buffer_data);
+}
+
+status_t MuxOMX::allocateBufferWithBackup(
+ node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+ buffer_id *buffer) {
+ return getOMX(node)->allocateBufferWithBackup(
+ node, port_index, params, buffer);
+}
+
+status_t MuxOMX::freeBuffer(
+ node_id node, OMX_U32 port_index, buffer_id buffer) {
+ return getOMX(node)->freeBuffer(node, port_index, buffer);
+}
+
+status_t MuxOMX::fillBuffer(node_id node, buffer_id buffer) {
+ return getOMX(node)->fillBuffer(node, buffer);
+}
+
+status_t MuxOMX::emptyBuffer(
+ node_id node,
+ buffer_id buffer,
+ OMX_U32 range_offset, OMX_U32 range_length,
+ OMX_U32 flags, OMX_TICKS timestamp) {
+ return getOMX(node)->emptyBuffer(
+ node, buffer, range_offset, range_length, flags, timestamp);
+}
+
+status_t MuxOMX::getExtensionIndex(
+ node_id node,
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) {
+ return getOMX(node)->getExtensionIndex(node, parameter_name, index);
+}
+
OMXClient::OMXClient() {
}
@@ -38,6 +326,11 @@
mOMX = service->getOMX();
CHECK(mOMX.get() != NULL);
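+ // If the OMX interface lives in another process (mediaserver), wrap it in
+ // a MuxOMX so that software components can still run in this process.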
+ if (!mOMX->livesLocally(NULL /* node */, getpid())) {
+ ALOGI("Using client-side OMX mux.");
+ mOMX = new MuxOMX(mOMX);
+ }
+
return OK;
}
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7597f64..af4aa79 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -18,8 +18,6 @@
#define LOG_TAG "OMXCodec"
#include <utils/Log.h>
-#include "include/AMRNBEncoder.h"
-#include "include/AMRWBEncoder.h"
#include "include/AVCEncoder.h"
#include "include/M4vH263Encoder.h"
@@ -70,8 +68,6 @@
#define FACTORY_REF(name) { #name, Make##name },
-FACTORY_CREATE_ENCODER(AMRNBEncoder)
-FACTORY_CREATE_ENCODER(AMRWBEncoder)
FACTORY_CREATE_ENCODER(AVCEncoder)
FACTORY_CREATE_ENCODER(M4vH263Encoder)
@@ -84,8 +80,6 @@
};
static const FactoryInfo kFactoryInfo[] = {
- FACTORY_REF(AMRNBEncoder)
- FACTORY_REF(AMRWBEncoder)
FACTORY_REF(AVCEncoder)
FACTORY_REF(M4vH263Encoder)
};
@@ -146,9 +140,9 @@
static const CodecInfo kEncoderInfo[] = {
{ MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.TI.AMR.encode" },
- { MEDIA_MIMETYPE_AUDIO_AMR_NB, "AMRNBEncoder" },
+ { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.google.amrnb.encoder" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.encode" },
- { MEDIA_MIMETYPE_AUDIO_AMR_WB, "AMRWBEncoder" },
+ { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.google.amrwb.encoder" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.encode" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.google.aac.encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.DUCATI1.VIDEO.MPEG4E" },
@@ -1479,7 +1473,7 @@
const sp<MediaSource> &source,
const sp<ANativeWindow> &nativeWindow)
: mOMX(omx),
- mOMXLivesLocally(omx->livesLocally(getpid())),
+ mOMXLivesLocally(omx->livesLocally(node, getpid())),
mNode(node),
mQuirks(quirks),
mFlags(flags),
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC.cpp b/media/libstagefright/codecs/aacdec/SoftAAC.cpp
index da9d280..ea6c360 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC.cpp
@@ -218,6 +218,18 @@
return OMX_ErrorNone;
}
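+ // Accept OMX_IndexParamAudioPcm for the output port (index 1) without
+ // changing any state; only the port index is validated here.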
+ case OMX_IndexParamAudioPcm:
+ {
+ const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk
index b6aed81..94e8726 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.mk
+++ b/media/libstagefright/codecs/amrnb/enc/Android.mk
@@ -74,3 +74,30 @@
LOCAL_MODULE := libstagefright_amrnbenc
include $(BUILD_STATIC_LIBRARY)
+
+################################################################################
+
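+# Software AMR-NB encoder OMX component (OMX.google.amrnb.encoder)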
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ SoftAMRNBEncoder.cpp
+
+LOCAL_C_INCLUDES := \
+ frameworks/base/media/libstagefright/include \
+ frameworks/base/include/media/stagefright/openmax \
+ $(LOCAL_PATH)/src \
+ $(LOCAL_PATH)/include \
+ $(LOCAL_PATH)/../common/include \
+ $(LOCAL_PATH)/../common
+
+LOCAL_STATIC_LIBRARIES := \
+ libstagefright_amrnbenc
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright_omx libstagefright_foundation libutils \
+ libstagefright_amrnb_common
+
+LOCAL_MODULE := libstagefright_soft_amrnbenc
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
new file mode 100644
index 0000000..07f8b4f
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
@@ -0,0 +1,404 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAMRNBEncoder"
+#include <utils/Log.h>
+
+#include "SoftAMRNBEncoder.h"
+
+#include "gsmamr_enc.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+namespace android {
+
+static const int32_t kSampleRate = 8000;
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+SoftAMRNBEncoder::SoftAMRNBEncoder(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mEncState(NULL),
+ mSidState(NULL),
+ mBitRate(0),
+ mMode(MR475),
+ mInputSize(0),
+ mInputTimeUs(-1ll),
+ mSawInputEOS(false),
+ mSignalledError(false) {
+ initPorts();
+ CHECK_EQ(initEncoder(), (status_t)OK);
+}
+
+SoftAMRNBEncoder::~SoftAMRNBEncoder() {
+ if (mEncState != NULL) {
+ AMREncodeExit(&mEncState, &mSidState);
+ mEncState = mSidState = NULL;
+ }
+}
+
+void SoftAMRNBEncoder::initPorts() {
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+
+ def.nPortIndex = 0;
+ def.eDir = OMX_DirInput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t);
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 1;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ addPort(def);
+
+ def.nPortIndex = 1;
+ def.eDir = OMX_DirOutput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = 8192;
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 2;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/3gpp");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingAMR;
+
+ addPort(def);
+}
+
+status_t SoftAMRNBEncoder::initEncoder() {
+ if (AMREncodeInit(&mEncState, &mSidState, false /* dtx_enable */) != 0) {
+ return UNKNOWN_ERROR;
+ }
+
+ return OK;
+}
+
+OMX_ERRORTYPE SoftAMRNBEncoder::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamAudioPortFormat:
+ {
+ OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+ (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+ if (formatParams->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (formatParams->nIndex > 0) {
+ return OMX_ErrorNoMore;
+ }
+
+ formatParams->eEncoding =
+ (formatParams->nPortIndex == 0)
+ ? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR;
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAmr:
+ {
+ OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+ (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+ if (amrParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ amrParams->nChannels = 1;
+ amrParams->nBitRate = mBitRate;
+ amrParams->eAMRBandMode = (OMX_AUDIO_AMRBANDMODETYPE)(mMode + 1);
+ amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+ amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eEndian = OMX_EndianBig;
+ pcmParams->bInterleaved = OMX_TRUE;
+ pcmParams->nBitPerSample = 16;
+ pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF;
+
+ pcmParams->nChannels = 1;
+ pcmParams->nSamplingRate = kSampleRate;
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalGetParameter(index, params);
+ }
+}
+
+OMX_ERRORTYPE SoftAMRNBEncoder::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamStandardComponentRole:
+ {
+ const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+ (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+ if (strncmp((const char *)roleParams->cRole,
+ "audio_encoder.amrnb",
+ OMX_MAX_STRINGNAME_SIZE - 1)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPortFormat:
+ {
+ const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+ (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+ if (formatParams->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (formatParams->nIndex > 0) {
+ return OMX_ErrorNoMore;
+ }
+
+ if ((formatParams->nPortIndex == 0
+ && formatParams->eEncoding != OMX_AUDIO_CodingPCM)
+ || (formatParams->nPortIndex == 1
+ && formatParams->eEncoding != OMX_AUDIO_CodingAMR)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAmr:
+ {
+ OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+ (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+ if (amrParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (amrParams->nChannels != 1
+ || amrParams->eAMRDTXMode != OMX_AUDIO_AMRDTXModeOff
+ || amrParams->eAMRFrameFormat
+ != OMX_AUDIO_AMRFrameFormatFSF
+ || amrParams->eAMRBandMode < OMX_AUDIO_AMRBandModeNB0
+ || amrParams->eAMRBandMode > OMX_AUDIO_AMRBandModeNB7) {
+ return OMX_ErrorUndefined;
+ }
+
+ mBitRate = amrParams->nBitRate;
+ mMode = amrParams->eAMRBandMode - 1;
+
+ amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+ amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (pcmParams->nChannels != 1
+ || pcmParams->nSamplingRate != kSampleRate) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+
+ default:
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+}
+
+void SoftAMRNBEncoder::onQueueFilled(OMX_U32 portIndex) {
+ if (mSignalledError) {
+ return;
+ }
+
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
+
+ size_t numBytesPerInputFrame = kNumSamplesPerFrame * sizeof(int16_t);
+
+ for (;;) {
+ // We do the following until we run out of buffers.
+
+ while (mInputSize < numBytesPerInputFrame) {
+ // As long as there's still input data to be read we
+ // will drain "kNumSamplesPerFrame" samples
+ // into the "mInputFrame" buffer and then encode those
+ // as a unit into an output buffer.
+
+ if (mSawInputEOS || inQueue.empty()) {
+ return;
+ }
+
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ const void *inData = inHeader->pBuffer + inHeader->nOffset;
+
+ size_t copy = numBytesPerInputFrame - mInputSize;
+ if (copy > inHeader->nFilledLen) {
+ copy = inHeader->nFilledLen;
+ }
+
+ if (mInputSize == 0) {
+ mInputTimeUs = inHeader->nTimeStamp;
+ }
+
+ memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
+ mInputSize += copy;
+
+ inHeader->nOffset += copy;
+ inHeader->nFilledLen -= copy;
+
+ // "Time" on the input buffer has in effect advanced by the
+ // number of audio frames we just advanced nOffset by.
+ inHeader->nTimeStamp +=
+ (copy * 1000000ll / kSampleRate) / sizeof(int16_t);
+
+ if (inHeader->nFilledLen == 0) {
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ ALOGV("saw input EOS");
+ mSawInputEOS = true;
+
+ // Pad any remaining data with zeroes.
+ memset((uint8_t *)mInputFrame + mInputSize,
+ 0,
+ numBytesPerInputFrame - mInputSize);
+
+ mInputSize = numBytesPerInputFrame;
+ }
+
+ inQueue.erase(inQueue.begin());
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+
+ inData = NULL;
+ inHeader = NULL;
+ inInfo = NULL;
+ }
+ }
+
+ // At this point we have all the input data necessary to encode
+ // a single frame, all we need is an output buffer to store the result
+ // in.
+
+ if (outQueue.empty()) {
+ return;
+ }
+
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ uint8_t *outPtr = outHeader->pBuffer + outHeader->nOffset;
+ size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;
+
+ Frame_Type_3GPP frameType;
+ int res = AMREncode(
+ mEncState, mSidState, (Mode)mMode,
+ mInputFrame, outPtr, &frameType, AMR_TX_WMF);
+
+ CHECK_GE(res, 0);
+ CHECK_LE((size_t)res, outAvailable);
+
+ // Convert header byte from WMF to IETF format.
+ outPtr[0] = ((outPtr[0] << 3) | 4) & 0x7c;
+
+ outHeader->nFilledLen = res;
+ outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
+
+ if (mSawInputEOS) {
+ // We also tag this output buffer with EOS if it corresponds
+ // to the final input buffer.
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ }
+
+ outHeader->nTimeStamp = mInputTimeUs;
+
+#if 0
+ ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)",
+ nOutputBytes, mInputTimeUs, outHeader->nFlags);
+
+ hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
+#endif
+
+ outQueue.erase(outQueue.begin());
+ outInfo->mOwnedByUs = false;
+ notifyFillBufferDone(outHeader);
+
+ outHeader = NULL;
+ outInfo = NULL;
+
+ mInputSize = 0;
+ }
+}
+
+} // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+ const char *name, const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+ return new android::SoftAMRNBEncoder(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h
new file mode 100644
index 0000000..50178c4
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_AMRNB_ENCODER_H_
+
+#define SOFT_AMRNB_ENCODER_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+namespace android {
+
+struct SoftAMRNBEncoder : public SimpleSoftOMXComponent {
+ SoftAMRNBEncoder(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ virtual ~SoftAMRNBEncoder();
+
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params);
+
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
+ virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
+ enum {
+ kNumBuffers = 4,
+ kNumSamplesPerFrame = 160,
+ };
+
+ void *mEncState;
+ void *mSidState;
+
+ OMX_U32 mBitRate;
+ int mMode;
+
+ size_t mInputSize;
+ int16_t mInputFrame[kNumSamplesPerFrame];
+ int64_t mInputTimeUs;
+
+ bool mSawInputEOS;
+ bool mSignalledError;
+
+ void initPorts();
+ status_t initEncoder();
+
+ status_t setAudioParams();
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftAMRNBEncoder);
+};
+
+} // namespace android
+
+#endif // SOFT_AMRNB_ENCODER_H_
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index ae43870..6ce6171 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -117,4 +117,26 @@
include $(BUILD_STATIC_LIBRARY)
+################################################################################
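+# Software AMR-WB encoder OMX component (OMX.google.amrwb.encoder)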
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ SoftAMRWBEncoder.cpp
+
+LOCAL_C_INCLUDES := \
+ frameworks/base/media/libstagefright/include \
+ frameworks/base/include/media/stagefright/openmax \
+ frameworks/base/media/libstagefright/codecs/common/include \
+
+LOCAL_STATIC_LIBRARIES := \
+ libstagefright_amrwbenc
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright_omx libstagefright_foundation libutils \
+ libstagefright_enc_common
+
+LOCAL_MODULE := libstagefright_soft_amrwbenc
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
new file mode 100644
index 0000000..9ccb49c
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAMRWBEncoder"
+#include <utils/Log.h>
+
+#include "SoftAMRWBEncoder.h"
+
+#include "cmnMemory.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+namespace android {
+
+static const int32_t kSampleRate = 16000;
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+SoftAMRWBEncoder::SoftAMRWBEncoder(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mEncoderHandle(NULL),
+ mApiHandle(NULL),
+ mMemOperator(NULL),
+ mBitRate(0),
+ mMode(VOAMRWB_MD66),
+ mInputSize(0),
+ mInputTimeUs(-1ll),
+ mSawInputEOS(false),
+ mSignalledError(false) {
+ initPorts();
+ CHECK_EQ(initEncoder(), (status_t)OK);
+}
+
+SoftAMRWBEncoder::~SoftAMRWBEncoder() {
+ if (mEncoderHandle != NULL) {
+ CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle));
+ mEncoderHandle = NULL;
+ }
+
+ delete mApiHandle;
+ mApiHandle = NULL;
+
+ delete mMemOperator;
+ mMemOperator = NULL;
+}
+
+void SoftAMRWBEncoder::initPorts() {
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+
+ def.nPortIndex = 0;
+ def.eDir = OMX_DirInput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t);
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 1;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ addPort(def);
+
+ def.nPortIndex = 1;
+ def.eDir = OMX_DirOutput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = 8192;
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 2;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/amr-wb");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingAMR;
+
+ addPort(def);
+}
+
+status_t SoftAMRWBEncoder::initEncoder() {
+ mApiHandle = new VO_AUDIO_CODECAPI;
+
+ if (VO_ERR_NONE != voGetAMRWBEncAPI(mApiHandle)) {
+ ALOGE("Failed to get api handle");
+ return UNKNOWN_ERROR;
+ }
+
+ mMemOperator = new VO_MEM_OPERATOR;
+ mMemOperator->Alloc = cmnMemAlloc;
+ mMemOperator->Copy = cmnMemCopy;
+ mMemOperator->Free = cmnMemFree;
+ mMemOperator->Set = cmnMemSet;
+ mMemOperator->Check = cmnMemCheck;
+
+ VO_CODEC_INIT_USERDATA userData;
+ memset(&userData, 0, sizeof(userData));
+ userData.memflag = VO_IMF_USERMEMOPERATOR;
+ userData.memData = (VO_PTR) mMemOperator;
+
+ if (VO_ERR_NONE != mApiHandle->Init(
+ &mEncoderHandle, VO_AUDIO_CodingAMRWB, &userData)) {
+ ALOGE("Failed to init AMRWB encoder");
+ return UNKNOWN_ERROR;
+ }
+
+ VOAMRWBFRAMETYPE type = VOAMRWB_RFC3267;
+ if (VO_ERR_NONE != mApiHandle->SetParam(
+ mEncoderHandle, VO_PID_AMRWB_FRAMETYPE, &type)) {
+ ALOGE("Failed to set AMRWB encoder frame type to %d", type);
+ return UNKNOWN_ERROR;
+ }
+
+ return OK;
+}
+
+OMX_ERRORTYPE SoftAMRWBEncoder::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamAudioPortFormat:
+ {
+ OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+ (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+ if (formatParams->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (formatParams->nIndex > 0) {
+ return OMX_ErrorNoMore;
+ }
+
+ formatParams->eEncoding =
+ (formatParams->nPortIndex == 0)
+ ? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR;
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAmr:
+ {
+ OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+ (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+ if (amrParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ amrParams->nChannels = 1;
+ amrParams->nBitRate = mBitRate;
+
+ amrParams->eAMRBandMode =
+ (OMX_AUDIO_AMRBANDMODETYPE)(mMode + OMX_AUDIO_AMRBandModeWB0);
+
+ amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+ amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eEndian = OMX_EndianBig;
+ pcmParams->bInterleaved = OMX_TRUE;
+ pcmParams->nBitPerSample = 16;
+ pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF;
+
+ pcmParams->nChannels = 1;
+ pcmParams->nSamplingRate = kSampleRate;
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalGetParameter(index, params);
+ }
+}
+
+OMX_ERRORTYPE SoftAMRWBEncoder::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamStandardComponentRole:
+ {
+ const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+ (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+ if (strncmp((const char *)roleParams->cRole,
+ "audio_encoder.amrwb",
+ OMX_MAX_STRINGNAME_SIZE - 1)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPortFormat:
+ {
+ const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+ (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+ if (formatParams->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (formatParams->nIndex > 0) {
+ return OMX_ErrorNoMore;
+ }
+
+ if ((formatParams->nPortIndex == 0
+ && formatParams->eEncoding != OMX_AUDIO_CodingPCM)
+ || (formatParams->nPortIndex == 1
+ && formatParams->eEncoding != OMX_AUDIO_CodingAMR)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAmr:
+ {
+ OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+ (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+ if (amrParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (amrParams->nChannels != 1
+ || amrParams->eAMRDTXMode != OMX_AUDIO_AMRDTXModeOff
+ || amrParams->eAMRFrameFormat
+ != OMX_AUDIO_AMRFrameFormatFSF
+ || amrParams->eAMRBandMode < OMX_AUDIO_AMRBandModeWB0
+ || amrParams->eAMRBandMode > OMX_AUDIO_AMRBandModeWB8) {
+ return OMX_ErrorUndefined;
+ }
+
+ mBitRate = amrParams->nBitRate;
+
+ mMode = (VOAMRWBMODE)(
+ amrParams->eAMRBandMode - OMX_AUDIO_AMRBandModeWB0);
+
+ amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+ amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+ if (VO_ERR_NONE !=
+ mApiHandle->SetParam(
+ mEncoderHandle, VO_PID_AMRWB_MODE, &mMode)) {
+ ALOGE("Failed to set AMRWB encoder mode to %d", mMode);
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (pcmParams->nChannels != 1
+ || pcmParams->nSamplingRate != (OMX_U32)kSampleRate) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+
+ default:
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+}
+
+void SoftAMRWBEncoder::onQueueFilled(OMX_U32 portIndex) {
+ if (mSignalledError) {
+ return;
+ }
+
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
+
+ size_t numBytesPerInputFrame = kNumSamplesPerFrame * sizeof(int16_t);
+
+ for (;;) {
+ // We do the following until we run out of buffers.
+
+ while (mInputSize < numBytesPerInputFrame) {
+ // As long as there's still input data to be read we
+ // will drain "kNumSamplesPerFrame" samples
+ // into the "mInputFrame" buffer and then encode those
+ // as a unit into an output buffer.
+
+ if (mSawInputEOS || inQueue.empty()) {
+ return;
+ }
+
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ const void *inData = inHeader->pBuffer + inHeader->nOffset;
+
+ size_t copy = numBytesPerInputFrame - mInputSize;
+ if (copy > inHeader->nFilledLen) {
+ copy = inHeader->nFilledLen;
+ }
+
+ if (mInputSize == 0) {
+ mInputTimeUs = inHeader->nTimeStamp;
+ }
+
+ memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
+ mInputSize += copy;
+
+ inHeader->nOffset += copy;
+ inHeader->nFilledLen -= copy;
+
+ // The input buffer's timestamp has, in effect, advanced by the
+ // number of audio frames we just consumed by advancing nOffset.
+ inHeader->nTimeStamp +=
+ (copy * 1000000ll / kSampleRate) / sizeof(int16_t);
+
+ if (inHeader->nFilledLen == 0) {
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ ALOGV("saw input EOS");
+ mSawInputEOS = true;
+
+ // Pad any remaining data with zeroes.
+ memset((uint8_t *)mInputFrame + mInputSize,
+ 0,
+ numBytesPerInputFrame - mInputSize);
+
+ mInputSize = numBytesPerInputFrame;
+ }
+
+ inQueue.erase(inQueue.begin());
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+
+ inData = NULL;
+ inHeader = NULL;
+ inInfo = NULL;
+ }
+ }
+
+ // At this point we have all the input data necessary to encode
+ // a single frame; all we need now is an output buffer to store the
+ // result in.
+
+ if (outQueue.empty()) {
+ return;
+ }
+
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ uint8_t *outPtr = outHeader->pBuffer + outHeader->nOffset;
+ size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;
+
+ VO_CODECBUFFER inputData;
+ memset(&inputData, 0, sizeof(inputData));
+ inputData.Buffer = (unsigned char *) mInputFrame;
+ inputData.Length = mInputSize;
+
+ CHECK_EQ(VO_ERR_NONE,
+ mApiHandle->SetInputData(mEncoderHandle, &inputData));
+
+ VO_CODECBUFFER outputData;
+ memset(&outputData, 0, sizeof(outputData));
+ VO_AUDIO_OUTPUTINFO outputInfo;
+ memset(&outputInfo, 0, sizeof(outputInfo));
+
+ outputData.Buffer = outPtr;
+ outputData.Length = outAvailable;
+ VO_U32 ret = mApiHandle->GetOutputData(
+ mEncoderHandle, &outputData, &outputInfo);
+ CHECK(ret == VO_ERR_NONE || ret == VO_ERR_INPUT_BUFFER_SMALL);
+
+ outHeader->nFilledLen = outputData.Length;
+ outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
+
+ if (mSawInputEOS) {
+ // We also tag this output buffer with EOS if it corresponds
+ // to the final input buffer.
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ }
+
+ outHeader->nTimeStamp = mInputTimeUs;
+
+#if 0
+ ALOGI("sending %ld bytes of data (time = %lld us, flags = 0x%08lx)",
+ outHeader->nFilledLen, mInputTimeUs, outHeader->nFlags);
+
+ hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
+#endif
+
+ outQueue.erase(outQueue.begin());
+ outInfo->mOwnedByUs = false;
+ notifyFillBufferDone(outHeader);
+
+ outHeader = NULL;
+ outInfo = NULL;
+
+ mInputSize = 0;
+ }
+}
+
+} // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+ const char *name, const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+ return new android::SoftAMRWBEncoder(name, callbacks, appData, component);
+}
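The encoder above consumes exactly kNumSamplesPerFrame (320) 16-bit mono samples per call, i.e. one 20 ms AMR-WB frame at the fixed 16 kHz input rate, and onQueueFilled() advances the input timestamp by converting the copied bytes back into microseconds. A minimal standalone sketch of that arithmetic, reusing the same constants (illustrative only, not part of this patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t kSampleRate = 16000;        // AMR-WB input rate in Hz
        const size_t kNumSamplesPerFrame = 320;   // one 20 ms frame
        const size_t copiedBytes = kNumSamplesPerFrame * sizeof(int16_t);  // 640

        // Mirrors the nTimeStamp update: bytes -> samples -> microseconds.
        const int64_t advanceUs =
                (copiedBytes * 1000000ll / kSampleRate) / sizeof(int16_t);
        printf("a full input frame advances the timestamp by %lld us\n",
               (long long)advanceUs);             // prints 20000
        return 0;
    }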
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h
new file mode 100644
index 0000000..d0c1dab
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_AMRWB_ENCODER_H_
+
+#define SOFT_AMRWB_ENCODER_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+#include "voAMRWB.h"
+
+struct VO_AUDIO_CODECAPI;
+struct VO_MEM_OPERATOR;
+
+namespace android {
+
+struct SoftAMRWBEncoder : public SimpleSoftOMXComponent {
+ SoftAMRWBEncoder(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ virtual ~SoftAMRWBEncoder();
+
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params);
+
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
+ virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
+ enum {
+ kNumBuffers = 4,
+ kNumSamplesPerFrame = 320,
+ };
+
+ void *mEncoderHandle;
+ VO_AUDIO_CODECAPI *mApiHandle;
+ VO_MEM_OPERATOR *mMemOperator;
+
+ OMX_U32 mBitRate;
+ VOAMRWBMODE mMode;
+
+ size_t mInputSize;
+ int16_t mInputFrame[kNumSamplesPerFrame];
+ int64_t mInputTimeUs;
+
+ bool mSawInputEOS;
+ bool mSignalledError;
+
+ void initPorts();
+ status_t initEncoder();
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftAMRWBEncoder);
+};
+
+} // namespace android
+
+#endif // SOFT_AMRWB_ENCODER_H_
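SoftAMRWBEncoder maps OMX_AUDIO_AMRBandModeWB0..WB8 one-to-one onto the encoder's VOAMRWBMODE values 0..8. For reference, those nine modes correspond to the standard 3GPP AMR-WB bitrates; a small lookup sketch of that correspondence (the table reflects the spec, not anything defined in this patch):

    #include <cstdint>
    #include <cstddef>

    // Standard AMR-WB bitrates in bits per second, indexed by mode 0..8
    // (i.e. OMX_AUDIO_AMRBandModeWB0 .. OMX_AUDIO_AMRBandModeWB8).
    static const int32_t kAmrWbBitRates[] = {
        6600, 8850, 12650, 14250, 15850, 18250, 19850, 23050, 23850,
    };

    static inline int32_t bitRateForWbMode(size_t mode) {
        return mode < sizeof(kAmrWbBitRates) / sizeof(kAmrWbBitRates[0])
                ? kAmrWbBitRates[mode] : -1;
    }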
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 5cc3f78..f3ef3de 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -144,8 +144,8 @@
return ERROR_UNSUPPORTED;
}
- uint32_t *dst_ptr = (uint32_t *)dst.mBits
- + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+ uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ + dst.mCropTop * dst.mWidth + dst.mCropLeft;
const uint8_t *src_ptr = (const uint8_t *)src.mBits
+ (src.mCropTop * dst.mWidth + src.mCropLeft) * 2;
@@ -182,11 +182,15 @@
| ((kAdjustedClip[g2] >> 2) << 5)
| (kAdjustedClip[b2] >> 3);
- dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+ if (x + 1 < src.cropWidth()) {
+ *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+ } else {
+ dst_ptr[x] = rgb1;
+ }
}
src_ptr += src.mWidth * 2;
- dst_ptr += dst.mWidth / 2;
+ dst_ptr += dst.mWidth;
}
return OK;
@@ -290,15 +294,14 @@
const BitmapParams &src, const BitmapParams &dst) {
uint8_t *kAdjustedClip = initClip();
- if (!((dst.mWidth & 3) == 0
- && (src.mCropLeft & 1) == 0
+ if (!((src.mCropLeft & 1) == 0
&& src.cropWidth() == dst.cropWidth()
&& src.cropHeight() == dst.cropHeight())) {
return ERROR_UNSUPPORTED;
}
- uint32_t *dst_ptr = (uint32_t *)dst.mBits
- + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+ uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ + dst.mCropTop * dst.mWidth + dst.mCropLeft;
const uint8_t *src_y =
(const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
@@ -340,7 +343,11 @@
| ((kAdjustedClip[g2] >> 2) << 5)
| (kAdjustedClip[r2] >> 3);
- dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+ if (x + 1 < src.cropWidth()) {
+ *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+ } else {
+ dst_ptr[x] = rgb1;
+ }
}
src_y += src.mWidth;
@@ -349,7 +356,7 @@
src_u += src.mWidth;
}
- dst_ptr += dst.mWidth / 2;
+ dst_ptr += dst.mWidth;
}
return OK;
@@ -361,15 +368,14 @@
uint8_t *kAdjustedClip = initClip();
- if (!((dst.mWidth & 3) == 0
- && (src.mCropLeft & 1) == 0
+ if (!((src.mCropLeft & 1) == 0
&& src.cropWidth() == dst.cropWidth()
&& src.cropHeight() == dst.cropHeight())) {
return ERROR_UNSUPPORTED;
}
- uint32_t *dst_ptr = (uint32_t *)dst.mBits
- + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+ uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ + dst.mCropTop * dst.mWidth + dst.mCropLeft;
const uint8_t *src_y =
(const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
@@ -411,7 +417,11 @@
| ((kAdjustedClip[g2] >> 2) << 5)
| (kAdjustedClip[r2] >> 3);
- dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+ if (x + 1 < src.cropWidth()) {
+ *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+ } else {
+ dst_ptr[x] = rgb1;
+ }
}
src_y += src.mWidth;
@@ -420,7 +430,7 @@
src_u += src.mWidth;
}
- dst_ptr += dst.mWidth / 2;
+ dst_ptr += dst.mWidth;
}
return OK;
@@ -430,15 +440,14 @@
const BitmapParams &src, const BitmapParams &dst) {
uint8_t *kAdjustedClip = initClip();
- if (!((dst.mWidth & 3) == 0
- && (src.mCropLeft & 1) == 0
+ if (!((src.mCropLeft & 1) == 0
&& src.cropWidth() == dst.cropWidth()
&& src.cropHeight() == dst.cropHeight())) {
return ERROR_UNSUPPORTED;
}
- uint32_t *dst_ptr = (uint32_t *)dst.mBits
- + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+ uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ + dst.mCropTop * dst.mWidth + dst.mCropLeft;
const uint8_t *src_y = (const uint8_t *)src.mBits;
@@ -478,7 +487,11 @@
| ((kAdjustedClip[g2] >> 2) << 5)
| (kAdjustedClip[b2] >> 3);
- dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+ if (x + 1 < src.cropWidth()) {
+ *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+ } else {
+ dst_ptr[x] = rgb1;
+ }
}
src_y += src.mWidth;
@@ -487,7 +500,7 @@
src_u += src.mWidth;
}
- dst_ptr += dst.mWidth / 2;
+ dst_ptr += dst.mWidth;
}
return OK;
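The ColorConverter change above switches the destination pointer from uint32_t to uint16_t: pixel pairs are still written with a single 32-bit store, but the destination width no longer has to be a multiple of four, and a trailing odd pixel gets its own 16-bit store. A little-endian sketch of the same RGB565 packing and odd-width handling (assumes the destination row is at least 2-byte aligned, as in the converter; illustrative only):

    #include <cstdint>
    #include <cstddef>

    // Pack 8-bit R/G/B into a 16-bit RGB565 value, as the converter does with
    // its clipped components.
    static inline uint16_t packRGB565(uint8_t r, uint8_t g, uint8_t b) {
        return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
    }

    // Write one row of RGB565 pixels two at a time, with the same fallback for
    // the last pixel of an odd-width row that the patched loops use.
    static void writeRow565(uint16_t *dst, const uint16_t *pixels, size_t width) {
        for (size_t x = 0; x < width; x += 2) {
            if (x + 1 < width) {
                *(uint32_t *)(&dst[x]) =
                        ((uint32_t)pixels[x + 1] << 16) | pixels[x];
            } else {
                dst[x] = pixels[x];
            }
        }
    }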
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 82c6476..a7a3d47 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -41,7 +41,7 @@
class DrmManagerClinet;
class DecryptHandle;
-class TimedTextPlayer;
+class TimedTextDriver;
struct WVMExtractor;
struct AwesomeRenderer : public RefBase {
@@ -232,7 +232,7 @@
sp<DecryptHandle> mDecryptHandle;
int64_t mLastVideoTimeUs;
- TimedTextPlayer *mTextPlayer;
+ TimedTextDriver *mTextDriver;
mutable Mutex mTimedTextLock;
sp<WVMExtractor> mWVMExtractor;
@@ -326,4 +326,3 @@
} // namespace android
#endif // AWESOME_PLAYER_H_
-
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 53e764f..2c87b34 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -31,7 +31,7 @@
public:
OMX();
- virtual bool livesLocally(pid_t pid);
+ virtual bool livesLocally(node_id node, pid_t pid);
virtual status_t listNodes(List<ComponentInfo> *list);
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 694b12d..ace883c 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -185,7 +185,7 @@
instance->onObserverDied(mMaster);
}
-bool OMX::livesLocally(pid_t pid) {
+bool OMX::livesLocally(node_id node, pid_t pid) {
return pid == getpid();
}
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 0914f32..c79e01f 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -333,8 +333,9 @@
void SimpleSoftOMXComponent::onMessageReceived(const sp<AMessage> &msg) {
Mutex::Autolock autoLock(mLock);
-
- switch (msg->what()) {
+ uint32_t msgType = msg->what();
+ ALOGV("msgType = %d", msgType);
+ switch (msgType) {
case kWhatSendCommand:
{
int32_t cmd, param;
@@ -354,27 +355,27 @@
CHECK(mState == OMX_StateExecuting && mTargetState == mState);
bool found = false;
- for (size_t i = 0; i < mPorts.size(); ++i) {
- PortInfo *port = &mPorts.editItemAt(i);
+ size_t portIndex = (kWhatEmptyThisBuffer == msgType)?
+ header->nInputPortIndex: header->nOutputPortIndex;
+ PortInfo *port = &mPorts.editItemAt(portIndex);
- for (size_t j = 0; j < port->mBuffers.size(); ++j) {
- BufferInfo *buffer = &port->mBuffers.editItemAt(j);
+ for (size_t j = 0; j < port->mBuffers.size(); ++j) {
+ BufferInfo *buffer = &port->mBuffers.editItemAt(j);
- if (buffer->mHeader == header) {
- CHECK(!buffer->mOwnedByUs);
+ if (buffer->mHeader == header) {
+ CHECK(!buffer->mOwnedByUs);
- buffer->mOwnedByUs = true;
+ buffer->mOwnedByUs = true;
- CHECK((msg->what() == kWhatEmptyThisBuffer
- && port->mDef.eDir == OMX_DirInput)
- || (port->mDef.eDir == OMX_DirOutput));
+ CHECK((msgType == kWhatEmptyThisBuffer
+ && port->mDef.eDir == OMX_DirInput)
+ || (port->mDef.eDir == OMX_DirOutput));
- port->mQueue.push_back(buffer);
- onQueueFilled(i);
+ port->mQueue.push_back(buffer);
+ onQueueFilled(portIndex);
- found = true;
- break;
- }
+ found = true;
+ break;
}
}
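The SimpleSoftOMXComponent hunk above replaces the scan over every port and every buffer with a direct lookup: an OMX buffer header already records which port it belongs to, so the component can index mPorts immediately and only search that port's buffer list. The core of the lookup in isolation (a simplified helper, not the actual member function):

    #include <OMX_Core.h>

    // For kWhatEmptyThisBuffer the header travels on its input port, for
    // kWhatFillThisBuffer on its output port.
    static inline OMX_U32 portIndexForHeader(
            bool emptyThisBuffer, const OMX_BUFFERHEADERTYPE *header) {
        return emptyThisBuffer ? header->nInputPortIndex
                               : header->nOutputPortIndex;
    }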
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index cf9e8c9..99ffe7d 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -37,7 +37,9 @@
{ "OMX.google.aac.decoder", "aacdec", "audio_decoder.aac" },
{ "OMX.google.aac.encoder", "aacenc", "audio_encoder.aac" },
{ "OMX.google.amrnb.decoder", "amrdec", "audio_decoder.amrnb" },
+ { "OMX.google.amrnb.encoder", "amrnbenc", "audio_encoder.amrnb" },
{ "OMX.google.amrwb.decoder", "amrdec", "audio_decoder.amrwb" },
+ { "OMX.google.amrwb.encoder", "amrwbenc", "audio_encoder.amrwb" },
{ "OMX.google.h264.decoder", "h264dec", "video_decoder.avc" },
{ "OMX.google.g711.alaw.decoder", "g711dec", "audio_decoder.g711alaw" },
{ "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" },
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index bf69428..41c08be 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -7,11 +7,13 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright libbinder libmedia libutils
-LOCAL_C_INCLUDES:= \
+LOCAL_C_INCLUDES := \
$(JNI_H_INCLUDE) \
frameworks/base/media/libstagefright \
$(TOP)/frameworks/base/include/media/stagefright/openmax
-LOCAL_MODULE:= omx_tests
+LOCAL_MODULE := omx_tests
+
+LOCAL_MODULE_TAGS := tests
include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk
index 59d0e15..8b23dee 100644
--- a/media/libstagefright/timedtext/Android.mk
+++ b/media/libstagefright/timedtext/Android.mk
@@ -3,7 +3,10 @@
LOCAL_SRC_FILES:= \
TextDescriptions.cpp \
- TimedTextParser.cpp \
+ TimedTextDriver.cpp \
+ TimedTextInBandSource.cpp \
+ TimedTextSource.cpp \
+ TimedTextSRTSource.cpp \
TimedTextPlayer.cpp
LOCAL_CFLAGS += -Wno-multichar
diff --git a/media/libstagefright/timedtext/TimedTextDriver.cpp b/media/libstagefright/timedtext/TimedTextDriver.cpp
new file mode 100644
index 0000000..9ec9415
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextDriver.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedTextDriver"
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+
+#include "TimedTextDriver.h"
+
+#include "TextDescriptions.h"
+#include "TimedTextPlayer.h"
+#include "TimedTextSource.h"
+
+namespace android {
+
+TimedTextDriver::TimedTextDriver(
+ const wp<MediaPlayerBase> &listener)
+ : mLooper(new ALooper),
+ mListener(listener),
+ mState(UNINITIALIZED) {
+ mLooper->setName("TimedTextDriver");
+ mLooper->start();
+ mPlayer = new TimedTextPlayer(listener);
+ mLooper->registerHandler(mPlayer);
+}
+
+TimedTextDriver::~TimedTextDriver() {
+ mTextInBandVector.clear();
+ mTextOutOfBandVector.clear();
+ mLooper->stop();
+}
+
+status_t TimedTextDriver::setTimedTextTrackIndex_l(int32_t index) {
+ if (index >=
+ (int)(mTextInBandVector.size() + mTextOutOfBandVector.size())) {
+ return BAD_VALUE;
+ }
+
+ sp<TimedTextSource> source;
+ if (index < mTextInBandVector.size()) {
+ source = mTextInBandVector.itemAt(index);
+ } else {
+ source = mTextOutOfBandVector.itemAt(index - mTextInBandVector.size());
+ }
+ mPlayer->setDataSource(source);
+ return OK;
+}
+
+status_t TimedTextDriver::start() {
+ Mutex::Autolock autoLock(mLock);
+ switch (mState) {
+ case UNINITIALIZED:
+ return INVALID_OPERATION;
+ case STOPPED:
+ mPlayer->start();
+ break;
+ case PLAYING:
+ return OK;
+ case PAUSED:
+ mPlayer->resume();
+ break;
+ default:
+ TRESPASS();
+ }
+ mState = PLAYING;
+ return OK;
+}
+
+status_t TimedTextDriver::stop() {
+ return pause();
+}
+
+// TODO: Test if pause() works properly.
+// Scenario 1: start - pause - resume
+// Scenario 2: start - seek
+// Scenario 3: start - pause - seek - resume
+status_t TimedTextDriver::pause() {
+ Mutex::Autolock autoLock(mLock);
+ switch (mState) {
+ case UNINITIALIZED:
+ return INVALID_OPERATION;
+ case STOPPED:
+ return OK;
+ case PLAYING:
+ mPlayer->pause();
+ break;
+ case PAUSED:
+ return OK;
+ default:
+ TRESPASS();
+ }
+ mState = PAUSED;
+ return OK;
+}
+
+status_t TimedTextDriver::resume() {
+ return start();
+}
+
+status_t TimedTextDriver::seekToAsync(int64_t timeUs) {
+ mPlayer->seekToAsync(timeUs);
+ return OK;
+}
+
+status_t TimedTextDriver::setTimedTextTrackIndex(int32_t index) {
+ // TODO: This is the current implementation of MediaPlayer::disableTimedText().
+ // Find a more readable way to express this.
+ if (index < 0) {
+ mPlayer->pause();
+ return OK;
+ }
+
+ status_t ret = OK;
+ Mutex::Autolock autoLock(mLock);
+ switch (mState) {
+ case UNINITIALIZED:
+ ret = INVALID_OPERATION;
+ break;
+ case PAUSED:
+ ret = setTimedTextTrackIndex_l(index);
+ break;
+ case PLAYING:
+ mPlayer->pause();
+ ret = setTimedTextTrackIndex_l(index);
+ if (ret != OK) {
+ break;
+ }
+ mPlayer->start();
+ break;
+ case STOPPED:
+ // TODO: The only difference between STOPPED and PAUSED is this
+ // part. Revise the flow from "MediaPlayer::enableTimedText()" and
+ // remove one of the states, PAUSED or STOPPED, if possible.
+ ret = setTimedTextTrackIndex_l(index);
+ if (ret != OK) {
+ break;
+ }
+ mPlayer->start();
+ break;
+ default:
+ TRESPASS();
+ }
+ return ret;
+}
+
+status_t TimedTextDriver::addInBandTextSource(
+ const sp<MediaSource>& mediaSource) {
+ sp<TimedTextSource> source =
+ TimedTextSource::CreateTimedTextSource(mediaSource);
+ if (source == NULL) {
+ return ERROR_UNSUPPORTED;
+ }
+ Mutex::Autolock autoLock(mLock);
+ mTextInBandVector.add(source);
+ if (mState == UNINITIALIZED) {
+ mState = STOPPED;
+ }
+ return OK;
+}
+
+status_t TimedTextDriver::addOutOfBandTextSource(
+ const Parcel &request) {
+ // TODO: Define "TimedTextSource::CreateFromURI(uri)"
+ // and move the lines below there?
+
+ // String values written in Parcel are UTF-16 values.
+ const String16 uri16 = request.readString16();
+ String8 uri = String8(uri16);
+
+ uri.toLower();
+ // To support local subtitle file only for now
+ if (strncasecmp("file://", uri.string(), 7)) {
+ return ERROR_UNSUPPORTED;
+ }
+ sp<DataSource> dataSource =
+ DataSource::CreateFromURI(uri);
+ if (dataSource == NULL) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ sp<TimedTextSource> source;
+ if (uri.getPathExtension() == String8(".srt")) {
+ source = TimedTextSource::CreateTimedTextSource(
+ dataSource, TimedTextSource::OUT_OF_BAND_FILE_SRT);
+ }
+
+ if (source == NULL) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ Mutex::Autolock autoLock(mLock);
+
+ mTextOutOfBandVector.add(source);
+ if (mState == UNINITIALIZED) {
+ mState = STOPPED;
+ }
+ return OK;
+}
+
+} // namespace android
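TimedTextDriver keeps a small state machine (UNINITIALIZED, STOPPED, PLAYING, PAUSED) guarded by mLock: adding the first source moves it to STOPPED, start() and pause() toggle between PLAYING and PAUSED, and resume()/stop() are thin aliases for start()/pause(). A hypothetical caller-side sketch of enabling an in-band track (the function name and flow are illustrative, not code from this patch):

    #include <media/stagefright/MediaSource.h>
    #include "TimedTextDriver.h"

    using namespace android;

    // Assumes a driver owned by the player and an already extracted text track.
    static status_t enableInBandTimedText(TimedTextDriver *driver,
                                          const sp<MediaSource> &textTrack,
                                          int32_t index) {
        status_t err = driver->addInBandTextSource(textTrack);  // -> STOPPED
        if (err != OK) {
            return err;
        }
        err = driver->setTimedTextTrackIndex(index);  // select and prime the source
        if (err != OK) {
            return err;
        }
        return driver->start();                       // -> PLAYING
    }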
diff --git a/media/libstagefright/timedtext/TimedTextDriver.h b/media/libstagefright/timedtext/TimedTextDriver.h
new file mode 100644
index 0000000..efedb6e
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextDriver.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_DRIVER_H_
+#define TIMED_TEXT_DRIVER_H_
+
+#include <media/stagefright/foundation/ABase.h> // for DISALLOW_* macro
+#include <utils/Errors.h> // for status_t
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class ALooper;
+class MediaPlayerBase;
+class MediaSource;
+class Parcel;
+class TimedTextPlayer;
+class TimedTextSource;
+
+class TimedTextDriver {
+public:
+ TimedTextDriver(const wp<MediaPlayerBase> &listener);
+
+ ~TimedTextDriver();
+
+ // TODO: pause-resume pair seems equivalent to stop-start pair.
+ // Check if it is replaceable with stop-start.
+ status_t start();
+ status_t stop();
+ status_t pause();
+ status_t resume();
+
+ status_t seekToAsync(int64_t timeUs);
+
+ status_t addInBandTextSource(const sp<MediaSource>& source);
+ status_t addOutOfBandTextSource(const Parcel &request);
+
+ status_t setTimedTextTrackIndex(int32_t index);
+
+private:
+ Mutex mLock;
+
+ enum State {
+ UNINITIALIZED,
+ STOPPED,
+ PLAYING,
+ PAUSED,
+ };
+
+ sp<ALooper> mLooper;
+ sp<TimedTextPlayer> mPlayer;
+ wp<MediaPlayerBase> mListener;
+
+ // Variables to be guarded by mLock.
+ State mState;
+ Vector<sp<TimedTextSource> > mTextInBandVector;
+ Vector<sp<TimedTextSource> > mTextOutOfBandVector;
+ // -- End of variables to be guarded by mLock
+
+ status_t setTimedTextTrackIndex_l(int32_t index);
+
+ DISALLOW_EVIL_CONSTRUCTORS(TimedTextDriver);
+};
+
+} // namespace android
+
+#endif // TIMED_TEXT_DRIVER_H_
diff --git a/media/libstagefright/timedtext/TimedTextInBandSource.cpp b/media/libstagefright/timedtext/TimedTextInBandSource.cpp
new file mode 100644
index 0000000..f2c4d54
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextInBandSource.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedTextInBandSource"
+#include <utils/Log.h>
+
+#include <binder/Parcel.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDebug.h> // CHECK_XX macro
+#include <media/stagefright/MediaDefs.h> // for MEDIA_MIMETYPE_xxx
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+#include "TimedTextInBandSource.h"
+#include "TextDescriptions.h"
+
+namespace android {
+
+TimedTextInBandSource::TimedTextInBandSource(const sp<MediaSource>& mediaSource)
+ : mSource(mediaSource) {
+}
+
+TimedTextInBandSource::~TimedTextInBandSource() {
+}
+
+status_t TimedTextInBandSource::read(
+ int64_t *timeUs, Parcel *parcel, const MediaSource::ReadOptions *options) {
+ MediaBuffer *textBuffer = NULL;
+ status_t err = mSource->read(&textBuffer, options);
+ if (err != OK) {
+ return err;
+ }
+ CHECK(textBuffer != NULL);
+ textBuffer->meta_data()->findInt64(kKeyTime, timeUs);
+ // TODO: this is legacy code. When can 'timeUs' be <= 0?
+ if (*timeUs > 0) {
+ extractAndAppendLocalDescriptions(*timeUs, textBuffer, parcel);
+ }
+ textBuffer->release();
+ return OK;
+}
+
+// Each text sample consists of a string of text, optionally with sample
+// modifier description. The modifier description could specify a new
+// text style for the string of text. These descriptions are present only
+// if they are needed. This method is used to extract the modifier
+// description and append it at the end of the text.
+status_t TimedTextInBandSource::extractAndAppendLocalDescriptions(
+ int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel) {
+ const void *data;
+ size_t size = 0;
+ int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS;
+
+ const char *mime;
+ CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
+
+ if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
+ data = textBuffer->data();
+ size = textBuffer->size();
+
+ if (size > 0) {
+ parcel->freeData();
+ flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
+ return TextDescriptions::getParcelOfDescriptions(
+ (const uint8_t *)data, size, flag, timeUs / 1000, parcel);
+ }
+ return OK;
+ }
+ return ERROR_UNSUPPORTED;
+}
+
+// To extract and send the global text descriptions for all the text samples
+// in the text track or text file.
+// TODO: send error message to application via notifyListener()...?
+status_t TimedTextInBandSource::extractGlobalDescriptions(Parcel *parcel) {
+ const void *data;
+ size_t size = 0;
+ int32_t flag = TextDescriptions::GLOBAL_DESCRIPTIONS;
+
+ const char *mime;
+ CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
+
+ // support 3GPP only for now
+ if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
+ uint32_t type;
+ // get the 'tx3g' box content. This box contains the text descriptions
+ // used to render the text track
+ if (!mSource->getFormat()->findData(
+ kKeyTextFormatData, &type, &data, &size)) {
+ return ERROR_MALFORMED;
+ }
+
+ if (size > 0) {
+ flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
+ return TextDescriptions::getParcelOfDescriptions(
+ (const uint8_t *)data, size, flag, 0, parcel);
+ }
+ return OK;
+ }
+ return ERROR_UNSUPPORTED;
+}
+
+} // namespace android
diff --git a/media/libstagefright/timedtext/TimedTextInBandSource.h b/media/libstagefright/timedtext/TimedTextInBandSource.h
new file mode 100644
index 0000000..26e5737
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextInBandSource.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_IN_BAND_SOURCE_H_
+#define TIMED_TEXT_IN_BAND_SOURCE_H_
+
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+
+#include "TimedTextSource.h"
+
+namespace android {
+
+class MediaBuffer;
+class Parcel;
+
+class TimedTextInBandSource : public TimedTextSource {
+ public:
+ TimedTextInBandSource(const sp<MediaSource>& mediaSource);
+ virtual status_t start() { return mSource->start(); }
+ virtual status_t stop() { return mSource->stop(); }
+ virtual status_t read(
+ int64_t *timeUs,
+ Parcel *parcel,
+ const MediaSource::ReadOptions *options = NULL);
+ virtual status_t extractGlobalDescriptions(Parcel *parcel);
+
+ protected:
+ virtual ~TimedTextInBandSource();
+
+ private:
+ sp<MediaSource> mSource;
+
+ status_t extractAndAppendLocalDescriptions(
+ int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel);
+
+ DISALLOW_EVIL_CONSTRUCTORS(TimedTextInBandSource);
+};
+
+} // namespace android
+
+#endif // TIMED_TEXT_IN_BAND_SOURCE_H_
diff --git a/media/libstagefright/timedtext/TimedTextParser.cpp b/media/libstagefright/timedtext/TimedTextParser.cpp
deleted file mode 100644
index caea0a4..0000000
--- a/media/libstagefright/timedtext/TimedTextParser.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TimedTextParser.h"
-#include <media/stagefright/DataSource.h>
-
-namespace android {
-
-TimedTextParser::TimedTextParser()
- : mDataSource(NULL),
- mOffset(0),
- mIndex(0) {
-}
-
-TimedTextParser::~TimedTextParser() {
- reset();
-}
-
-status_t TimedTextParser::init(
- const sp<DataSource> &dataSource, FileType fileType) {
- mDataSource = dataSource;
- mFileType = fileType;
-
- status_t err;
- if ((err = scanFile()) != OK) {
- reset();
- return err;
- }
-
- return OK;
-}
-
-void TimedTextParser::reset() {
- mDataSource.clear();
- mTextVector.clear();
- mOffset = 0;
- mIndex = 0;
-}
-
-// scan the text file to get start/stop time and the
-// offset of each piece of text content
-status_t TimedTextParser::scanFile() {
- if (mFileType != OUT_OF_BAND_FILE_SRT) {
- return ERROR_UNSUPPORTED;
- }
-
- off64_t offset = 0;
- int64_t startTimeUs;
- bool endOfFile = false;
-
- while (!endOfFile) {
- TextInfo info;
- status_t err = getNextInSrtFileFormat(&offset, &startTimeUs, &info);
-
- if (err != OK) {
- if (err == ERROR_END_OF_STREAM) {
- endOfFile = true;
- } else {
- return err;
- }
- } else {
- mTextVector.add(startTimeUs, info);
- }
- }
-
- if (mTextVector.isEmpty()) {
- return ERROR_MALFORMED;
- }
- return OK;
-}
-
-// read one line started from *offset and store it into data.
-status_t TimedTextParser::readNextLine(off64_t *offset, AString *data) {
- char character;
-
- data->clear();
-
- while (true) {
- ssize_t err;
- if ((err = mDataSource->readAt(*offset, &character, 1)) < 1) {
- if (err == 0) {
- return ERROR_END_OF_STREAM;
- }
- return ERROR_IO;
- }
-
- (*offset) ++;
-
- // a line could end with CR, LF or CR + LF
- if (character == 10) {
- break;
- } else if (character == 13) {
- if ((err = mDataSource->readAt(*offset, &character, 1)) < 1) {
- if (err == 0) { // end of the stream
- return OK;
- }
- return ERROR_IO;
- }
-
- (*offset) ++;
-
- if (character != 10) {
- (*offset) --;
- }
- break;
- }
-
- data->append(character);
- }
-
- return OK;
-}
-
-/* SRT format:
- * Subtitle number
- * Start time --> End time
- * Text of subtitle (one or more lines)
- * Blank lines
- *
- * .srt file example:
- * 1
- * 00:00:20,000 --> 00:00:24,400
- * Altocumulus clouds occur between six thousand
- *
- * 2
- * 00:00:24,600 --> 00:00:27,800
- * and twenty thousand feet above ground level.
- */
-status_t TimedTextParser::getNextInSrtFileFormat(
- off64_t *offset, int64_t *startTimeUs, TextInfo *info) {
- AString data;
- status_t err;
-
- // To skip blank lines.
- do {
- if ((err = readNextLine(offset, &data)) != OK) {
- return err;
- }
- data.trim();
- } while(data.empty());
-
- // Just ignore the first non-blank line which is subtitle sequence number.
-
- if ((err = readNextLine(offset, &data)) != OK) {
- return err;
- }
- int hour1, hour2, min1, min2, sec1, sec2, msec1, msec2;
- // the start time format is: hours:minutes:seconds,milliseconds
- // 00:00:24,600 --> 00:00:27,800
- if (sscanf(data.c_str(), "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
- &hour1, &min1, &sec1, &msec1, &hour2, &min2, &sec2, &msec2) != 8) {
- return ERROR_MALFORMED;
- }
-
- *startTimeUs = ((hour1 * 3600 + min1 * 60 + sec1) * 1000 + msec1) * 1000ll;
- info->endTimeUs = ((hour2 * 3600 + min2 * 60 + sec2) * 1000 + msec2) * 1000ll;
- if (info->endTimeUs <= *startTimeUs) {
- return ERROR_MALFORMED;
- }
-
- info->offset = *offset;
-
- bool needMoreData = true;
- while (needMoreData) {
- if ((err = readNextLine(offset, &data)) != OK) {
- if (err == ERROR_END_OF_STREAM) {
- needMoreData = false;
- } else {
- return err;
- }
- }
-
- if (needMoreData) {
- data.trim();
- if (data.empty()) {
- // it's an empty line used to separate two subtitles
- needMoreData = false;
- }
- }
- }
-
- info->textLen = *offset - info->offset;
-
- return OK;
-}
-
-status_t TimedTextParser::getText(
- AString *text, int64_t *startTimeUs, int64_t *endTimeUs,
- const MediaSource::ReadOptions *options) {
- Mutex::Autolock autoLock(mLock);
-
- text->clear();
-
- int64_t seekTimeUs;
- MediaSource::ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)) {
- int64_t lastEndTimeUs = mTextVector.valueAt(mTextVector.size() - 1).endTimeUs;
- int64_t firstStartTimeUs = mTextVector.keyAt(0);
-
- if (seekTimeUs < 0 || seekTimeUs > lastEndTimeUs) {
- return ERROR_OUT_OF_RANGE;
- } else if (seekTimeUs < firstStartTimeUs) {
- mIndex = 0;
- } else {
- // binary search
- ssize_t low = 0;
- ssize_t high = mTextVector.size() - 1;
- ssize_t mid = 0;
- int64_t currTimeUs;
-
- while (low <= high) {
- mid = low + (high - low)/2;
- currTimeUs = mTextVector.keyAt(mid);
- const int diff = currTimeUs - seekTimeUs;
-
- if (diff == 0) {
- break;
- } else if (diff < 0) {
- low = mid + 1;
- } else {
- if ((high == mid + 1)
- && (seekTimeUs < mTextVector.keyAt(high))) {
- break;
- }
- high = mid - 1;
- }
- }
-
- mIndex = mid;
- }
- }
-
- TextInfo info = mTextVector.valueAt(mIndex);
- *startTimeUs = mTextVector.keyAt(mIndex);
- *endTimeUs = info.endTimeUs;
- mIndex ++;
-
- char *str = new char[info.textLen];
- if (mDataSource->readAt(info.offset, str, info.textLen) < info.textLen) {
- delete[] str;
- return ERROR_IO;
- }
-
- text->append(str, info.textLen);
- delete[] str;
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/timedtext/TimedTextParser.h b/media/libstagefright/timedtext/TimedTextParser.h
deleted file mode 100644
index 44774c2..0000000
--- a/media/libstagefright/timedtext/TimedTextParser.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMED_TEXT_PARSER_H_
-
-#define TIMED_TEXT_PARSER_H_
-
-#include <media/MediaPlayerInterface.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/MediaSource.h>
-
-namespace android {
-
-class DataSource;
-
-class TimedTextParser : public RefBase {
-public:
- TimedTextParser();
- virtual ~TimedTextParser();
-
- enum FileType {
- OUT_OF_BAND_FILE_SRT = 1,
- };
-
- status_t getText(AString *text, int64_t *startTimeUs, int64_t *endTimeUs,
- const MediaSource::ReadOptions *options = NULL);
- status_t init(const sp<DataSource> &dataSource, FileType fileType);
- void reset();
-
-private:
- Mutex mLock;
-
- sp<DataSource> mDataSource;
- off64_t mOffset;
-
- struct TextInfo {
- int64_t endTimeUs;
- // the offset of the text in the original file
- off64_t offset;
- int textLen;
- };
-
- int mIndex;
- FileType mFileType;
-
- // the key indicated the start time of the text
- KeyedVector<int64_t, TextInfo> mTextVector;
-
- status_t getNextInSrtFileFormat(
- off64_t *offset, int64_t *startTimeUs, TextInfo *info);
- status_t readNextLine(off64_t *offset, AString *data);
-
- status_t scanFile();
-
- DISALLOW_EVIL_CONSTRUCTORS(TimedTextParser);
-};
-
-} // namespace android
-
-#endif // TIMED_TEXT_PARSER_H_
-
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp
index 3014b0b..8c2df88 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.cpp
+++ b/media/libstagefright/timedtext/TimedTextPlayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,399 +18,164 @@
#define LOG_TAG "TimedTextPlayer"
#include <utils/Log.h>
-#include <binder/IPCThreadState.h>
-
+#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaDebug.h>
-#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/Utils.h>
+#include <media/MediaPlayerInterface.h>
-#include "include/AwesomePlayer.h"
#include "TimedTextPlayer.h"
-#include "TimedTextParser.h"
-#include "TextDescriptions.h"
+
+#include "TimedTextDriver.h"
+#include "TimedTextSource.h"
namespace android {
-struct TimedTextEvent : public TimedEventQueue::Event {
- TimedTextEvent(
- TimedTextPlayer *player,
- void (TimedTextPlayer::*method)())
- : mPlayer(player),
- mMethod(method) {
- }
+static const int64_t kAdjustmentProcessingTimeUs = 100000ll;
-protected:
- virtual ~TimedTextEvent() {}
-
- virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
- (mPlayer->*mMethod)();
- }
-
-private:
- TimedTextPlayer *mPlayer;
- void (TimedTextPlayer::*mMethod)();
-
- TimedTextEvent(const TimedTextEvent &);
- TimedTextEvent &operator=(const TimedTextEvent &);
-};
-
-TimedTextPlayer::TimedTextPlayer(
- AwesomePlayer *observer,
- const wp<MediaPlayerBase> &listener,
- TimedEventQueue *queue)
- : mSource(NULL),
- mOutOfBandSource(NULL),
- mSeekTimeUs(0),
- mStarted(false),
- mTextEventPending(false),
- mQueue(queue),
- mListener(listener),
- mObserver(observer),
- mTextBuffer(NULL),
- mTextParser(NULL),
- mTextType(kNoText) {
- mTextEvent = new TimedTextEvent(this, &TimedTextPlayer::onTextEvent);
+TimedTextPlayer::TimedTextPlayer(const wp<MediaPlayerBase> &listener)
+ : mListener(listener),
+ mSource(NULL),
+ mSendSubtitleGeneration(0) {
}
TimedTextPlayer::~TimedTextPlayer() {
- if (mStarted) {
- reset();
+ if (mSource != NULL) {
+ mSource->stop();
+ mSource.clear();
+ mSource = NULL;
}
-
- mTextTrackVector.clear();
- mTextOutOfBandVector.clear();
}
-status_t TimedTextPlayer::start(uint8_t index) {
- CHECK(!mStarted);
-
- if (index >=
- mTextTrackVector.size() + mTextOutOfBandVector.size()) {
- ALOGE("Incorrect text track index: %d", index);
- return BAD_VALUE;
- }
-
- status_t err;
- if (index < mTextTrackVector.size()) { // start an in-band text
- mSource = mTextTrackVector.itemAt(index);
-
- err = mSource->start();
-
- if (err != OK) {
- return err;
- }
- mTextType = kInBandText;
- } else { // start an out-of-band text
- OutOfBandText text =
- mTextOutOfBandVector.itemAt(index - mTextTrackVector.size());
-
- mOutOfBandSource = text.source;
- TimedTextParser::FileType fileType = text.type;
-
- if (mTextParser == NULL) {
- mTextParser = new TimedTextParser();
- }
-
- if ((err = mTextParser->init(mOutOfBandSource, fileType)) != OK) {
- return err;
- }
- mTextType = kOutOfBandText;
- }
-
- // send sample description format
- if ((err = extractAndSendGlobalDescriptions()) != OK) {
- return err;
- }
-
- int64_t positionUs;
- mObserver->getPosition(&positionUs);
- seekTo(positionUs);
-
- postTextEvent();
-
- mStarted = true;
-
- return OK;
+void TimedTextPlayer::start() {
+ sp<AMessage> msg = new AMessage(kWhatSeek, id());
+ msg->setInt64("seekTimeUs", -1);
+ msg->post();
}
void TimedTextPlayer::pause() {
- CHECK(mStarted);
-
- cancelTextEvent();
+ (new AMessage(kWhatPause, id()))->post();
}
void TimedTextPlayer::resume() {
- CHECK(mStarted);
-
- postTextEvent();
+ start();
}
-void TimedTextPlayer::reset() {
- CHECK(mStarted);
+void TimedTextPlayer::seekToAsync(int64_t timeUs) {
+ sp<AMessage> msg = new AMessage(kWhatSeek, id());
+ msg->setInt64("seekTimeUs", timeUs);
+ msg->post();
+}
- // send an empty text to clear the screen
- notifyListener(MEDIA_TIMED_TEXT);
+void TimedTextPlayer::setDataSource(sp<TimedTextSource> source) {
+ sp<AMessage> msg = new AMessage(kWhatSetSource, id());
+ msg->setObject("source", source);
+ msg->post();
+}
- cancelTextEvent();
-
- mSeeking = false;
- mStarted = false;
-
- if (mTextType == kInBandText) {
- if (mTextBuffer != NULL) {
- mTextBuffer->release();
- mTextBuffer = NULL;
+void TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatPause: {
+ mSendSubtitleGeneration++;
+ break;
}
-
- if (mSource != NULL) {
- mSource->stop();
- mSource.clear();
- mSource = NULL;
+ case kWhatSeek: {
+ int64_t seekTimeUs = 0;
+ msg->findInt64("seekTimeUs", &seekTimeUs);
+ if (seekTimeUs < 0) {
+ sp<MediaPlayerBase> listener = mListener.promote();
+ if (listener != NULL) {
+ int32_t positionMs = 0;
+ listener->getCurrentPosition(&positionMs);
+ seekTimeUs = positionMs * 1000ll;
+ }
+ }
+ doSeekAndRead(seekTimeUs);
+ break;
}
- } else {
- if (mTextParser != NULL) {
- mTextParser.clear();
- mTextParser = NULL;
+ case kWhatSendSubtitle: {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation != mSendSubtitleGeneration) {
+ // Drop obsolete msg.
+ break;
+ }
+ sp<RefBase> obj;
+ msg->findObject("subtitle", &obj);
+ if (obj != NULL) {
+ sp<ParcelEvent> parcelEvent;
+ parcelEvent = static_cast<ParcelEvent*>(obj.get());
+ notifyListener(MEDIA_TIMED_TEXT, &(parcelEvent->parcel));
+ } else {
+ notifyListener(MEDIA_TIMED_TEXT);
+ }
+ doRead();
+ break;
}
- if (mOutOfBandSource != NULL) {
- mOutOfBandSource.clear();
- mOutOfBandSource = NULL;
+ case kWhatSetSource: {
+ sp<RefBase> obj;
+ msg->findObject("source", &obj);
+ if (obj == NULL) break;
+ if (mSource != NULL) {
+ mSource->stop();
+ }
+ mSource = static_cast<TimedTextSource*>(obj.get());
+ mSource->start();
+ Parcel parcel;
+ if (mSource->extractGlobalDescriptions(&parcel) == OK &&
+ parcel.dataSize() > 0) {
+ notifyListener(MEDIA_TIMED_TEXT, &parcel);
+ } else {
+ notifyListener(MEDIA_TIMED_TEXT);
+ }
+ break;
}
}
}
-status_t TimedTextPlayer::seekTo(int64_t time_us) {
- Mutex::Autolock autoLock(mLock);
-
- mSeeking = true;
- mSeekTimeUs = time_us;
-
- postTextEvent();
-
- return OK;
-}
-
-status_t TimedTextPlayer::setTimedTextTrackIndex(int32_t index) {
- if (index >=
- (int)(mTextTrackVector.size() + mTextOutOfBandVector.size())) {
- return BAD_VALUE;
- }
-
- if (mStarted) {
- reset();
- }
-
- if (index >= 0) {
- return start(index);
- }
- return OK;
-}
-
-void TimedTextPlayer::onTextEvent() {
- Mutex::Autolock autoLock(mLock);
-
- if (!mTextEventPending) {
- return;
- }
- mTextEventPending = false;
-
- if (mData.dataSize() > 0) {
- notifyListener(MEDIA_TIMED_TEXT, &mData);
- mData.freeData();
- }
-
+void TimedTextPlayer::doSeekAndRead(int64_t seekTimeUs) {
MediaSource::ReadOptions options;
- if (mSeeking) {
- options.setSeekTo(mSeekTimeUs,
- MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
- mSeeking = false;
-
- notifyListener(MEDIA_TIMED_TEXT); //empty text to clear the screen
- }
-
- int64_t positionUs, timeUs;
- mObserver->getPosition(&positionUs);
-
- if (mTextType == kInBandText) {
- if (mSource->read(&mTextBuffer, &options) != OK) {
- return;
- }
-
- mTextBuffer->meta_data()->findInt64(kKeyTime, &timeUs);
- } else {
- int64_t endTimeUs;
- if (mTextParser->getText(
- &mText, &timeUs, &endTimeUs, &options) != OK) {
- return;
- }
- }
-
- if (timeUs > 0) {
- extractAndAppendLocalDescriptions(timeUs);
- }
-
- if (mTextType == kInBandText) {
- if (mTextBuffer != NULL) {
- mTextBuffer->release();
- mTextBuffer = NULL;
- }
- } else {
- mText.clear();
- }
-
- //send the text now
- if (timeUs <= positionUs + 100000ll) {
- postTextEvent();
- } else {
- postTextEvent(timeUs - positionUs - 100000ll);
- }
+ options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+ doRead(&options);
}
-void TimedTextPlayer::postTextEvent(int64_t delayUs) {
- if (mTextEventPending) {
- return;
- }
-
- mTextEventPending = true;
- mQueue->postEventWithDelay(mTextEvent, delayUs < 0 ? 10000 : delayUs);
+void TimedTextPlayer::doRead(MediaSource::ReadOptions* options) {
+ int64_t timeUs = 0;
+ sp<ParcelEvent> parcelEvent = new ParcelEvent();
+ mSource->read(&timeUs, &(parcelEvent->parcel), options);
+ postTextEvent(parcelEvent, timeUs);
}
-void TimedTextPlayer::cancelTextEvent() {
- mQueue->cancelEvent(mTextEvent->eventID());
- mTextEventPending = false;
-}
+void TimedTextPlayer::postTextEvent(const sp<ParcelEvent>& parcel, int64_t timeUs) {
+ sp<MediaPlayerBase> listener = mListener.promote();
+ if (listener != NULL) {
+ int64_t positionUs, delayUs;
+ int32_t positionMs = 0;
+ listener->getCurrentPosition(&positionMs);
+ positionUs = positionMs * 1000;
-void TimedTextPlayer::addTextSource(sp<MediaSource> source) {
- Mutex::Autolock autoLock(mLock);
- mTextTrackVector.add(source);
-}
-
-status_t TimedTextPlayer::setParameter(int key, const Parcel &request) {
- Mutex::Autolock autoLock(mLock);
-
- if (key == KEY_PARAMETER_TIMED_TEXT_ADD_OUT_OF_BAND_SOURCE) {
- const String16 uri16 = request.readString16();
- String8 uri = String8(uri16);
- KeyedVector<String8, String8> headers;
-
- // To support local subtitle file only for now
- if (strncasecmp("file://", uri.string(), 7)) {
- return INVALID_OPERATION;
- }
- sp<DataSource> dataSource =
- DataSource::CreateFromURI(uri, &headers);
- status_t err = dataSource->initCheck();
-
- if (err != OK) {
- return err;
- }
-
- OutOfBandText text;
- text.source = dataSource;
- if (uri.getPathExtension() == String8(".srt")) {
- text.type = TimedTextParser::OUT_OF_BAND_FILE_SRT;
+ if (timeUs <= positionUs + kAdjustmentProcessingTimeUs) {
+ delayUs = 0;
} else {
- return ERROR_UNSUPPORTED;
+ delayUs = timeUs - positionUs - kAdjustmentProcessingTimeUs;
}
-
- mTextOutOfBandVector.add(text);
-
- return OK;
+ sp<AMessage> msg = new AMessage(kWhatSendSubtitle, id());
+ msg->setInt32("generation", mSendSubtitleGeneration);
+ if (parcel != NULL) {
+ msg->setObject("subtitle", parcel);
+ }
+ msg->post(delayUs);
}
- return INVALID_OPERATION;
}
void TimedTextPlayer::notifyListener(int msg, const Parcel *parcel) {
- if (mListener != NULL) {
- sp<MediaPlayerBase> listener = mListener.promote();
-
- if (listener != NULL) {
- if (parcel && (parcel->dataSize() > 0)) {
- listener->sendEvent(msg, 0, 0, parcel);
- } else { // send an empty timed text to clear the screen
- listener->sendEvent(msg);
- }
+ sp<MediaPlayerBase> listener = mListener.promote();
+ if (listener != NULL) {
+ if (parcel != NULL && (parcel->dataSize() > 0)) {
+ listener->sendEvent(msg, 0, 0, parcel);
+ } else { // send an empty timed text to clear the screen
+ listener->sendEvent(msg);
}
}
}
-// Each text sample consists of a string of text, optionally with sample
-// modifier description. The modifier description could specify a new
-// text style for the string of text. These descriptions are present only
-// if they are needed. This method is used to extract the modifier
-// description and append it at the end of the text.
-status_t TimedTextPlayer::extractAndAppendLocalDescriptions(int64_t timeUs) {
- const void *data;
- size_t size = 0;
- int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS;
-
- if (mTextType == kInBandText) {
- const char *mime;
- CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
- flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
- data = mTextBuffer->data();
- size = mTextBuffer->size();
- } else {
- // support 3GPP only for now
- return ERROR_UNSUPPORTED;
- }
- } else {
- data = mText.c_str();
- size = mText.size();
- flag |= TextDescriptions::OUT_OF_BAND_TEXT_SRT;
- }
-
- if ((size > 0) && (flag != TextDescriptions::LOCAL_DESCRIPTIONS)) {
- mData.freeData();
- return TextDescriptions::getParcelOfDescriptions(
- (const uint8_t *)data, size, flag, timeUs / 1000, &mData);
- }
-
- return OK;
-}
-
-// To extract and send the global text descriptions for all the text samples
-// in the text track or text file.
-status_t TimedTextPlayer::extractAndSendGlobalDescriptions() {
- const void *data;
- size_t size = 0;
- int32_t flag = TextDescriptions::GLOBAL_DESCRIPTIONS;
-
- if (mTextType == kInBandText) {
- const char *mime;
- CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
-
- // support 3GPP only for now
- if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
- uint32_t type;
- // get the 'tx3g' box content. This box contains the text descriptions
- // used to render the text track
- if (!mSource->getFormat()->findData(
- kKeyTextFormatData, &type, &data, &size)) {
- return ERROR_MALFORMED;
- }
-
- flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
- }
- }
-
- if ((size > 0) && (flag != TextDescriptions::GLOBAL_DESCRIPTIONS)) {
- Parcel parcel;
- if (TextDescriptions::getParcelOfDescriptions(
- (const uint8_t *)data, size, flag, 0, &parcel) == OK) {
- if (parcel.dataSize() > 0) {
- notifyListener(MEDIA_TIMED_TEXT, &parcel);
- }
- }
- }
-
- return OK;
-}
-}
+} // namespace android
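The rewritten TimedTextPlayer relies on a generation counter instead of cancelling queued events: every kWhatSendSubtitle message is stamped with mSendSubtitleGeneration when it is posted, and pause() simply bumps the counter so that stale messages are dropped when they finally fire. The pattern in isolation (a plain C++ sketch of the idea, not the AMessage-based code above):

    #include <cstdint>

    // Stamp outgoing messages with the current generation; bumping the counter
    // invalidates everything already in flight without touching the queue.
    struct SubtitleGeneration {
        int32_t current = 0;

        int32_t stamp() const { return current; }    // attach to a posted message
        void invalidate() { ++current; }              // on pause() or track change
        bool isCurrent(int32_t stamped) const {       // check on delivery
            return stamped == current;
        }
    };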
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.h b/media/libstagefright/timedtext/TimedTextPlayer.h
index a744db5..837beeb 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.h
+++ b/media/libstagefright/timedtext/TimedTextPlayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -15,99 +15,61 @@
*/
#ifndef TIMEDTEXT_PLAYER_H_
-
#define TIMEDTEXT_PLAYER_H_
-#include <media/MediaPlayerInterface.h>
+#include <binder/Parcel.h>
#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/RefBase.h>
-#include "include/TimedEventQueue.h"
-#include "TimedTextParser.h"
+#include "TimedTextSource.h"
namespace android {
-class MediaSource;
-class AwesomePlayer;
-class MediaBuffer;
+class AMessage;
+class MediaPlayerBase;
+class TimedTextDriver;
+class TimedTextSource;
-class TimedTextPlayer {
+class TimedTextPlayer : public AHandler {
public:
- TimedTextPlayer(AwesomePlayer *observer,
- const wp<MediaPlayerBase> &listener,
- TimedEventQueue *queue);
+ TimedTextPlayer(const wp<MediaPlayerBase> &listener);
virtual ~TimedTextPlayer();
- // index: the index of the text track which will
- // be turned on
- status_t start(uint8_t index);
-
+ void start();
void pause();
-
void resume();
+ void seekToAsync(int64_t timeUs);
+ void setDataSource(sp<TimedTextSource> source);
- status_t seekTo(int64_t time_us);
-
- void addTextSource(sp<MediaSource> source);
-
- status_t setTimedTextTrackIndex(int32_t index);
- status_t setParameter(int key, const Parcel &request);
+protected:
+ virtual void onMessageReceived(const sp<AMessage> &msg);
private:
- enum TextType {
- kNoText = 0,
- kInBandText = 1,
- kOutOfBandText = 2,
+ enum {
+ kWhatPause = 'paus',
+ kWhatSeek = 'seek',
+ kWhatSendSubtitle = 'send',
+ kWhatSetSource = 'ssrc',
};
- Mutex mLock;
-
- sp<MediaSource> mSource;
- sp<DataSource> mOutOfBandSource;
-
- bool mSeeking;
- int64_t mSeekTimeUs;
-
- bool mStarted;
-
- sp<TimedEventQueue::Event> mTextEvent;
- bool mTextEventPending;
-
- TimedEventQueue *mQueue;
+ // To add Parcel into an AMessage as an object, it should be 'RefBase'.
+ struct ParcelEvent : public RefBase {
+ Parcel parcel;
+ };
wp<MediaPlayerBase> mListener;
- AwesomePlayer *mObserver;
+ sp<TimedTextSource> mSource;
+ int32_t mSendSubtitleGeneration;
- MediaBuffer *mTextBuffer;
- Parcel mData;
-
- // for in-band timed text
- Vector<sp<MediaSource> > mTextTrackVector;
-
- // for out-of-band timed text
- struct OutOfBandText {
- TimedTextParser::FileType type;
- sp<DataSource> source;
- };
- Vector<OutOfBandText > mTextOutOfBandVector;
-
- sp<TimedTextParser> mTextParser;
- AString mText;
-
- TextType mTextType;
-
- void reset();
-
+ void doSeekAndRead(int64_t seekTimeUs);
+ void doRead(MediaSource::ReadOptions* options = NULL);
void onTextEvent();
- void postTextEvent(int64_t delayUs = -1);
- void cancelTextEvent();
-
+ void postTextEvent(const sp<ParcelEvent>& parcel = NULL, int64_t timeUs = -1);
void notifyListener(int msg, const Parcel *parcel = NULL);
- status_t extractAndAppendLocalDescriptions(int64_t timeUs);
- status_t extractAndSendGlobalDescriptions();
-
DISALLOW_EVIL_CONSTRUCTORS(TimedTextPlayer);
};
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.cpp b/media/libstagefright/timedtext/TimedTextSRTSource.cpp
new file mode 100644
index 0000000..3752d34
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSRTSource.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedTextSRTSource"
+#include <utils/Log.h>
+
+#include <binder/Parcel.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+
+#include "TimedTextSRTSource.h"
+#include "TextDescriptions.h"
+
+namespace android {
+
+TimedTextSRTSource::TimedTextSRTSource(const sp<DataSource>& dataSource)
+ : mSource(dataSource),
+ mIndex(0) {
+}
+
+TimedTextSRTSource::~TimedTextSRTSource() {
+}
+
+status_t TimedTextSRTSource::start() {
+ status_t err = scanFile();
+ if (err != OK) {
+ reset();
+ }
+ return err;
+}
+
+void TimedTextSRTSource::reset() {
+ mTextVector.clear();
+ mIndex = 0;
+}
+
+status_t TimedTextSRTSource::stop() {
+ reset();
+ return OK;
+}
+
+status_t TimedTextSRTSource::read(
+ int64_t *timeUs,
+ Parcel *parcel,
+ const MediaSource::ReadOptions *options) {
+ int64_t endTimeUs;
+ AString text;
+ status_t err = getText(options, &text, timeUs, &endTimeUs);
+ if (err != OK) {
+ return err;
+ }
+
+ if (*timeUs > 0) {
+ extractAndAppendLocalDescriptions(*timeUs, text, parcel);
+ }
+ return OK;
+}
+
+status_t TimedTextSRTSource::scanFile() {
+ off64_t offset = 0;
+ int64_t startTimeUs;
+ bool endOfFile = false;
+
+ while (!endOfFile) {
+ TextInfo info;
+ status_t err = getNextSubtitleInfo(&offset, &startTimeUs, &info);
+ switch (err) {
+ case OK:
+ mTextVector.add(startTimeUs, info);
+ break;
+ case ERROR_END_OF_STREAM:
+ endOfFile = true;
+ break;
+ default:
+ return err;
+ }
+ }
+ if (mTextVector.isEmpty()) {
+ return ERROR_MALFORMED;
+ }
+ return OK;
+}
+
+/* SRT format:
+ * Subtitle number
+ * Start time --> End time
+ * Text of subtitle (one or more lines)
+ * Blank lines
+ *
+ * .srt file example:
+ * 1
+ * 00:00:20,000 --> 00:00:24,400
+ * Altocumulus clouds occur between six thousand
+ *
+ * 2
+ * 00:00:24,600 --> 00:00:27,800
+ * and twenty thousand feet above ground level.
+ */
+status_t TimedTextSRTSource::getNextSubtitleInfo(
+ off64_t *offset, int64_t *startTimeUs, TextInfo *info) {
+ AString data;
+ status_t err;
+
+ // To skip blank lines.
+ do {
+ if ((err = readNextLine(offset, &data)) != OK) {
+ return err;
+ }
+ data.trim();
+ } while (data.empty());
+
+ // Ignore the first non-blank line, which is the subtitle sequence number.
+ if ((err = readNextLine(offset, &data)) != OK) {
+ return err;
+ }
+ int hour1, hour2, min1, min2, sec1, sec2, msec1, msec2;
+ // the start time format is: hours:minutes:seconds,milliseconds
+ // 00:00:24,600 --> 00:00:27,800
+ if (sscanf(data.c_str(), "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
+ &hour1, &min1, &sec1, &msec1, &hour2, &min2, &sec2, &msec2) != 8) {
+ return ERROR_MALFORMED;
+ }
+
+ *startTimeUs = ((hour1 * 3600 + min1 * 60 + sec1) * 1000 + msec1) * 1000ll;
+ info->endTimeUs = ((hour2 * 3600 + min2 * 60 + sec2) * 1000 + msec2) * 1000ll;
+ if (info->endTimeUs <= *startTimeUs) {
+ return ERROR_MALFORMED;
+ }
+
+ info->offset = *offset;
+ bool needMoreData = true;
+ while (needMoreData) {
+ if ((err = readNextLine(offset, &data)) != OK) {
+ if (err == ERROR_END_OF_STREAM) {
+ needMoreData = false;
+ } else {
+ return err;
+ }
+ }
+
+ if (needMoreData) {
+ data.trim();
+ if (data.empty()) {
+ // it's an empty line used to separate two subtitles
+ needMoreData = false;
+ }
+ }
+ }
+ info->textLen = *offset - info->offset;
+ return OK;
+}
+
+status_t TimedTextSRTSource::readNextLine(off64_t *offset, AString *data) {
+ data->clear();
+ while (true) {
+ ssize_t readSize;
+ char character;
+ if ((readSize = mSource->readAt(*offset, &character, 1)) < 1) {
+ if (readSize == 0) {
+ return ERROR_END_OF_STREAM;
+ }
+ return ERROR_IO;
+ }
+
+ (*offset)++;
+
+ // a line could end with CR, LF or CR + LF
+ if (character == 10) {
+ break;
+ } else if (character == 13) {
+ if ((readSize = mSource->readAt(*offset, &character, 1)) < 1) {
+ if (readSize == 0) { // end of the stream
+ return OK;
+ }
+ return ERROR_IO;
+ }
+
+ (*offset)++;
+ if (character != 10) {
+ (*offset)--;
+ }
+ break;
+ }
+ data->append(character);
+ }
+ return OK;
+}
+
+status_t TimedTextSRTSource::getText(
+ const MediaSource::ReadOptions *options,
+ AString *text, int64_t *startTimeUs, int64_t *endTimeUs) {
+ text->clear();
+ int64_t seekTimeUs;
+ MediaSource::ReadOptions::SeekMode mode;
+ if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
+ int64_t lastEndTimeUs =
+ mTextVector.valueAt(mTextVector.size() - 1).endTimeUs;
+ int64_t firstStartTimeUs = mTextVector.keyAt(0);
+ if (seekTimeUs < 0 || seekTimeUs > lastEndTimeUs) {
+ return ERROR_OUT_OF_RANGE;
+ } else if (seekTimeUs < firstStartTimeUs) {
+ mIndex = 0;
+ } else {
+ // binary search
+ ssize_t low = 0;
+ ssize_t high = mTextVector.size() - 1;
+ ssize_t mid = 0;
+ int64_t currTimeUs;
+
+ while (low <= high) {
+ mid = low + (high - low)/2;
+ currTimeUs = mTextVector.keyAt(mid);
+ const int64_t diff = currTimeUs - seekTimeUs;
+
+ if (diff == 0) {
+ break;
+ } else if (diff < 0) {
+ low = mid + 1;
+ } else {
+ if ((high == mid + 1)
+ && (seekTimeUs < mTextVector.keyAt(high))) {
+ break;
+ }
+ high = mid - 1;
+ }
+ }
+ mIndex = mid;
+ }
+ }
+ if (mIndex >= (int)mTextVector.size()) {
+ return ERROR_END_OF_STREAM;
+ }
+ const TextInfo &info = mTextVector.valueAt(mIndex);
+ *startTimeUs = mTextVector.keyAt(mIndex);
+ *endTimeUs = info.endTimeUs;
+ mIndex++;
+
+ char *str = new char[info.textLen];
+ if (mSource->readAt(info.offset, str, info.textLen) < info.textLen) {
+ delete[] str;
+ return ERROR_IO;
+ }
+ text->append(str, info.textLen);
+ delete[] str;
+ return OK;
+}
+
+status_t TimedTextSRTSource::extractAndAppendLocalDescriptions(
+ int64_t timeUs, const AString &text, Parcel *parcel) {
+ const void *data = text.c_str();
+ size_t size = text.size();
+ int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS |
+ TextDescriptions::OUT_OF_BAND_TEXT_SRT;
+
+ if (size > 0) {
+ return TextDescriptions::getParcelOfDescriptions(
+ (const uint8_t *)data, size, flag, timeUs / 1000, parcel);
+ }
+ return OK;
+}
+
+} // namespace android
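
As a quick check of the timing arithmetic in getNextSubtitleInfo() above, the following standalone snippet (an illustration only, not part of the patch) parses one SRT timing line with the same sscanf format string and converts it to microseconds in the same way.

    #include <stdio.h>
    #include <stdint.h>

    int main() {
        const char *line = "00:00:24,600 --> 00:00:27,800";
        int h1, m1, s1, ms1, h2, m2, s2, ms2;
        if (sscanf(line, "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
                   &h1, &m1, &s1, &ms1, &h2, &m2, &s2, &ms2) != 8) {
            fprintf(stderr, "malformed timing line\n");
            return 1;
        }
        // Same conversion as getNextSubtitleInfo(): whole seconds to ms, then ms to us.
        int64_t startUs = ((h1 * 3600 + m1 * 60 + s1) * 1000 + ms1) * 1000LL;
        int64_t endUs   = ((h2 * 3600 + m2 * 60 + s2) * 1000 + ms2) * 1000LL;
        printf("start=%lld us end=%lld us\n", (long long)startUs, (long long)endUs);
        return 0;
    }

For the sample line this prints start=24600000 us and end=27800000 us, i.e. the value mTextVector is keyed on and the endTimeUs stored in its TextInfo.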
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.h b/media/libstagefright/timedtext/TimedTextSRTSource.h
new file mode 100644
index 0000000..a0734d9
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSRTSource.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_SRT_SOURCE_H_
+#define TIMED_TEXT_SRT_SOURCE_H_
+
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/Compat.h> // off64_t
+
+#include "TimedTextSource.h"
+
+namespace android {
+
+class AString;
+class DataSource;
+class MediaBuffer;
+class Parcel;
+
+class TimedTextSRTSource : public TimedTextSource {
+ public:
+ TimedTextSRTSource(const sp<DataSource>& dataSource);
+ virtual status_t start();
+ virtual status_t stop();
+ virtual status_t read(
+ int64_t *timeUs,
+ Parcel *parcel,
+ const MediaSource::ReadOptions *options = NULL);
+
+ protected:
+ virtual ~TimedTextSRTSource();
+
+ private:
+ sp<DataSource> mSource;
+
+ struct TextInfo {
+ int64_t endTimeUs;
+ // The offset of the text in the original file.
+ off64_t offset;
+ int textLen;
+ };
+
+ int mIndex;
+ KeyedVector<int64_t, TextInfo> mTextVector;
+
+ void reset();
+ status_t scanFile();
+ status_t getNextSubtitleInfo(
+ off64_t *offset, int64_t *startTimeUs, TextInfo *info);
+ status_t readNextLine(off64_t *offset, AString *data);
+ status_t getText(
+ const MediaSource::ReadOptions *options,
+ AString *text, int64_t *startTimeUs, int64_t *endTimeUs);
+ status_t extractAndAppendLocalDescriptions(
+ int64_t timeUs, const AString &text, Parcel *parcel);
+
+ DISALLOW_EVIL_CONSTRUCTORS(TimedTextSRTSource);
+};
+
+} // namespace android
+
+#endif // TIMED_TEXT_SRT_SOURCE_H_
diff --git a/media/libstagefright/timedtext/TimedTextSource.cpp b/media/libstagefright/timedtext/TimedTextSource.cpp
new file mode 100644
index 0000000..9efe67c
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSource.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedTextSource"
+#include <utils/Log.h>
+
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaSource.h>
+
+#include "TimedTextSource.h"
+
+#include "TimedTextInBandSource.h"
+#include "TimedTextSRTSource.h"
+
+namespace android {
+
+// static
+sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
+ const sp<MediaSource>& mediaSource) {
+ return new TimedTextInBandSource(mediaSource);
+}
+
+// static
+sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
+ const sp<DataSource>& dataSource, FileType filetype) {
+ switch(filetype) {
+ case OUT_OF_BAND_FILE_SRT:
+ return new TimedTextSRTSource(dataSource);
+ case OUT_OF_BAND_FILE_SMI:
+ // TODO: Implement for SMI.
+ ALOGE("Supporting SMI is not implemented yet");
+ break;
+ default:
+ ALOGE("Undefined subtitle format. : %d", filetype);
+ }
+ return NULL;
+}
+
+} // namespace android
diff --git a/media/libstagefright/timedtext/TimedTextSource.h b/media/libstagefright/timedtext/TimedTextSource.h
new file mode 100644
index 0000000..06bae71
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSource.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_SOURCE_H_
+#define TIMED_TEXT_SOURCE_H_
+
+#include <media/stagefright/foundation/ABase.h> // for DISALLOW_XXX macro.
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h> // for MediaSource::ReadOptions
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DataSource;
+class Parcel;
+
+class TimedTextSource : public RefBase {
+ public:
+ enum FileType {
+ OUT_OF_BAND_FILE_SRT = 1,
+ OUT_OF_BAND_FILE_SMI = 2,
+ };
+ static sp<TimedTextSource> CreateTimedTextSource(
+ const sp<MediaSource>& source);
+ static sp<TimedTextSource> CreateTimedTextSource(
+ const sp<DataSource>& source, FileType filetype);
+ TimedTextSource() {}
+ virtual status_t start() = 0;
+ virtual status_t stop() = 0;
+ // Returns subtitle parcel and its start time.
+ virtual status_t read(
+ int64_t *timeUs,
+ Parcel *parcel,
+ const MediaSource::ReadOptions *options = NULL) = 0;
+ virtual status_t extractGlobalDescriptions(Parcel *parcel) {
+ return INVALID_OPERATION;
+ }
+
+ protected:
+ virtual ~TimedTextSource() { }
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(TimedTextSource);
+};
+
+} // namespace android
+
+#endif // TIMED_TEXT_SOURCE_H_
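
To make the new factory interface concrete, here is a hedged usage sketch rather than code from this change: a caller wraps an .srt file in a DataSource, asks TimedTextSource for an SRT-backed source, and pulls subtitle parcels with read(). FileSource is the stock file-backed stagefright DataSource; the dumpFirstSubtitles() helper, its fixed read count and its logging are invented for the example.

    #define LOG_TAG "TimedTextSourceExample"
    #include <utils/Log.h>

    #include <binder/Parcel.h>
    #include <media/stagefright/DataSource.h>
    #include <media/stagefright/FileSource.h>

    #include "TimedTextSource.h"

    using namespace android;

    static status_t dumpFirstSubtitles(const char *path) {
        sp<DataSource> file = new FileSource(path);
        sp<TimedTextSource> source = TimedTextSource::CreateTimedTextSource(
                file, TimedTextSource::OUT_OF_BAND_FILE_SRT);
        if (source == NULL) {
            return UNKNOWN_ERROR;
        }
        // For SRT this scans and indexes the whole file up front (see scanFile()).
        status_t err = source->start();
        if (err != OK) {
            return err;
        }
        // Read a few entries; how a caller detects the end of the track is left
        // to the driver, so this sketch simply stops after three reads.
        for (int i = 0; i < 3; ++i) {
            int64_t startTimeUs = 0;
            Parcel parcel;
            err = source->read(&startTimeUs, &parcel, NULL /* options */);
            if (err != OK) {
                break;
            }
            ALOGI("subtitle at %lld us, %d bytes of description",
                    (long long)startTimeUs, (int)parcel.dataSize());
        }
        return source->stop();
    }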
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 2d856ad..7e2f6be 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -160,7 +160,10 @@
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
- mPrimaryHardwareDev(NULL), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
+ mPrimaryHardwareDev(NULL),
+ mHardwareStatus(AUDIO_HW_IDLE), // see also onFirstRef()
+ mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
+ mMode(AUDIO_MODE_INVALID),
mBtNrecIsOff(false)
{
}
@@ -172,7 +175,6 @@
Mutex::Autolock _l(mLock);
/* TODO: move all this work into an Init() function */
- mHardwareStatus = AUDIO_HW_IDLE;
for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
const hw_module_t *mod;
@@ -265,13 +267,10 @@
result.append("Clients:\n");
for (size_t i = 0; i < mClients.size(); ++i) {
- wp<Client> wClient = mClients.valueAt(i);
- if (wClient != 0) {
- sp<Client> client = wClient.promote();
- if (client != 0) {
- snprintf(buffer, SIZE, " pid: %d\n", client->pid());
- result.append(buffer);
- }
+ sp<Client> client = mClients.valueAt(i).promote();
+ if (client != 0) {
+ snprintf(buffer, SIZE, " pid: %d\n", client->pid());
+ result.append(buffer);
}
}
@@ -971,7 +970,8 @@
{
size_t size = mNotificationClients.size();
for (size_t i = 0; i < size; i++) {
- mNotificationClients.valueAt(i)->client()->ioConfigChanged(event, ioHandle, param2);
+ mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioHandle,
+ param2);
}
}
@@ -985,13 +985,19 @@
// ----------------------------------------------------------------------------
-AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, int id, uint32_t device)
+AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, int id, uint32_t device,
+ type_t type)
: Thread(false),
- mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mChannelCount(0),
- mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID), mStandby(false), mId(id), mExiting(false),
- mDevice(device)
+ mType(type),
+ mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0),
+ // mChannelMask
+ mChannelCount(0),
+ mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
+ mParamStatus(NO_ERROR),
+ mStandby(false), mId(id), mExiting(false),
+ mDevice(device),
+ mDeathRecipient(new PMDeathRecipient(this))
{
- mDeathRecipient = new PMDeathRecipient(this);
}
AudioFlinger::ThreadBase::~ThreadBase()
@@ -1372,20 +1378,24 @@
AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output,
int id,
- uint32_t device)
- : ThreadBase(audioFlinger, id, device),
- mMixBuffer(NULL), mSuspended(0), mBytesWritten(0), mOutput(output),
+ uint32_t device,
+ type_t type)
+ : ThreadBase(audioFlinger, id, device, type),
+ mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+ // Assumes constructor is called by AudioFlinger with its mLock held,
+ // but it would be safer to explicitly pass the initial masterMute as a parameter
+ mMasterMute(audioFlinger->masterMute_l()),
+ // mStreamTypes[] initialized in constructor body
+ mOutput(output),
+ // Assumes constructor is called by AudioFlinger with its mLock held,
+ // but it would be safer to explicitly pass the initial masterVolume as a parameter
+ mMasterVolume(audioFlinger->masterVolume_l()),
mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false)
{
snprintf(mName, kNameLength, "AudioOut_%d", id);
readOutputParameters();
- // Assumes constructor is called by AudioFlinger with it's mLock held,
- // but it would be safer to explicitly pass these as parameters
- mMasterVolume = mAudioFlinger->masterVolume_l();
- mMasterMute = mAudioFlinger->masterMute_l();
-
// mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
// There is no AUDIO_STREAM_MIN, and ++ operator does not compile
for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
@@ -1431,13 +1441,10 @@
result.append(buffer);
result.append(" Name Clien Typ Fmt Chn mask Session Buf S M F SRate LeftV RighV Serv User Main buf Aux Buf\n");
for (size_t i = 0; i < mActiveTracks.size(); ++i) {
- wp<Track> wTrack = mActiveTracks[i];
- if (wTrack != 0) {
- sp<Track> track = wTrack.promote();
- if (track != 0) {
- track->dump(buffer, SIZE);
- result.append(buffer);
- }
+ sp<Track> track = mActiveTracks[i].promote();
+ if (track != 0) {
+ track->dump(buffer, SIZE);
+ result.append(buffer);
}
}
write(fd, result.string(), result.size());
@@ -1705,7 +1712,7 @@
// audioConfigChanged_l() must be called with AudioFlinger::mLock held
void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) {
AudioSystem::OutputDescriptor desc;
- void *param2 = 0;
+ void *param2 = NULL;
ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event, param);
@@ -1740,7 +1747,7 @@
// FIXME - Current mixer implementation only supports stereo output: Always
// Allocate a stereo buffer even if HW output is mono.
- if (mMixBuffer != NULL) delete[] mMixBuffer;
+ delete[] mMixBuffer;
mMixBuffer = new int16_t[mFrameCount * 2];
memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t));
@@ -1758,7 +1765,7 @@
status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
{
- if (halFrames == 0 || dspFrames == 0) {
+ if (halFrames == NULL || dspFrames == NULL) {
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
@@ -1845,13 +1852,12 @@
// ----------------------------------------------------------------------------
-AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device)
- : PlaybackThread(audioFlinger, output, id, device),
- mAudioMixer(NULL), mPrevMixerStatus(MIXER_IDLE)
+AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+ int id, uint32_t device, type_t type)
+ : PlaybackThread(audioFlinger, output, id, device, type),
+ mAudioMixer(new AudioMixer(mFrameCount, mSampleRate)),
+ mPrevMixerStatus(MIXER_IDLE)
{
- mType = ThreadBase::MIXER;
- mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
-
// FIXME - Current mixer implementation only supports stereo output
if (mChannelCount == 1) {
ALOGE("Invalid audio hardware channel count");
@@ -2193,7 +2199,7 @@
// read original volumes with volume control
float typeVolume = mStreamTypes[track->type()].volume;
float v = masterVolume * typeVolume;
- uint32_t vlr = cblk->volumeLR;
+ uint32_t vlr = cblk->getVolumeLR();
vl = vlr & 0xFFFF;
vr = vlr >> 16;
// track volumes come from shared memory, so can't be trusted and must be clamped
@@ -2461,6 +2467,8 @@
}
if (status == NO_ERROR && reconfig) {
delete mAudioMixer;
+ // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't)
+ mAudioMixer = NULL;
readOutputParameters();
mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
for (size_t i = 0; i < mTracks.size() ; i++) {
@@ -2513,9 +2521,10 @@
// ----------------------------------------------------------------------------
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device)
- : PlaybackThread(audioFlinger, output, id, device)
+ : PlaybackThread(audioFlinger, output, id, device, DIRECT)
+ // mLeftVolFloat, mRightVolFloat
+ // mLeftVolShort, mRightVolShort
{
- mType = ThreadBase::DIRECT;
}
AudioFlinger::DirectOutputThread::~DirectOutputThread()
@@ -2729,7 +2738,7 @@
} else {
float typeVolume = mStreamTypes[track->type()].volume;
float v = mMasterVolume * typeVolume;
- uint32_t vlr = cblk->volumeLR;
+ uint32_t vlr = cblk->getVolumeLR();
float v_clamped = v * (vlr & 0xFFFF);
if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
left = v_clamped/MAX_GAIN;
@@ -2992,10 +3001,11 @@
// ----------------------------------------------------------------------------
-AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, AudioFlinger::MixerThread* mainThread, int id)
- : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->device()), mWaitTimeMs(UINT_MAX)
+AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
+ AudioFlinger::MixerThread* mainThread, int id)
+ : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->device(), DUPLICATING),
+ mWaitTimeMs(UINT_MAX)
{
- mType = ThreadBase::DUPLICATING;
addOutputTrack(mainThread);
}
@@ -3244,13 +3254,17 @@
: RefBase(),
mThread(thread),
mClient(client),
- mCblk(0),
+ mCblk(NULL),
+ // mBuffer
+ // mBufferEnd
mFrameCount(0),
mState(IDLE),
mClientTid(-1),
mFormat(format),
mFlags(flags & ~SYSTEM_FLAGS_MASK),
mSessionId(sessionId)
+ // mChannelCount
+ // mChannelMask
{
ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
@@ -3266,7 +3280,7 @@
mCblkMemory = client->heap()->allocate(size);
if (mCblkMemory != 0) {
mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
- if (mCblk) { // construct the shared structure in-place.
+ if (mCblk != NULL) { // construct the shared structure in-place.
new(mCblk) audio_track_cblk_t();
// clear all buffers
mCblk->frameCount = frameCount;
@@ -3309,7 +3323,7 @@
AudioFlinger::ThreadBase::TrackBase::~TrackBase()
{
- if (mCblk) {
+ if (mCblk != NULL) {
mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
if (mClient == NULL) {
delete mCblk;
@@ -3317,6 +3331,7 @@
}
mCblkMemory.clear(); // and free the shared memory
if (mClient != NULL) {
+ // Client destructor must run with AudioFlinger mutex locked
Mutex::Autolock _l(mClient->audioFlinger()->mLock);
mClient.clear();
}
@@ -3383,7 +3398,7 @@
server %d, serverBase %d, user %d, userBase %d",
bufferStart, bufferEnd, mBuffer, mBufferEnd,
cblk->server, cblk->serverBase, cblk->user, cblk->userBase);
- return 0;
+ return NULL;
}
return bufferStart;
@@ -3468,7 +3483,7 @@
void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
{
- uint32_t vlr = mCblk->volumeLR;
+ uint32_t vlr = mCblk->getVolumeLR();
snprintf(buffer, size, " %05d %05d %03u %03u 0x%08x %05u %04u %1d %1d %1d %05u %05u %05u 0x%08x 0x%08x 0x%08x 0x%08x\n",
mName - AudioMixer::TRACK0,
(mClient == NULL) ? getpid() : mClient->pid(),
@@ -3827,7 +3842,6 @@
if (mCblk != NULL) {
mCblk->flags |= CBLK_DIRECTION_OUT;
mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
- mCblk->volumeLR = (MAX_GAIN_INT << 16) | MAX_GAIN_INT;
mOutBuffer.frameCount = 0;
playbackThread->mTracks.add(this);
ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, mCblk->buffers %p, " \
@@ -4065,7 +4079,7 @@
mAudioFlinger->removeClient_l(mPid);
}
-const sp<MemoryDealer>& AudioFlinger::Client::heap() const
+sp<MemoryDealer> AudioFlinger::Client::heap() const
{
return mMemoryDealer;
}
@@ -4075,13 +4089,12 @@
AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
const sp<IAudioFlingerClient>& client,
pid_t pid)
- : mAudioFlinger(audioFlinger), mPid(pid), mClient(client)
+ : mAudioFlinger(audioFlinger), mPid(pid), mAudioFlingerClient(client)
{
}
AudioFlinger::NotificationClient::~NotificationClient()
{
- mClient.clear();
}
void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who)
@@ -4266,15 +4279,16 @@
uint32_t channels,
int id,
uint32_t device) :
- ThreadBase(audioFlinger, id, device),
- mInput(input), mTrack(NULL), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL)
+ ThreadBase(audioFlinger, id, device, RECORD),
+ mInput(input), mTrack(NULL), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
+ // mRsmpInIndex and mInputBytes set by readInputParameters()
+ mReqChannelCount(popcount(channels)),
+ mReqSampleRate(sampleRate)
+ // mBytesRead is only meaningful while active, and so is cleared in start()
+ // (but might be better to also clear here for dump?)
{
- mType = ThreadBase::RECORD;
-
snprintf(mName, kNameLength, "AudioIn_%d", id);
- mReqChannelCount = popcount(channels);
- mReqSampleRate = sampleRate;
readInputParameters();
}
@@ -4282,10 +4296,8 @@
AudioFlinger::RecordThread::~RecordThread()
{
delete[] mRsmpInBuffer;
- if (mResampler != NULL) {
- delete mResampler;
- delete[] mRsmpOutBuffer;
- }
+ delete mResampler;
+ delete[] mRsmpOutBuffer;
}
void AudioFlinger::RecordThread::onFirstRef()
@@ -4807,7 +4819,7 @@
void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
AudioSystem::OutputDescriptor desc;
- void *param2 = 0;
+ void *param2 = NULL;
switch (event) {
case AudioSystem::INPUT_OPENED:
@@ -4829,9 +4841,11 @@
void AudioFlinger::RecordThread::readInputParameters()
{
- if (mRsmpInBuffer) delete mRsmpInBuffer;
- if (mRsmpOutBuffer) delete mRsmpOutBuffer;
- if (mResampler) delete mResampler;
+ delete mRsmpInBuffer;
+ // mRsmpInBuffer is always assigned a new[] below
+ delete mRsmpOutBuffer;
+ mRsmpOutBuffer = NULL;
+ delete mResampler;
mResampler = NULL;
mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
@@ -4983,10 +4997,10 @@
}
mPlaybackThreads.add(id, thread);
- if (pSamplingRate) *pSamplingRate = samplingRate;
- if (pFormat) *pFormat = format;
- if (pChannels) *pChannels = channels;
- if (pLatencyMs) *pLatencyMs = thread->latency();
+ if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+ if (pFormat != NULL) *pFormat = format;
+ if (pChannels != NULL) *pChannels = channels;
+ if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
// notify client processes of the new output creation
thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
@@ -5038,7 +5052,7 @@
}
}
}
- void *param2 = 0;
+ void *param2 = NULL;
audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, param2);
mPlaybackThreads.removeItem(output);
}
@@ -5154,9 +5168,9 @@
device);
mRecordThreads.add(id, thread);
ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
- if (pSamplingRate) *pSamplingRate = reqSamplingRate;
- if (pFormat) *pFormat = format;
- if (pChannels) *pChannels = reqChannels;
+ if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
+ if (pFormat != NULL) *pFormat = format;
+ if (pChannels != NULL) *pChannels = reqChannels;
input->stream->common.standby(&input->stream->common);
@@ -5181,7 +5195,7 @@
}
ALOGV("closeInput() %d", input);
- void *param2 = 0;
+ void *param2 = NULL;
audioConfigChanged_l(AudioSystem::INPUT_CLOSED, input, param2);
mRecordThreads.removeItem(input);
}
@@ -5243,12 +5257,8 @@
return;
}
}
- AudioSessionRef *ref = new AudioSessionRef();
- ref->sessionid = audioSession;
- ref->pid = caller;
- ref->cnt = 1;
- mAudioSessionRefs.push(ref);
- ALOGV(" added new entry for %d", ref->sessionid);
+ mAudioSessionRefs.push(new AudioSessionRef(audioSession, caller));
+ ALOGV(" added new entry for %d", audioSession);
}
void AudioFlinger::releaseAudioSessionId(int audioSession)
@@ -5792,7 +5802,7 @@
// create effect handle and connect it to effect module
handle = new EffectHandle(effect, client, effectClient, priority);
lStatus = effect->addHandle(handle);
- if (enabled) {
+ if (enabled != NULL) {
*enabled = (int)effect->isEnabled();
}
}
@@ -6179,7 +6189,7 @@
}
}
-status_t AudioFlinger::EffectModule::addHandle(sp<EffectHandle>& handle)
+status_t AudioFlinger::EffectModule::addHandle(const sp<EffectHandle>& handle)
{
status_t status;
@@ -6226,7 +6236,7 @@
bool enabled = false;
EffectHandle *hdl = handle.unsafe_get();
- if (hdl) {
+ if (hdl != NULL) {
ALOGV("removeHandle() unsafe_get OK");
enabled = hdl->enabled();
}
@@ -6862,7 +6872,7 @@
if (mCblkMemory != 0) {
mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer());
- if (mCblk) {
+ if (mCblk != NULL) {
new(mCblk) effect_param_cblk_t();
mBuffer = (uint8_t *)mCblk + bufOffset;
}
@@ -6959,7 +6969,7 @@
// release sp on module => module destructor can be called now
mEffect.clear();
if (mClient != 0) {
- if (mCblk) {
+ if (mCblk != NULL) {
mCblk->~effect_param_cblk_t(); // destroy our shared-structure.
}
mCblkMemory.clear(); // and free the shared memory
@@ -7089,7 +7099,7 @@
void AudioFlinger::EffectHandle::dump(char* buffer, size_t size)
{
- bool locked = mCblk ? tryLock(mCblk->lock) : false;
+ bool locked = mCblk != NULL && tryLock(mCblk->lock);
snprintf(buffer, size, "\t\t\t%05d %05d %01u %01u %05u %05u\n",
(mClient == NULL) ? getpid() : mClient->pid(),
@@ -7551,7 +7561,8 @@
ALOGV("setEffectSuspendedAll_l() add entry for 0");
}
if (desc->mRefCount++ == 0) {
- Vector< sp<EffectModule> > effects = getSuspendEligibleEffects();
+ Vector< sp<EffectModule> > effects;
+ getSuspendEligibleEffects(effects);
for (size_t i = 0; i < effects.size(); i++) {
setEffectSuspended_l(&effects[i]->desc().type, true);
}
@@ -7602,16 +7613,14 @@
return true;
}
-Vector< sp<AudioFlinger::EffectModule> > AudioFlinger::EffectChain::getSuspendEligibleEffects()
+void AudioFlinger::EffectChain::getSuspendEligibleEffects(Vector< sp<AudioFlinger::EffectModule> > &effects)
{
- Vector< sp<EffectModule> > effects;
+ effects.clear();
for (size_t i = 0; i < mEffects.size(); i++) {
- if (!isEffectEligibleForSuspend(mEffects[i]->desc())) {
- continue;
+ if (isEffectEligibleForSuspend(mEffects[i]->desc())) {
+ effects.add(mEffects[i]);
}
- effects.add(mEffects[i]);
}
- return effects;
}
sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectIfEnabled(
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 766ba44..4156da8 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -226,16 +226,16 @@
public:
Client(const sp<AudioFlinger>& audioFlinger, pid_t pid);
virtual ~Client();
- const sp<MemoryDealer>& heap() const;
+ sp<MemoryDealer> heap() const;
pid_t pid() const { return mPid; }
sp<AudioFlinger> audioFlinger() { return mAudioFlinger; }
private:
Client(const Client&);
Client& operator = (const Client&);
- sp<AudioFlinger> mAudioFlinger;
- sp<MemoryDealer> mMemoryDealer;
- pid_t mPid;
+ const sp<AudioFlinger> mAudioFlinger;
+ const sp<MemoryDealer> mMemoryDealer;
+ const pid_t mPid;
};
// --- Notification Client ---
@@ -246,7 +246,7 @@
pid_t pid);
virtual ~NotificationClient();
- sp<IAudioFlingerClient> client() { return mClient; }
+ sp<IAudioFlingerClient> audioFlingerClient() const { return mAudioFlingerClient; }
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -255,9 +255,9 @@
NotificationClient(const NotificationClient&);
NotificationClient& operator = (const NotificationClient&);
- sp<AudioFlinger> mAudioFlinger;
- pid_t mPid;
- sp<IAudioFlingerClient> mClient;
+ const sp<AudioFlinger> mAudioFlinger;
+ const pid_t mPid;
+ const sp<IAudioFlingerClient> mAudioFlingerClient;
};
class TrackHandle;
@@ -277,17 +277,17 @@
class ThreadBase : public Thread {
public:
- ThreadBase (const sp<AudioFlinger>& audioFlinger, int id, uint32_t device);
- virtual ~ThreadBase();
-
- enum type {
+ enum type_t {
MIXER, // Thread class is MixerThread
DIRECT, // Thread class is DirectOutputThread
DUPLICATING, // Thread class is DuplicatingThread
RECORD // Thread class is RecordThread
};
+ ThreadBase (const sp<AudioFlinger>& audioFlinger, int id, uint32_t device, type_t type);
+ virtual ~ThreadBase();
+
status_t dumpBase(int fd, const Vector<String16>& args);
status_t dumpEffectChains(int fd, const Vector<String16>& args);
@@ -367,8 +367,8 @@
bool step();
void reset();
- wp<ThreadBase> mThread;
- sp<Client> mClient;
+ const wp<ThreadBase> mThread;
+ /*const*/ sp<Client> mClient; // see ~TrackBase() for why this is not const
sp<IMemory> mCblkMemory;
audio_track_cblk_t* mCblk;
void* mBuffer;
@@ -377,9 +377,9 @@
// we don't really need a lock for these
track_state mState;
int mClientTid;
- audio_format_t mFormat;
+ const audio_format_t mFormat;
uint32_t mFlags;
- int mSessionId;
+ const int mSessionId;
uint8_t mChannelCount;
uint32_t mChannelMask;
};
@@ -408,7 +408,7 @@
};
virtual status_t initCheck() const = 0;
- int type() const { return mType; }
+ type_t type() const { return mType; }
uint32_t sampleRate() const;
int channelCount() const;
audio_format_t format() const;
@@ -530,9 +530,9 @@
friend class RecordThread;
friend class RecordTrack;
- int mType;
+ const type_t mType;
Condition mWaitWorkCV;
- sp<AudioFlinger> mAudioFlinger;
+ const sp<AudioFlinger> mAudioFlinger;
uint32_t mSampleRate;
size_t mFrameCount;
uint32_t mChannelMask;
@@ -553,7 +553,7 @@
char mName[kNameLength];
sp<IPowerManager> mPowerManager;
sp<IBinder> mWakeLockToken;
- sp<PMDeathRecipient> mDeathRecipient;
+ const sp<PMDeathRecipient> mDeathRecipient;
// list of suspended effects per session and per type. The first vector is
// keyed by session ID, the second by type UUID timeLow field
KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > > mSuspendedSessions;
@@ -671,7 +671,7 @@
bool write(int16_t* data, uint32_t frames);
bool bufferQueueEmpty() { return (mBufferQueue.size() == 0) ? true : false; }
bool isActive() { return mActive; }
- wp<ThreadBase>& thread() { return mThread; }
+ const wp<ThreadBase>& thread() { return mThread; }
private:
@@ -688,10 +688,11 @@
Vector < Buffer* > mBufferQueue;
AudioBufferProvider::Buffer mOutBuffer;
bool mActive;
- DuplicatingThread* mSourceThread;
+ DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
}; // end of OutputTrack
- PlaybackThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device);
+ PlaybackThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id,
+ uint32_t device, type_t type);
virtual ~PlaybackThread();
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -817,7 +818,8 @@
MixerThread (const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output,
int id,
- uint32_t device);
+ uint32_t device,
+ type_t type = MIXER);
virtual ~MixerThread();
// Thread virtuals
@@ -917,7 +919,7 @@
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
private:
- sp<PlaybackThread::Track> mTrack;
+ const sp<PlaybackThread::Track> mTrack;
};
friend class Client;
@@ -1021,8 +1023,8 @@
int16_t *mRsmpInBuffer;
size_t mRsmpInIndex;
size_t mInputBytes;
- int mReqChannelCount;
- uint32_t mReqSampleRate;
+ const int mReqChannelCount;
+ const uint32_t mReqSampleRate;
ssize_t mBytesRead;
};
@@ -1036,7 +1038,7 @@
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
private:
- sp<RecordThread::RecordTrack> mRecordTrack;
+ const sp<RecordThread::RecordTrack> mRecordTrack;
};
//--- Audio Effect Management
@@ -1105,9 +1107,9 @@
int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; }
void setChain(const wp<EffectChain>& chain) { mChain = chain; }
void setThread(const wp<ThreadBase>& thread) { mThread = thread; }
- wp<ThreadBase>& thread() { return mThread; }
+ const wp<ThreadBase>& thread() { return mThread; }
- status_t addHandle(sp<EffectHandle>& handle);
+ status_t addHandle(const sp<EffectHandle>& handle);
void disconnect(const wp<EffectHandle>& handle, bool unpiniflast);
size_t removeHandle (const wp<EffectHandle>& handle);
@@ -1325,7 +1327,8 @@
// get a list of effect modules to suspend when an effect of the type
// passed is enabled.
- Vector< sp<EffectModule> > getSuspendEligibleEffects();
+ void getSuspendEligibleEffects(Vector< sp<EffectModule> > &effects);
+
// get an effect module if it is currently enabled
sp<EffectModule> getEffectIfEnabled(const effect_uuid_t *type);
// true if the effect whose descriptor is passed can be suspended
@@ -1377,8 +1380,11 @@
};
struct AudioSessionRef {
- int sessionid;
- pid_t pid;
+ // FIXME rename parameter names when fields get "m" prefix
+ AudioSessionRef(int sessionid_, pid_t pid_) :
+ sessionid(sessionid_), pid(pid_), cnt(1) {}
+ const int sessionid;
+ const pid_t pid;
int cnt;
};
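
Much of the AudioFlinger churn above follows a single pattern: members that never change after construction (mType, mAudioFlinger, mPid, mDeathRecipient, mReqSampleRate, ...) become const, which forces their initialization out of the constructor body and into the initializer list. The fragment below is a generic illustration of that pattern, with invented Widget names rather than anything from the patch.

    #include <utils/RefBase.h>
    #include <utils/String8.h>

    using namespace android;

    class Widget {
    public:
        // const members must be initialized here; assigning them in the
        // constructor body would not compile.
        Widget(const sp<RefBase>& owner, int id)
            : mOwner(owner),
              mId(id),
              mName(String8::format("Widget_%d", id)) {
            mUseCount = 0;  // only non-const state is set up in the body
        }

    private:
        const sp<RefBase> mOwner;  // like ThreadBase::mAudioFlinger after this change
        const int         mId;     // like Client::mPid
        const String8     mName;
        int               mUseCount;
    };

The same reasoning is why ThreadBase now takes its type_t in the constructor instead of having each subclass assign mType in its body.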
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index a8102e5..0b9f8ba 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -48,9 +48,10 @@
mState.enabledTracks= 0;
mState.needsChanged = 0;
mState.frameCount = frameCount;
+ mState.hook = process__nop;
mState.outputTemp = NULL;
mState.resampleTemp = NULL;
- mState.hook = process__nop;
+ // mState.reserved
track_t* t = mState.tracks;
for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
t->needs = 0;
@@ -70,12 +71,13 @@
t->enabled = 0;
t->format = 16;
t->channelMask = AUDIO_CHANNEL_OUT_STEREO;
- t->buffer.raw = 0;
t->bufferProvider = NULL;
+ t->buffer.raw = NULL;
+ // t->buffer.frameCount
t->hook = NULL;
+ t->in = NULL;
t->resampler = NULL;
t->sampleRate = mSampleRate;
- t->in = NULL;
t->mainBuffer = NULL;
t->auxBuffer = NULL;
t++;
@@ -123,7 +125,7 @@
track.enabled = 0;
invalidateState(1<<name);
}
- if (track.resampler) {
+ if (track.resampler != NULL) {
// delete the resampler
delete track.resampler;
track.resampler = NULL;
@@ -807,7 +809,7 @@
while (outFrames) {
t1.buffer.frameCount = outFrames;
t1.bufferProvider->getNextBuffer(&t1.buffer);
- if (!t1.buffer.raw) break;
+ if (t1.buffer.raw == NULL) break;
outFrames -= t1.buffer.frameCount;
t1.bufferProvider->releaseBuffer(&t1.buffer);
}
@@ -1127,9 +1129,7 @@
}
}
- if (buff != NULL) {
- delete [] buff;
- }
+ delete [] buff;
}
#endif
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 30bbabd..0f2f544 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -144,9 +144,9 @@
}
mInputs.clear();
- if (mpAudioPolicy && mpAudioPolicyDev)
+ if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL)
mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
- if (mpAudioPolicyDev)
+ if (mpAudioPolicyDev != NULL)
audio_policy_dev_close(mpAudioPolicyDev);
}
@@ -287,7 +287,7 @@
mpAudioPolicy->release_output(mpAudioPolicy, output);
}
-audio_io_handle_t AudioPolicyService::getInput(int inputSource,
+audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
uint32_t samplingRate,
audio_format_t format,
uint32_t channels,
@@ -297,6 +297,10 @@
if (mpAudioPolicy == NULL) {
return 0;
}
+ // already checked by client, but double-check in case the client wrapper is bypassed
+ if (uint32_t(inputSource) >= AUDIO_SOURCE_CNT) {
+ return 0;
+ }
Mutex::Autolock _l(mLock);
audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
format, channels, acoustics);
@@ -305,7 +309,7 @@
return input;
}
// create audio pre processors according to input source
- ssize_t index = mInputSources.indexOfKey((audio_source_t)inputSource);
+ ssize_t index = mInputSources.indexOfKey(inputSource);
if (index < 0) {
return input;
}
@@ -645,7 +649,7 @@
release_wake_lock(mName.string());
}
mAudioCommands.clear();
- if (mpToneGenerator != NULL) delete mpToneGenerator;
+ delete mpToneGenerator;
}
void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -678,8 +682,7 @@
ToneData *data = (ToneData *)command->mParam;
ALOGV("AudioCommandThread() processing start tone %d on stream %d",
data->mType, data->mStream);
- if (mpToneGenerator != NULL)
- delete mpToneGenerator;
+ delete mpToneGenerator;
mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
mpToneGenerator->startTone(data->mType);
delete data;
@@ -1157,7 +1160,7 @@
if (param == NULL && value == NULL) {
// try to parse simple parameter form {int int}
param = root->first_child;
- if (param) {
+ if (param != NULL) {
// Note that a pair of random strings is read as 0 0
int *ptr = (int *)fx_param->data;
int *ptr2 = (int *)((char *)param + sizeof(effect_param_t));
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 65ff6ef..6597bf8 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -74,7 +74,7 @@
audio_stream_type_t stream,
int session = 0);
virtual void releaseOutput(audio_io_handle_t output);
- virtual audio_io_handle_t getInput(int inputSource,
+ virtual audio_io_handle_t getInput(audio_source_t inputSource,
uint32_t samplingRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,