Merge "Do not wait forever for output buffers in OMXCodec.cpp and error out in case time out happens"
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index dd1c275..34f0a64 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -803,6 +803,7 @@
printf("type '%s':\n", kMimeTypes[k]);
Vector<CodecCapabilities> results;
+ // will retrieve hardware and software codecs
CHECK_EQ(QueryCodecs(omx, kMimeTypes[k],
true, // queryDecoders
&results), (status_t)OK);
@@ -844,7 +845,12 @@
for (List<IOMX::ComponentInfo>::iterator it = list.begin();
it != list.end(); ++it) {
- printf("%s\n", (*it).mName.string());
+ printf("%s\t Roles: ", (*it).mName.string());
+ for (List<String8>::iterator itRoles = (*it).mRoles.begin() ;
+ itRoles != (*it).mRoles.end() ; ++itRoles) {
+ printf("%s\t", (*itRoles).string());
+ }
+ printf("\n");
}
}
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index dd93fd8..496b23e 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -188,7 +188,7 @@
* sessionID: audio session this effect is associated to. If 0, the effect will be global to
* the output mix. If not 0, the effect will be applied to all players
* (AudioTrack or MediaPLayer) within the same audio session.
- * output: HAL audio output stream to which this effect must be attached. Leave at 0 for
+ * io: HAL audio output or input stream to which this effect must be attached. Leave at 0 for
* automatic output selection by AudioFlinger.
*/
@@ -198,7 +198,7 @@
effect_callback_t cbf = 0,
void* user = 0,
int sessionId = 0,
- audio_io_handle_t output = 0
+ audio_io_handle_t io = 0
);
/* Constructor.
@@ -210,7 +210,7 @@
effect_callback_t cbf = 0,
void* user = 0,
int sessionId = 0,
- audio_io_handle_t output = 0
+ audio_io_handle_t io = 0
);
/* Terminates the AudioEffect and unregisters it from AudioFlinger.
@@ -232,7 +232,7 @@
effect_callback_t cbf = 0,
void* user = 0,
int sessionId = 0,
- audio_io_handle_t output = 0
+ audio_io_handle_t io = 0
);
/* Result of constructing the AudioEffect. This must be checked
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 89213b7..f20e234 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -160,7 +160,8 @@
uint32_t samplingRate = 0,
uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = AUDIO_CHANNEL_IN_MONO,
- audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0);
+ audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0,
+ int sessionId = 0);
static status_t startInput(audio_io_handle_t input);
static status_t stopInput(audio_io_handle_t input);
static void releaseInput(audio_io_handle_t input);
@@ -175,7 +176,7 @@
static audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
static status_t registerEffect(effect_descriptor_t *desc,
- audio_io_handle_t output,
+ audio_io_handle_t io,
uint32_t strategy,
int session,
int id);
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 0fc8dbf..86b9f85 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -65,7 +65,8 @@
uint32_t samplingRate = 0,
uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
- audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0) = 0;
+ audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0,
+ int audioSession = 0) = 0;
virtual status_t startInput(audio_io_handle_t input) = 0;
virtual status_t stopInput(audio_io_handle_t input) = 0;
virtual void releaseInput(audio_io_handle_t input) = 0;
@@ -78,7 +79,7 @@
virtual uint32_t getDevicesForStream(audio_stream_type_t stream) = 0;
virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) = 0;
virtual status_t registerEffect(effect_descriptor_t *desc,
- audio_io_handle_t output,
+ audio_io_handle_t io,
uint32_t strategy,
int session,
int id) = 0;
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index a73267d..007aea6 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -26,6 +26,7 @@
class ICamera;
class ICameraRecordingProxy;
class IMediaRecorderClient;
+class ISurfaceTexture;
class IMediaRecorder: public IInterface
{
@@ -55,6 +56,7 @@
virtual status_t init() = 0;
virtual status_t close() = 0;
virtual status_t release() = 0;
+ virtual sp<ISurfaceTexture> querySurfaceMediaSource() = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index ed26e63..69d5001 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -45,6 +45,18 @@
CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1006,
};
+/**
+ *Set CIF as default maximum import and export resolution of video editor.
+ *The maximum import and export resolutions are platform specific,
+ *which should be defined in media_profiles.xml.
+ */
+enum videoeditor_capability {
+ VIDEOEDITOR_DEFAULT_MAX_INPUT_FRAME_WIDTH = 352,
+ VIDEOEDITOR_DEFUALT_MAX_INPUT_FRAME_HEIGHT = 288,
+ VIDEOEDITOR_DEFAULT_MAX_OUTPUT_FRAME_WIDTH = 352,
+ VIDEOEDITOR_DEFUALT_MAX_OUTPUT_FRAME_HEIGHT = 288,
+};
+
enum video_decoder {
VIDEO_DECODER_WMV,
};
@@ -117,6 +129,17 @@
int getVideoEncoderParamByName(const char *name, video_encoder codec) const;
/**
+ * Returns the value for the given param name for the video editor cap
+ * param or -1 if error.
+ * Supported param name are:
+ * videoeditor.input.width.max - max input video frame width
+ * videoeditor.input.height.max - max input video frame height
+ * videoeditor.output.width.max - max output video frame width
+ * videoeditor.output.height.max - max output video frame height
+ */
+ int getVideoEditorCapParamByName(const char *name) const;
+
+ /**
* Returns the audio encoders supported.
*/
Vector<audio_encoder> getAudioEncoders() const;
@@ -164,7 +187,7 @@
MediaProfiles& operator=(const MediaProfiles&); // Don't call me
MediaProfiles(const MediaProfiles&); // Don't call me
- MediaProfiles() {} // Dummy default constructor
+ MediaProfiles() { mVideoEditorCap = NULL; } // Dummy default constructor
~MediaProfiles(); // Don't delete me
struct VideoCodec {
@@ -310,6 +333,22 @@
Vector<int> mLevels;
};
+ struct VideoEditorCap {
+ VideoEditorCap(int inFrameWidth, int inFrameHeight,
+ int outFrameWidth, int outFrameHeight)
+ : mMaxInputFrameWidth(inFrameWidth),
+ mMaxInputFrameHeight(inFrameHeight),
+ mMaxOutputFrameWidth(outFrameWidth),
+ mMaxOutputFrameHeight(outFrameHeight) {}
+
+ ~VideoEditorCap() {}
+
+ int mMaxInputFrameWidth;
+ int mMaxInputFrameHeight;
+ int mMaxOutputFrameWidth;
+ int mMaxOutputFrameHeight;
+ };
+
int getCamcorderProfileIndex(int cameraId, camcorder_quality quality) const;
void initRequiredProfileRefs(const Vector<int>& cameraIds);
int getRequiredProfileRefIndex(int cameraId);
@@ -321,6 +360,7 @@
static void logAudioEncoderCap(const AudioEncoderCap& cap);
static void logVideoDecoderCap(const VideoDecoderCap& cap);
static void logAudioDecoderCap(const AudioDecoderCap& cap);
+ static void logVideoEditorCap(const VideoEditorCap& cap);
// If the xml configuration file does exist, use the settings
// from the xml
@@ -332,6 +372,8 @@
static VideoDecoderCap* createVideoDecoderCap(const char **atts);
static VideoEncoderCap* createVideoEncoderCap(const char **atts);
static AudioEncoderCap* createAudioEncoderCap(const char **atts);
+ static VideoEditorCap* createVideoEditorCap(
+ const char **atts, MediaProfiles *profiles);
static CamcorderProfile* createCamcorderProfile(
int cameraId, const char **atts, Vector<int>& cameraIds);
@@ -375,6 +417,7 @@
static void createDefaultEncoderOutputFileFormats(MediaProfiles *profiles);
static void createDefaultImageEncodingQualityLevels(MediaProfiles *profiles);
static void createDefaultImageDecodingMaxMemory(MediaProfiles *profiles);
+ static void createDefaultVideoEditorCap(MediaProfiles *profiles);
static VideoEncoderCap* createDefaultH263VideoEncoderCap();
static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
static AudioEncoderCap* createDefaultAmrNBEncoderCap();
@@ -431,6 +474,7 @@
RequiredProfiles *mRequiredProfileRefs;
Vector<int> mCameraIds;
+ VideoEditorCap* mVideoEditorCap;
};
}; // namespace android
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index 1c08969..ef799f5 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -26,6 +26,7 @@
class ICameraRecordingProxy;
class Surface;
+class ISurfaceTexture;
struct MediaRecorderBase {
MediaRecorderBase() {}
@@ -54,6 +55,7 @@
virtual status_t reset() = 0;
virtual status_t getMaxAmplitude(int *max) = 0;
virtual status_t dump(int fd, const Vector<String16>& args) const = 0;
+ virtual sp<ISurfaceTexture> querySurfaceMediaSource() const = 0;
private:
MediaRecorderBase(const MediaRecorderBase &);
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index ea5a9d3..1136f6c 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -25,6 +25,8 @@
#include <utils/KeyedVector.h>
#include <utils/String8.h>
+class ANativeWindow;
+
namespace android {
class Surface;
@@ -196,6 +198,8 @@
status_t prepareAsync_l();
status_t getDuration_l(int *msec);
status_t setDataSource(const sp<IMediaPlayer>& player);
+ void disconnectNativeWindow();
+ status_t reset_l();
sp<IMediaPlayer> mPlayer;
thread_id_t mLockThreadId;
@@ -218,6 +222,8 @@
int mVideoHeight;
int mAudioSessionId;
float mSendLevel;
+ sp<ANativeWindow> mConnectedWindow;
+ sp<IBinder> mConnectedWindowBinder;
};
}; // namespace android
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index af12d3c..72d3736 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -31,12 +31,15 @@
class IMediaRecorder;
class ICamera;
class ICameraRecordingProxy;
+class ISurfaceTexture;
+class SurfaceTextureClient;
typedef void (*media_completion_f)(status_t status, void *cookie);
enum video_source {
VIDEO_SOURCE_DEFAULT = 0,
VIDEO_SOURCE_CAMERA = 1,
+ VIDEO_SOURCE_GRALLOC_BUFFER = 2,
VIDEO_SOURCE_LIST_END // must be last - used to validate audio source type
};
@@ -226,6 +229,7 @@
status_t close();
status_t release();
void notify(int msg, int ext1, int ext2);
+ sp<ISurfaceTexture> querySurfaceMediaSourceFromMediaServer();
private:
void doCleanUp();
@@ -233,6 +237,12 @@
sp<IMediaRecorder> mMediaRecorder;
sp<MediaRecorderListener> mListener;
+
+ // Reference to ISurfaceTexture
+ // for encoding GL Frames. That is useful only when the
+ // video source is set to VIDEO_SOURCE_GRALLOC_BUFFER
+ sp<ISurfaceTexture> mSurfaceMediaSource;
+
media_recorder_states mCurrentState;
bool mIsAudioSourceSet;
bool mIsVideoSourceSet;
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
index 765c039..803bffb 100644
--- a/include/media/mediascanner.h
+++ b/include/media/mediascanner.h
@@ -23,23 +23,33 @@
#include <utils/Errors.h>
#include <pthread.h>
+struct dirent;
+
namespace android {
class MediaScannerClient;
class StringArray;
+enum MediaScanResult {
+ // This file or directory was scanned successfully.
+ MEDIA_SCAN_RESULT_OK,
+ // This file or directory was skipped because it was not found, could
+ // not be opened, was of an unsupported type, or was malformed in some way.
+ MEDIA_SCAN_RESULT_SKIPPED,
+ // The scan should be aborted due to a fatal error such as out of memory
+ // or an exception.
+ MEDIA_SCAN_RESULT_ERROR,
+};
+
struct MediaScanner {
MediaScanner();
virtual ~MediaScanner();
- virtual status_t processFile(
- const char *path, const char *mimeType,
- MediaScannerClient &client) = 0;
+ virtual MediaScanResult processFile(
+ const char *path, const char *mimeType, MediaScannerClient &client) = 0;
- typedef bool (*ExceptionCheck)(void* env);
- virtual status_t processDirectory(
- const char *path, MediaScannerClient &client,
- ExceptionCheck exceptionCheck, void *exceptionEnv);
+ virtual MediaScanResult processDirectory(
+ const char *path, MediaScannerClient &client);
void setLocale(const char *locale);
@@ -53,9 +63,11 @@
// current locale (like "ja_JP"), created/destroyed with strdup()/free()
char *mLocale;
- status_t doProcessDirectory(
- char *path, int pathRemaining, MediaScannerClient &client,
- bool noMedia, ExceptionCheck exceptionCheck, void *exceptionEnv);
+ MediaScanResult doProcessDirectory(
+ char *path, int pathRemaining, MediaScannerClient &client, bool noMedia);
+ MediaScanResult doProcessDirectoryEntry(
+ char *path, int pathRemaining, MediaScannerClient &client, bool noMedia,
+ struct dirent* entry, char* fileSpot);
MediaScanner(const MediaScanner &);
MediaScanner &operator=(const MediaScanner &);
@@ -68,13 +80,13 @@
virtual ~MediaScannerClient();
void setLocale(const char* locale);
void beginFile();
- bool addStringTag(const char* name, const char* value);
+ status_t addStringTag(const char* name, const char* value);
void endFile();
- virtual bool scanFile(const char* path, long long lastModified,
+ virtual status_t scanFile(const char* path, long long lastModified,
long long fileSize, bool isDirectory, bool noMedia) = 0;
- virtual bool handleStringTag(const char* name, const char* value) = 0;
- virtual bool setMimeType(const char* mimeType) = 0;
+ virtual status_t handleStringTag(const char* name, const char* value) = 0;
+ virtual status_t setMimeType(const char* mimeType) = 0;
protected:
void convertValues(uint32_t encoding);
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 48d1464..713af92 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -20,6 +20,7 @@
#include <sys/types.h>
+#include <media/stagefright/MediaErrors.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/List.h>
@@ -61,6 +62,10 @@
return 0;
}
+ virtual status_t reconnectAtOffset(off64_t offset) {
+ return ERROR_UNSUPPORTED;
+ }
+
////////////////////////////////////////////////////////////////////////////
bool sniff(String8 *mimeType, float *confidence, sp<AMessage> *meta);
diff --git a/include/media/stagefright/HardwareAPI.h b/include/media/stagefright/HardwareAPI.h
index 946a0aa..32eed3f 100644
--- a/include/media/stagefright/HardwareAPI.h
+++ b/include/media/stagefright/HardwareAPI.h
@@ -99,6 +99,13 @@
OMX_U32 nUsage; // OUT
};
+// An enum OMX_COLOR_FormatAndroidOpaque to indicate an opaque colorformat
+// is declared in media/stagefright/openmax/OMX_IVCommon.h
+// This will inform the encoder that the actual
+// colorformat will be relayed by the GRalloc Buffers.
+// OMX_COLOR_FormatAndroidOpaque = 0x7F000001,
+
+
} // namespace android
extern android::OMXPluginBase *createOMXPlugin();
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index 37dbcd8..3818e63 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -29,7 +29,7 @@
class MediaBuffer;
class MetaData;
-struct MediaSource : public RefBase {
+struct MediaSource : public virtual RefBase {
MediaSource();
// To be called before any other methods on this object, except
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 77492ca..2932744 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -349,6 +349,8 @@
// that encode content of the given type.
// profile and level indications only make sense for h.263, mpeg4 and avc
// video.
+// If hwCodecOnly==true, only returns hardware-based components, software and
+// hardware otherwise.
// The profile/level values correspond to
// OMX_VIDEO_H263PROFILETYPE, OMX_VIDEO_MPEG4PROFILETYPE,
// OMX_VIDEO_AVCPROFILETYPE, OMX_VIDEO_H263LEVELTYPE, OMX_VIDEO_MPEG4LEVELTYPE
@@ -356,6 +358,11 @@
status_t QueryCodecs(
const sp<IOMX> &omx,
+ const char *mimeType, bool queryDecoders, bool hwCodecOnly,
+ Vector<CodecCapabilities> *results);
+
+status_t QueryCodecs(
+ const sp<IOMX> &omx,
const char *mimeType, bool queryDecoders,
Vector<CodecCapabilities> *results);
diff --git a/include/media/stagefright/StagefrightMediaScanner.h b/include/media/stagefright/StagefrightMediaScanner.h
index 108acb4..6510a59 100644
--- a/include/media/stagefright/StagefrightMediaScanner.h
+++ b/include/media/stagefright/StagefrightMediaScanner.h
@@ -26,7 +26,7 @@
StagefrightMediaScanner();
virtual ~StagefrightMediaScanner();
- virtual status_t processFile(
+ virtual MediaScanResult processFile(
const char *path, const char *mimeType,
MediaScannerClient &client);
@@ -35,6 +35,10 @@
private:
StagefrightMediaScanner(const StagefrightMediaScanner &);
StagefrightMediaScanner &operator=(const StagefrightMediaScanner &);
+
+ MediaScanResult processFileInternal(
+ const char *path, const char *mimeType,
+ MediaScannerClient &client);
};
} // namespace android
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
new file mode 100644
index 0000000..56bd9c3
--- /dev/null
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_GUI_SURFACEMEDIASOURCE_H
+#define ANDROID_GUI_SURFACEMEDIASOURCE_H
+
+#include <gui/ISurfaceTexture.h>
+
+#include <utils/threads.h>
+#include <utils/Vector.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaBuffer.h>
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+class IGraphicBufferAlloc;
+class String8;
+class GraphicBuffer;
+
+class SurfaceMediaSource : public BnSurfaceTexture, public MediaSource,
+ public MediaBufferObserver {
+public:
+ enum { MIN_UNDEQUEUED_BUFFERS = 3 };
+ enum {
+ MIN_ASYNC_BUFFER_SLOTS = MIN_UNDEQUEUED_BUFFERS + 1,
+ MIN_SYNC_BUFFER_SLOTS = MIN_UNDEQUEUED_BUFFERS
+ };
+ enum { NUM_BUFFER_SLOTS = 32 };
+ enum { NO_CONNECTED_API = 0 };
+
+ struct FrameAvailableListener : public virtual RefBase {
+ // onFrameAvailable() is called from queueBuffer() if the FIFO is
+ // empty. You can use SurfaceMediaSource::getQueuedCount() to
+ // figure out if there are more frames waiting.
+ // This is called without any lock held and can be called concurrently by
+ // multiple threads.
+ virtual void onFrameAvailable() = 0;
+ };
+
+ SurfaceMediaSource(uint32_t bufW, uint32_t bufH);
+
+ virtual ~SurfaceMediaSource();
+
+
+ // For the MediaSource interface for use by StageFrightRecorder:
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options = NULL);
+ virtual sp<MetaData> getFormat();
+
+ // Get / Set the frame rate used for encoding. Default fps = 30
+ status_t setFrameRate(int32_t fps) ;
+ int32_t getFrameRate( ) const;
+
+ // The call for the StageFrightRecorder to tell us that
+ // it is done using the MediaBuffer data so that its state
+ // can be set to FREE for dequeuing
+ virtual void signalBufferReturned(MediaBuffer* buffer);
+ // end of MediaSource interface
+
+ uint32_t getBufferCount( ) const { return mBufferCount;}
+
+
+ // setBufferCount updates the number of available buffer slots. After
+ // calling this all buffer slots are both unallocated and owned by the
+ // SurfaceMediaSource object (i.e. they are not owned by the client).
+ virtual status_t setBufferCount(int bufferCount);
+
+ virtual status_t requestBuffer(int slot, sp<GraphicBuffer>* buf);
+
+ // dequeueBuffer gets the next buffer slot index for the client to use. If a
+ // buffer slot is available then that slot index is written to the location
+ // pointed to by the buf argument and a status of OK is returned. If no
+ // slot is available then a status of -EBUSY is returned and buf is
+ // unmodified.
+ virtual status_t dequeueBuffer(int *buf, uint32_t w, uint32_t h,
+ uint32_t format, uint32_t usage);
+
+ // queueBuffer returns a filled buffer to the SurfaceMediaSource. In addition, a
+ // timestamp must be provided for the buffer. The timestamp is in
+ // nanoseconds, and must be monotonically increasing. Its other semantics
+ // (zero point, etc) are client-dependent and should be documented by the
+ // client.
+ virtual status_t queueBuffer(int buf, int64_t timestamp,
+ uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform);
+ virtual void cancelBuffer(int buf);
+
+ // onFrameReceivedLocked informs the buffer consumers (StageFrightRecorder)
+ // or listeners that a frame has been received
+ // The buffer is not made available for dequeueing immediately. We need to
+ // wait to hear from StageFrightRecorder to set the buffer FREE
+ // Make sure this is called when the mutex is locked
+ virtual status_t onFrameReceivedLocked();
+
+ virtual status_t setScalingMode(int mode) { } // no op for encoding
+ virtual int query(int what, int* value);
+
+ // Just conforming to the ISurfaceTexture interface as of now
+ virtual status_t setCrop(const Rect& reg) { return OK; }
+ virtual status_t setTransform(uint32_t transform) {return OK;}
+
+ // setSynchronousMode set whether dequeueBuffer is synchronous or
+ // asynchronous. In synchronous mode, dequeueBuffer blocks until
+ // a buffer is available, the currently bound buffer can be dequeued and
+ // queued buffers will be retired in order.
+ // The default mode is synchronous.
+ // TODO: Clarify the minute differences between sync/async
+ // modes (S.Encoder vis-a-vis SurfaceTexture)
+ virtual status_t setSynchronousMode(bool enabled);
+
+ // connect attempts to connect a client API to the SurfaceMediaSource. This
+ // must be called before any other ISurfaceTexture methods are called except
+ // for getAllocator.
+ //
+ // This method will fail if the connect was previously called on the
+ // SurfaceMediaSource and no corresponding disconnect call was made.
+ virtual status_t connect(int api);
+
+ // disconnect attempts to disconnect a client API from the SurfaceMediaSource.
+ // Calling this method will cause any subsequent calls to other
+ // ISurfaceTexture methods to fail except for getAllocator and connect.
+ // Successfully calling connect after this will allow the other methods to
+ // succeed again.
+ //
+ // This method will fail if the SurfaceMediaSource is not currently
+ // connected to the specified client API.
+ virtual status_t disconnect(int api);
+
+ // getqueuedCount returns the number of queued frames waiting in the
+ // FIFO. In asynchronous mode, this always returns 0 or 1 since
+ // frames are not accumulating in the FIFO.
+ size_t getQueuedCount() const;
+
+ // setBufferCountServer set the buffer count. If the client has requested
+ // a buffer count using setBufferCount, the server-buffer count will
+ // take effect once the client sets the count back to zero.
+ status_t setBufferCountServer(int bufferCount);
+
+ // getTimestamp retrieves the timestamp associated with the image
+ // set by the most recent call to updateFrameInfoLocked().
+ //
+ // The timestamp is in nanoseconds, and is monotonically increasing. Its
+ // other semantics (zero point, etc) are source-dependent and should be
+ // documented by the source.
+ int64_t getTimestamp();
+
+ // setFrameAvailableListener sets the listener object that will be notified
+ // when a new frame becomes available.
+ void setFrameAvailableListener(const sp<FrameAvailableListener>& listener);
+
+ // getCurrentBuffer returns the buffer associated with the current image.
+ sp<GraphicBuffer> getCurrentBuffer() const;
+
+ // dump our state in a String
+ void dump(String8& result) const;
+ void dump(String8& result, const char* prefix, char* buffer,
+ size_t SIZE) const;
+
+ // isMetaDataStoredInVideoBuffers tells the encoder whether we will
+ // pass metadata through the buffers. Currently, it is force set to true
+ bool isMetaDataStoredInVideoBuffers() const;
+
+protected:
+
+ // freeAllBuffers frees the resources (both GraphicBuffer and EGLImage) for
+ // all slots.
+ void freeAllBuffers();
+ static bool isExternalFormat(uint32_t format);
+
+private:
+
+ status_t setBufferCountServerLocked(int bufferCount);
+
+ enum { INVALID_BUFFER_SLOT = -1 };
+
+ struct BufferSlot {
+
+ BufferSlot()
+ : mBufferState(BufferSlot::FREE),
+ mRequestBufferCalled(false),
+ mTimestamp(0) {
+ }
+
+ // mGraphicBuffer points to the buffer allocated for this slot or is
+ // NULL if no buffer has been allocated.
+ sp<GraphicBuffer> mGraphicBuffer;
+
+ // BufferState represents the different states in which a buffer slot
+ // can be.
+ enum BufferState {
+ // FREE indicates that the buffer is not currently being used and
+ // will not be used in the future until it gets dequeued and
+ // subsequently queued by the client.
+ FREE = 0,
+
+ // DEQUEUED indicates that the buffer has been dequeued by the
+ // client, but has not yet been queued or canceled. The buffer is
+ // considered 'owned' by the client, and the server should not use
+ // it for anything.
+ //
+ // Note that when in synchronous-mode (mSynchronousMode == true),
+ // the buffer that's currently attached to the texture may be
+ // dequeued by the client. That means that the current buffer can
+ // be in either the DEQUEUED or QUEUED state. In asynchronous mode,
+ // however, the current buffer is always in the QUEUED state.
+ DEQUEUED = 1,
+
+ // QUEUED indicates that the buffer has been queued by the client,
+ // and has not since been made available for the client to dequeue.
+ // Attaching the buffer to the texture does NOT transition the
+ // buffer away from the QUEUED state. However, in Synchronous mode
+ // the current buffer may be dequeued by the client under some
+ // circumstances. See the note about the current buffer in the
+ // documentation for DEQUEUED.
+ QUEUED = 2,
+ };
+
+ // mBufferState is the current state of this buffer slot.
+ BufferState mBufferState;
+
+ // mRequestBufferCalled is used for validating that the client did
+ // call requestBuffer() when told to do so. Technically this is not
+ // needed but useful for debugging and catching client bugs.
+ bool mRequestBufferCalled;
+
+ // mTimestamp is the current timestamp for this buffer slot. This gets
+ // to set by queueBuffer each time this slot is queued.
+ int64_t mTimestamp;
+ };
+
+ // mSlots is the array of buffer slots that must be mirrored on the client
+ // side. This allows buffer ownership to be transferred between the client
+ // and server without sending a GraphicBuffer over binder. The entire array
+ // is initialized to NULL at construction time, and buffers are allocated
+ // for a slot when requestBuffer is called with that slot's index.
+ BufferSlot mSlots[NUM_BUFFER_SLOTS];
+
+ // mDefaultWidth holds the default width of allocated buffers. It is used
+ // in requestBuffers() if a width and height of zero is specified.
+ uint32_t mDefaultWidth;
+
+ // mDefaultHeight holds the default height of allocated buffers. It is used
+ // in requestBuffers() if a width and height of zero is specified.
+ uint32_t mDefaultHeight;
+
+ // mPixelFormat holds the pixel format of allocated buffers. It is used
+ // in requestBuffers() if a format of zero is specified.
+ uint32_t mPixelFormat;
+
+ // mBufferCount is the number of buffer slots that the client and server
+ // must maintain. It defaults to MIN_ASYNC_BUFFER_SLOTS and can be changed
+ // by calling setBufferCount or setBufferCountServer
+ int mBufferCount;
+
+ // mClientBufferCount is the number of buffer slots requested by the
+ // client. The default is zero, which means the client doesn't care how
+ // many buffers there are
+ int mClientBufferCount;
+
+ // mServerBufferCount buffer count requested by the server-side
+ int mServerBufferCount;
+
+ // mCurrentSlot is the buffer slot index of the buffer that is currently
+ // being used by buffer consumer
+ // (e.g. StageFrightRecorder in the case of SurfaceMediaSource or GLTexture
+ // in the case of SurfaceTexture).
+ // It is initialized to INVALID_BUFFER_SLOT,
+ // indicating that no buffer slot is currently bound to the texture. Note,
+ // however, that a value of INVALID_BUFFER_SLOT does not necessarily mean
+ // that no buffer is bound to the texture. A call to setBufferCount will
+ // reset mCurrentTexture to INVALID_BUFFER_SLOT.
+ int mCurrentSlot;
+
+
+ // mCurrentBuf is the graphic buffer of the current slot to be used by
+ // buffer consumer. It's possible that this buffer is not associated
+ // with any buffer slot, so we must track it separately in order to
+ // properly use IGraphicBufferAlloc::freeAllGraphicBuffersExcept.
+ sp<GraphicBuffer> mCurrentBuf;
+
+
+ // mCurrentTimestamp is the timestamp for the current texture. It
+ // gets set to mLastQueuedTimestamp each time updateTexImage is called.
+ int64_t mCurrentTimestamp;
+
+ // mGraphicBufferAlloc is the connection to SurfaceFlinger that is used to
+ // allocate new GraphicBuffer objects.
+ sp<IGraphicBufferAlloc> mGraphicBufferAlloc;
+
+ // mFrameAvailableListener is the listener object that will be called when a
+ // new frame becomes available. If it is not NULL it will be called from
+ // queueBuffer.
+ sp<FrameAvailableListener> mFrameAvailableListener;
+
+ // mSynchronousMode whether we're in synchronous mode or not
+ bool mSynchronousMode;
+
+ // mConnectedApi indicates the API that is currently connected to this
+ // SurfaceTexture. It defaults to NO_CONNECTED_API (= 0), and gets updated
+ // by the connect and disconnect methods.
+ int mConnectedApi;
+
+ // mDequeueCondition condition used for dequeueBuffer in synchronous mode
+ mutable Condition mDequeueCondition;
+
+
+ // mQueue is a FIFO of queued buffers used in synchronous mode
+ typedef Vector<int> Fifo;
+ Fifo mQueue;
+
+ // mMutex is the mutex used to prevent concurrent access to the member
+ // variables of SurfaceMediaSource objects. It must be locked whenever the
+ // member variables are accessed.
+ mutable Mutex mMutex;
+
+ ////////////////////////// For MediaSource
+ // Set to a default of 30 fps if not specified by the client side
+ int32_t mFrameRate;
+
+ // mStarted is a flag to check if the recording has started
+ bool mStarted;
+
+ // mFrameAvailableCondition condition used to indicate whether there
+ // is a frame available for dequeuing
+ Condition mFrameAvailableCondition;
+ Condition mFrameCompleteCondition;
+
+ // Avoid copying and equating and default constructor
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SurfaceMediaSource);
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
+#endif // ANDROID_GUI_SURFACEMEDIASOURCE_H
diff --git a/include/media/stagefright/openmax/OMX_IVCommon.h b/include/media/stagefright/openmax/OMX_IVCommon.h
index 7ed072b..97170d7 100644
--- a/include/media/stagefright/openmax/OMX_IVCommon.h
+++ b/include/media/stagefright/openmax/OMX_IVCommon.h
@@ -16,29 +16,29 @@
* -------------------------------------------------------------------
*/
/**
- * Copyright (c) 2008 The Khronos Group Inc.
- *
+ * Copyright (c) 2008 The Khronos Group Inc.
+ *
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject
- * to the following conditions:
+ * to the following conditions:
* The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
+ * in all copies or substantial portions of the Software.
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-/**
+/**
* @file OMX_IVCommon.h - OpenMax IL version 1.1.2
* The structures needed by Video and Image components to exchange
* parameters and configuration data with the components.
@@ -53,7 +53,7 @@
/**
* Each OMX header must include all required header files to allow the header
* to compile without errors. The includes below are required for this header
- * file to compile successfully
+ * file to compile successfully
*/
#include <OMX_Core.h>
@@ -64,8 +64,8 @@
*/
-/**
- * Enumeration defining possible uncompressed image/video formats.
+/**
+ * Enumeration defining possible uncompressed image/video formats.
*
* ENUMS:
* Unused : Placeholder value when format is N/A
@@ -113,7 +113,7 @@
OMX_COLOR_Format16bitBGR565,
OMX_COLOR_Format18bitRGB666,
OMX_COLOR_Format18bitARGB1665,
- OMX_COLOR_Format19bitARGB1666,
+ OMX_COLOR_Format19bitARGB1666,
OMX_COLOR_Format24bitRGB888,
OMX_COLOR_Format24bitBGR888,
OMX_COLOR_Format24bitARGB1887,
@@ -136,55 +136,62 @@
OMX_COLOR_FormatRawBayer8bit,
OMX_COLOR_FormatRawBayer10bit,
OMX_COLOR_FormatRawBayer8bitcompressed,
- OMX_COLOR_FormatL2,
- OMX_COLOR_FormatL4,
- OMX_COLOR_FormatL8,
- OMX_COLOR_FormatL16,
- OMX_COLOR_FormatL24,
+ OMX_COLOR_FormatL2,
+ OMX_COLOR_FormatL4,
+ OMX_COLOR_FormatL8,
+ OMX_COLOR_FormatL16,
+ OMX_COLOR_FormatL24,
OMX_COLOR_FormatL32,
OMX_COLOR_FormatYUV420PackedSemiPlanar,
OMX_COLOR_FormatYUV422PackedSemiPlanar,
OMX_COLOR_Format18BitBGR666,
OMX_COLOR_Format24BitARGB6666,
OMX_COLOR_Format24BitABGR6666,
- OMX_COLOR_FormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_COLOR_FormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_COLOR_FormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
+ /**<Reserved android opaque colorformat. Tells the encoder that
+ * the actual colorformat will be relayed by the
+ * Gralloc Buffers.
+ * FIXME: In the process of reserving some enum values for
+ * Android-specific OMX IL colorformats. Change this enum to
+ * an acceptable range once that is done.*/
+ OMX_COLOR_FormatAndroidOpaque = 0x7F000001,
OMX_TI_COLOR_FormatYUV420PackedSemiPlanar = 0x7F000100,
OMX_QCOM_COLOR_FormatYVU420SemiPlanar = 0x7FA30C00,
OMX_COLOR_FormatMax = 0x7FFFFFFF
} OMX_COLOR_FORMATTYPE;
-/**
+/**
* Defines the matrix for conversion from RGB to YUV or vice versa.
- * iColorMatrix should be initialized with the fixed point values
+ * iColorMatrix should be initialized with the fixed point values
* used in converting between formats.
*/
typedef struct OMX_CONFIG_COLORCONVERSIONTYPE {
OMX_U32 nSize; /**< Size of the structure in bytes */
- OMX_VERSIONTYPE nVersion; /**< OMX specification version info */
+ OMX_VERSIONTYPE nVersion; /**< OMX specification version info */
OMX_U32 nPortIndex; /**< Port that this struct applies to */
OMX_S32 xColorMatrix[3][3]; /**< Stored in signed Q16 format */
OMX_S32 xColorOffset[4]; /**< Stored in signed Q16 format */
}OMX_CONFIG_COLORCONVERSIONTYPE;
-/**
- * Structure defining percent to scale each frame dimension. For example:
+/**
+ * Structure defining percent to scale each frame dimension. For example:
* To make the width 50% larger, use fWidth = 1.5 and to make the width
* 1/2 the original size, use fWidth = 0.5
*/
typedef struct OMX_CONFIG_SCALEFACTORTYPE {
OMX_U32 nSize; /**< Size of the structure in bytes */
- OMX_VERSIONTYPE nVersion; /**< OMX specification version info */
+ OMX_VERSIONTYPE nVersion; /**< OMX specification version info */
OMX_U32 nPortIndex; /**< Port that this struct applies to */
OMX_S32 xWidth; /**< Fixed point value stored as Q16 */
OMX_S32 xHeight; /**< Fixed point value stored as Q16 */
}OMX_CONFIG_SCALEFACTORTYPE;
-/**
- * Enumeration of possible image filter types
+/**
+ * Enumeration of possible image filter types
*/
typedef enum OMX_IMAGEFILTERTYPE {
OMX_ImageFilterNone,
@@ -195,23 +202,23 @@
OMX_ImageFilterOilPaint,
OMX_ImageFilterHatch,
OMX_ImageFilterGpen,
- OMX_ImageFilterAntialias,
- OMX_ImageFilterDeRing,
+ OMX_ImageFilterAntialias,
+ OMX_ImageFilterDeRing,
OMX_ImageFilterSolarize,
- OMX_ImageFilterKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_ImageFilterKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_ImageFilterVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_ImageFilterMax = 0x7FFFFFFF
} OMX_IMAGEFILTERTYPE;
-/**
- * Image filter configuration
+/**
+ * Image filter configuration
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * eImageFilter : Image filter type enumeration
+ * nPortIndex : Port that this structure applies to
+ * eImageFilter : Image filter type enumeration
*/
typedef struct OMX_CONFIG_IMAGEFILTERTYPE {
OMX_U32 nSize;
@@ -221,22 +228,22 @@
} OMX_CONFIG_IMAGEFILTERTYPE;
-/**
- * Customized U and V for color enhancement
+/**
+ * Customized U and V for color enhancement
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
+ * nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
* bColorEnhancement : Enable/disable color enhancement
- * nCustomizedU : Practical values: 16-240, range: 0-255, value set for
+ * nCustomizedU : Practical values: 16-240, range: 0-255, value set for
* U component
- * nCustomizedV : Practical values: 16-240, range: 0-255, value set for
+ * nCustomizedV : Practical values: 16-240, range: 0-255, value set for
* V component
*/
typedef struct OMX_CONFIG_COLORENHANCEMENTTYPE {
OMX_U32 nSize;
- OMX_VERSIONTYPE nVersion;
+ OMX_VERSIONTYPE nVersion;
OMX_U32 nPortIndex;
OMX_BOOL bColorEnhancement;
OMX_U8 nCustomizedU;
@@ -244,12 +251,12 @@
} OMX_CONFIG_COLORENHANCEMENTTYPE;
-/**
- * Define color key and color key mask
+/**
+ * Define color key and color key mask
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
+ * nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
* nARGBColor : 32bit Alpha, Red, Green, Blue Color
* nARGBMask : 32bit Mask for Alpha, Red, Green, Blue channels
@@ -263,12 +270,12 @@
} OMX_CONFIG_COLORKEYTYPE;
-/**
- * List of color blend types for pre/post processing
+/**
+ * List of color blend types for pre/post processing
*
* ENUMS:
* None : No color blending present
- * AlphaConstant : Function is (alpha_constant * src) +
+ * AlphaConstant : Function is (alpha_constant * src) +
* (1 - alpha_constant) * dst)
* AlphaPerPixel : Function is (alpha * src) + (1 - alpha) * dst)
* Alternate : Function is alternating pixels from src and dst
@@ -284,21 +291,21 @@
OMX_ColorBlendAnd,
OMX_ColorBlendOr,
OMX_ColorBlendInvert,
- OMX_ColorBlendKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_ColorBlendKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_ColorBlendVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_ColorBlendMax = 0x7FFFFFFF
} OMX_COLORBLENDTYPE;
-/**
- * Color blend configuration
+/**
+ * Color blend configuration
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
+ * nSize : Size of the structure in bytes
+ * nVersion : OMX specification version information
+ * nPortIndex : Port that this structure applies to
* nRGBAlphaConstant : Constant global alpha values when global alpha is used
- * eColorBlend : Color blend type enumeration
+ * eColorBlend : Color blend type enumeration
*/
typedef struct OMX_CONFIG_COLORBLENDTYPE {
OMX_U32 nSize;
@@ -309,15 +316,15 @@
} OMX_CONFIG_COLORBLENDTYPE;
-/**
+/**
* Hold frame dimension
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * nWidth : Frame width in pixels
- * nHeight : Frame height in pixels
+ * nPortIndex : Port that this structure applies to
+ * nWidth : Frame width in pixels
+ * nHeight : Frame height in pixels
*/
typedef struct OMX_FRAMESIZETYPE {
OMX_U32 nSize;
@@ -329,69 +336,69 @@
/**
- * Rotation configuration
+ * Rotation configuration
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
- * nRotation : +/- integer rotation value
+ * nRotation : +/- integer rotation value
*/
typedef struct OMX_CONFIG_ROTATIONTYPE {
OMX_U32 nSize;
OMX_VERSIONTYPE nVersion;
OMX_U32 nPortIndex;
- OMX_S32 nRotation;
+ OMX_S32 nRotation;
} OMX_CONFIG_ROTATIONTYPE;
-/**
- * Possible mirroring directions for pre/post processing
+/**
+ * Possible mirroring directions for pre/post processing
*
* ENUMS:
- * None : No mirroring
- * Vertical : Vertical mirroring, flip on X axis
- * Horizontal : Horizontal mirroring, flip on Y axis
+ * None : No mirroring
+ * Vertical : Vertical mirroring, flip on X axis
+ * Horizontal : Horizontal mirroring, flip on Y axis
* Both : Both vertical and horizontal mirroring
*/
typedef enum OMX_MIRRORTYPE {
OMX_MirrorNone = 0,
OMX_MirrorVertical,
OMX_MirrorHorizontal,
- OMX_MirrorBoth,
- OMX_MirrorKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_MirrorBoth,
+ OMX_MirrorKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_MirrorVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
- OMX_MirrorMax = 0x7FFFFFFF
+ OMX_MirrorMax = 0x7FFFFFFF
} OMX_MIRRORTYPE;
-/**
- * Mirroring configuration
+/**
+ * Mirroring configuration
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * eMirror : Mirror type enumeration
+ * nPortIndex : Port that this structure applies to
+ * eMirror : Mirror type enumeration
*/
typedef struct OMX_CONFIG_MIRRORTYPE {
OMX_U32 nSize;
- OMX_VERSIONTYPE nVersion;
+ OMX_VERSIONTYPE nVersion;
OMX_U32 nPortIndex;
OMX_MIRRORTYPE eMirror;
} OMX_CONFIG_MIRRORTYPE;
-/**
- * Position information only
+/**
+ * Position information only
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
- * nX : X coordinate for the point
- * nY : Y coordinate for the point
- */
+ * nX : X coordinate for the point
+ * nY : Y coordinate for the point
+ */
typedef struct OMX_CONFIG_POINTTYPE {
OMX_U32 nSize;
OMX_VERSIONTYPE nVersion;
@@ -401,37 +408,37 @@
} OMX_CONFIG_POINTTYPE;
-/**
- * Frame size plus position
+/**
+ * Frame size plus position
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
+ * nSize : Size of the structure in bytes
+ * nVersion : OMX specification version information
+ * nPortIndex : Port that this structure applies to
* nLeft : X Coordinate of the top left corner of the rectangle
* nTop : Y Coordinate of the top left corner of the rectangle
- * nWidth : Width of the rectangle
- * nHeight : Height of the rectangle
+ * nWidth : Width of the rectangle
+ * nHeight : Height of the rectangle
*/
typedef struct OMX_CONFIG_RECTTYPE {
OMX_U32 nSize;
- OMX_VERSIONTYPE nVersion;
- OMX_U32 nPortIndex;
- OMX_S32 nLeft;
+ OMX_VERSIONTYPE nVersion;
+ OMX_U32 nPortIndex;
+ OMX_S32 nLeft;
OMX_S32 nTop;
OMX_U32 nWidth;
OMX_U32 nHeight;
} OMX_CONFIG_RECTTYPE;
-/**
- * Deblocking state; it is required to be set up before starting the codec
+/**
+ * Deblocking state; it is required to be set up before starting the codec
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
+ * nSize : Size of the structure in bytes
+ * nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
- * bDeblocking : Enable/disable deblocking mode
+ * bDeblocking : Enable/disable deblocking mode
*/
typedef struct OMX_PARAM_DEBLOCKINGTYPE {
OMX_U32 nSize;
@@ -441,13 +448,13 @@
} OMX_PARAM_DEBLOCKINGTYPE;
-/**
- * Stabilization state
+/**
+ * Stabilization state
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
+ * nSize : Size of the structure in bytes
+ * nVersion : OMX specification version information
+ * nPortIndex : Port that this structure applies to
* bStab : Enable/disable frame stabilization state
*/
typedef struct OMX_CONFIG_FRAMESTABTYPE {
@@ -458,8 +465,8 @@
} OMX_CONFIG_FRAMESTABTYPE;
-/**
- * White Balance control type
+/**
+ * White Balance control type
*
* STRUCT MEMBERS:
* SunLight : Referenced in JSR-234
@@ -476,20 +483,20 @@
OMX_WhiteBalControlIncandescent,
OMX_WhiteBalControlFlash,
OMX_WhiteBalControlHorizon,
- OMX_WhiteBalControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_WhiteBalControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_WhiteBalControlVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_WhiteBalControlMax = 0x7FFFFFFF
} OMX_WHITEBALCONTROLTYPE;
-/**
- * White Balance control configuration
+/**
+ * White Balance control configuration
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * eWhiteBalControl : White balance enumeration
+ * nPortIndex : Port that this structure applies to
+ * eWhiteBalControl : White balance enumeration
*/
typedef struct OMX_CONFIG_WHITEBALCONTROLTYPE {
OMX_U32 nSize;
@@ -499,8 +506,8 @@
} OMX_CONFIG_WHITEBALCONTROLTYPE;
-/**
- * Exposure control type
+/**
+ * Exposure control type
*/
typedef enum OMX_EXPOSURECONTROLTYPE {
OMX_ExposureControlOff = 0,
@@ -513,20 +520,20 @@
OMX_ExposureControlBeach,
OMX_ExposureControlLargeAperture,
OMX_ExposureControlSmallApperture,
- OMX_ExposureControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_ExposureControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_ExposureControlVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_ExposureControlMax = 0x7FFFFFFF
} OMX_EXPOSURECONTROLTYPE;
-/**
- * White Balance control configuration
+/**
+ * White Balance control configuration
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * eExposureControl : Exposure control enumeration
+ * nPortIndex : Port that this structure applies to
+ * eExposureControl : Exposure control enumeration
*/
typedef struct OMX_CONFIG_EXPOSURECONTROLTYPE {
OMX_U32 nSize;
@@ -536,16 +543,16 @@
} OMX_CONFIG_EXPOSURECONTROLTYPE;
-/**
- * Defines sensor supported mode.
+/**
+ * Defines sensor supported mode.
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * nFrameRate : Single shot mode is indicated by a 0
+ * nPortIndex : Port that this structure applies to
+ * nFrameRate : Single shot mode is indicated by a 0
* bOneShot : Enable for single shot, disable for streaming
- * sFrameSize : Framesize
+ * sFrameSize : Framesize
*/
typedef struct OMX_PARAM_SENSORMODETYPE {
OMX_U32 nSize;
@@ -557,13 +564,13 @@
} OMX_PARAM_SENSORMODETYPE;
-/**
- * Defines contrast level
+/**
+ * Defines contrast level
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
+ * nSize : Size of the structure in bytes
+ * nVersion : OMX specification version information
+ * nPortIndex : Port that this structure applies to
* nContrast : Values allowed for contrast -100 to 100, zero means no change
*/
typedef struct OMX_CONFIG_CONTRASTTYPE {
@@ -574,14 +581,14 @@
} OMX_CONFIG_CONTRASTTYPE;
-/**
- * Defines brightness level
+/**
+ * Defines brightness level
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * nBrightness : 0-100%
+ * nSize : Size of the structure in bytes
+ * nVersion : OMX specification version information
+ * nPortIndex : Port that this structure applies to
+ * nBrightness : 0-100%
*/
typedef struct OMX_CONFIG_BRIGHTNESSTYPE {
OMX_U32 nSize;
@@ -591,16 +598,16 @@
} OMX_CONFIG_BRIGHTNESSTYPE;
-/**
- * Defines backlight level configuration for a video sink, e.g. LCD panel
+/**
+ * Defines backlight level configuration for a video sink, e.g. LCD panel
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
+ * nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
* nBacklight : Values allowed for backlight 0-100%
- * nTimeout : Number of milliseconds before backlight automatically turns
- * off. A value of 0x0 disables backight timeout
+ * nTimeout : Number of milliseconds before backlight automatically turns
+ * off. A value of 0x0 disables backight timeout
*/
typedef struct OMX_CONFIG_BACKLIGHTTYPE {
OMX_U32 nSize;
@@ -611,12 +618,12 @@
} OMX_CONFIG_BACKLIGHTTYPE;
-/**
- * Defines setting for Gamma
+/**
+ * Defines setting for Gamma
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
+ * nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
* nGamma : Values allowed for gamma -100 to 100, zero means no change
*/
@@ -628,14 +635,14 @@
} OMX_CONFIG_GAMMATYPE;
-/**
- * Define for setting saturation
- *
+/**
+ * Define for setting saturation
+ *
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
* nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
- * nSaturation : Values allowed for saturation -100 to 100, zero means
+ * nSaturation : Values allowed for saturation -100 to 100, zero means
* no change
*/
typedef struct OMX_CONFIG_SATURATIONTYPE {
@@ -646,14 +653,14 @@
} OMX_CONFIG_SATURATIONTYPE;
-/**
- * Define for setting Lightness
+/**
+ * Define for setting Lightness
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
* nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
- * nLightness : Values allowed for lightness -100 to 100, zero means no
+ * nLightness : Values allowed for lightness -100 to 100, zero means no
* change
*/
typedef struct OMX_CONFIG_LIGHTNESSTYPE {
@@ -664,17 +671,17 @@
} OMX_CONFIG_LIGHTNESSTYPE;
-/**
- * Plane blend configuration
+/**
+ * Plane blend configuration
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
+ * nSize : Size of the structure in bytes
* nVersion : OMX specification version information
* nPortIndex : Index of input port associated with the plane.
- * nDepth : Depth of the plane in relation to the screen. Higher
- * numbered depths are "behind" lower number depths.
+ * nDepth : Depth of the plane in relation to the screen. Higher
+ * numbered depths are "behind" lower number depths.
* This number defaults to the Port Index number.
- * nAlpha : Transparency blending component for the entire plane.
+ * nAlpha : Transparency blending component for the entire plane.
* See blending modes for more detail.
*/
typedef struct OMX_CONFIG_PLANEBLENDTYPE {
@@ -686,17 +693,17 @@
} OMX_CONFIG_PLANEBLENDTYPE;
-/**
+/**
* Define interlace type
*
* STRUCT MEMBERS:
- * nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
+ * nSize : Size of the structure in bytes
+ * nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
- * bEnable : Enable control variable for this functionality
+ * bEnable : Enable control variable for this functionality
* (see below)
- * nInterleavePortIndex : Index of input or output port associated with
- * the interleaved plane.
+ * nInterleavePortIndex : Index of input or output port associated with
+ * the interleaved plane.
* pPlanarPortIndexes[4] : Index of input or output planar ports.
*/
typedef struct OMX_PARAM_INTERLEAVETYPE {
@@ -708,8 +715,8 @@
} OMX_PARAM_INTERLEAVETYPE;
-/**
- * Defines the picture effect used for an input picture
+/**
+ * Defines the picture effect used for an input picture
*/
typedef enum OMX_TRANSITIONEFFECTTYPE {
OMX_EffectNone,
@@ -719,18 +726,18 @@
OMX_EffectDissolve,
OMX_EffectWipe,
OMX_EffectUnspecifiedMixOfTwoScenes,
- OMX_EffectKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_EffectKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_EffectVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_EffectMax = 0x7FFFFFFF
} OMX_TRANSITIONEFFECTTYPE;
-/**
- * Structure used to configure current transition effect
+/**
+ * Structure used to configure current transition effect
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
- * nVersion : OMX specification version information
+ * nVersion : OMX specification version information
* nPortIndex : Port that this structure applies to
* eEffect : Effect to enable
*/
@@ -742,43 +749,43 @@
} OMX_CONFIG_TRANSITIONEFFECTTYPE;
-/**
- * Defines possible data unit types for encoded video data. The data unit
+/**
+ * Defines possible data unit types for encoded video data. The data unit
* types are used both for encoded video input for playback as well as
- * encoded video output from recording.
+ * encoded video output from recording.
*/
typedef enum OMX_DATAUNITTYPE {
OMX_DataUnitCodedPicture,
OMX_DataUnitVideoSegment,
OMX_DataUnitSeveralSegments,
OMX_DataUnitArbitraryStreamSection,
- OMX_DataUnitKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_DataUnitKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_DataUnitVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_DataUnitMax = 0x7FFFFFFF
} OMX_DATAUNITTYPE;
-/**
- * Defines possible encapsulation types for coded video data unit. The
- * encapsulation information is used both for encoded video input for
- * playback as well as encoded video output from recording.
+/**
+ * Defines possible encapsulation types for coded video data unit. The
+ * encapsulation information is used both for encoded video input for
+ * playback as well as encoded video output from recording.
*/
typedef enum OMX_DATAUNITENCAPSULATIONTYPE {
OMX_DataEncapsulationElementaryStream,
OMX_DataEncapsulationGenericPayload,
OMX_DataEncapsulationRtpPayload,
- OMX_DataEncapsulationKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_DataEncapsulationKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_DataEncapsulationVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_DataEncapsulationMax = 0x7FFFFFFF
} OMX_DATAUNITENCAPSULATIONTYPE;
-/**
- * Structure used to configure the type of being decoded/encoded
+/**
+ * Structure used to configure the type of being decoded/encoded
*/
typedef struct OMX_PARAM_DATAUNITTYPE {
OMX_U32 nSize; /**< Size of the structure in bytes */
- OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
+ OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
OMX_U32 nPortIndex; /**< Port that this structure applies to */
OMX_DATAUNITTYPE eUnitType;
OMX_DATAUNITENCAPSULATIONTYPE eEncapsulationType;
@@ -786,25 +793,25 @@
/**
- * Defines dither types
+ * Defines dither types
*/
typedef enum OMX_DITHERTYPE {
OMX_DitherNone,
OMX_DitherOrdered,
OMX_DitherErrorDiffusion,
OMX_DitherOther,
- OMX_DitherKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_DitherKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_DitherVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_DitherMax = 0x7FFFFFFF
} OMX_DITHERTYPE;
-/**
- * Structure used to configure current type of dithering
+/**
+ * Structure used to configure current type of dithering
*/
typedef struct OMX_CONFIG_DITHERTYPE {
OMX_U32 nSize; /**< Size of the structure in bytes */
- OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
+ OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
OMX_U32 nPortIndex; /**< Port that this structure applies to */
OMX_DITHERTYPE eDither; /**< Type of dithering to use */
} OMX_CONFIG_DITHERTYPE;
@@ -813,28 +820,28 @@
OMX_U32 nSize;
OMX_VERSIONTYPE nVersion;
OMX_U32 nPortIndex; /**< Port that this structure applies to */
- OMX_BOOL bContinuous; /**< If true then ignore frame rate and emit capture
+ OMX_BOOL bContinuous; /**< If true then ignore frame rate and emit capture
* data as fast as possible (otherwise obey port's frame rate). */
- OMX_BOOL bFrameLimited; /**< If true then terminate capture after the port emits the
- * specified number of frames (otherwise the port does not
- * terminate the capture until instructed to do so by the client).
- * Even if set, the client may manually terminate the capture prior
+ OMX_BOOL bFrameLimited; /**< If true then terminate capture after the port emits the
+ * specified number of frames (otherwise the port does not
+ * terminate the capture until instructed to do so by the client).
+ * Even if set, the client may manually terminate the capture prior
* to reaching the limit. */
OMX_U32 nFrameLimit; /**< Limit on number of frames emitted during a capture (only
* valid if bFrameLimited is set). */
} OMX_CONFIG_CAPTUREMODETYPE;
typedef enum OMX_METERINGTYPE {
-
+
OMX_MeteringModeAverage, /**< Center-weighted average metering. */
OMX_MeteringModeSpot, /**< Spot (partial) metering. */
OMX_MeteringModeMatrix, /**< Matrix or evaluative metering. */
-
- OMX_MeteringKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+
+ OMX_MeteringKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_MeteringVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_EVModeMax = 0x7fffffff
} OMX_METERINGTYPE;
-
+
typedef struct OMX_CONFIG_EXPOSUREVALUETYPE {
OMX_U32 nSize;
OMX_VERSIONTYPE nVersion;
@@ -843,14 +850,14 @@
OMX_S32 xEVCompensation; /**< Fixed point value stored as Q16 */
OMX_U32 nApertureFNumber; /**< e.g. nApertureFNumber = 2 implies "f/2" - Q16 format */
OMX_BOOL bAutoAperture; /**< Whether aperture number is defined automatically */
- OMX_U32 nShutterSpeedMsec; /**< Shutterspeed in milliseconds */
- OMX_BOOL bAutoShutterSpeed; /**< Whether shutter speed is defined automatically */
+ OMX_U32 nShutterSpeedMsec; /**< Shutterspeed in milliseconds */
+ OMX_BOOL bAutoShutterSpeed; /**< Whether shutter speed is defined automatically */
OMX_U32 nSensitivity; /**< e.g. nSensitivity = 100 implies "ISO 100" */
OMX_BOOL bAutoSensitivity; /**< Whether sensitivity is defined automatically */
} OMX_CONFIG_EXPOSUREVALUETYPE;
-/**
- * Focus region configuration
+/**
+ * Focus region configuration
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
@@ -881,8 +888,8 @@
OMX_BOOL bBottomRight;
} OMX_CONFIG_FOCUSREGIONTYPE;
-/**
- * Focus Status type
+/**
+ * Focus Status type
*/
typedef enum OMX_FOCUSSTATUSTYPE {
OMX_FocusStatusOff = 0,
@@ -890,13 +897,13 @@
OMX_FocusStatusReached,
OMX_FocusStatusUnableToReach,
OMX_FocusStatusLost,
- OMX_FocusStatusKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
+ OMX_FocusStatusKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
OMX_FocusStatusVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
OMX_FocusStatusMax = 0x7FFFFFFF
} OMX_FOCUSSTATUSTYPE;
-/**
- * Focus status configuration
+/**
+ * Focus status configuration
*
* STRUCT MEMBERS:
* nSize : Size of the structure in bytes
diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf
index e6a7b37..b8fa487 100644
--- a/media/libeffects/data/audio_effects.conf
+++ b/media/libeffects/data/audio_effects.conf
@@ -1,5 +1,10 @@
# List of effect libraries to load. Each library element must contain a "path" element
# giving the full path of the library .so file.
+# libraries {
+# <lib name> {
+# path <lib path>
+# }
+# }
libraries {
bundle {
path /system/lib/soundfx/libbundlewrapper.so
@@ -10,6 +15,9 @@
visualizer {
path /system/lib/soundfx/libvisualizer.so
}
+ pre_processing {
+ path /system/lib/soundfx/libaudiopreprocessing.so
+ }
}
# list of effects to load. Each effect element must contain a "library" and a "uuid" element.
@@ -17,6 +25,16 @@
# "libraries" element.
# The name of the effect element is indicative, only the value of the "uuid" element
# designates the effect.
+# The uuid is the implementation specific UUID as specified by the effect vendor. This is not the
+# generic effect type UUID.
+# effects {
+# <fx name> {
+# library <lib name>
+# uuid <effect uuid>
+# }
+# ...
+# }
+
effects {
bassboost {
library bundle
@@ -54,4 +72,55 @@
library visualizer
uuid d069d9e0-8329-11df-9168-0002a5d5c51b
}
+ agc {
+ library pre_processing
+ uuid aa8130e0-66fc-11e0-bad0-0002a5d5c51b
+ }
+ aec {
+ library pre_processing
+ uuid bb392ec0-8d4d-11e0-a896-0002a5d5c51b
+ }
+ ns {
+ library pre_processing
+ uuid c06c8400-8e06-11e0-9cb6-0002a5d5c51b
+ }
}
+# Audio preprocessor configurations.
+# The pre processor configuration consists of a list of elements, each describing
+# pre processor settings for a given input source. Valid input source names are:
+# "mic", "camcorder", "voice_recognition", "voice_communication"
+# Each input source element contains a list of effects elements. The name of the effect
+# element must be the name of one of the effects in the "effects" list of the file.
+# Each effect element may optionally contain a list of parameters and their
+# default value to apply when the pre processor effect is created.
+# A parameter is defined by a "param" element and a "value" element. Each of these elements
+# consists of one or more elements specifying a type followed by a value.
+# The types defined are: "int", "short", "float", "bool" and "string"
+# When both "param" and "value" are a single int, a simple form is allowed where just
+# the param and value pair is present in the parameter description
+# pre_processing {
+# <input source name> {
+# <fx name> {
+# <param 1 name> {
+# param {
+# int|short|float|bool|string <value>
+# [ int|short|float|bool|string <value> ]
+# ...
+# }
+# value {
+# int|short|float|bool|string <value>
+# [ int|short|float|bool|string <value> ]
+# ...
+# }
+# }
+# <param 2 name > {<param> <value>}
+# ...
+# }
+# ...
+# }
+# ...
+# }
+
+#
+# TODO: add default audio pre processor configurations after debug and tuning phase
+#
diff --git a/media/libeffects/factory/Android.mk b/media/libeffects/factory/Android.mk
index 26265ae..2f2b974 100644
--- a/media/libeffects/factory/Android.mk
+++ b/media/libeffects/factory/Android.mk
@@ -14,4 +14,7 @@
LOCAL_SHARED_LIBRARIES += libdl
+LOCAL_C_INCLUDES := \
+ system/media/audio_effects/include
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index a9689bc..d333510 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -24,6 +24,7 @@
#include <cutils/misc.h>
#include <cutils/config_utils.h>
+#include <audio_effects/audio_effects_conf.h>
static list_elem_t *gEffectList; // list of effect_entry_t: all currently created effects
static list_elem_t *gLibraryList; // list of lib_entry_t: all currently loaded libraries
diff --git a/media/libeffects/factory/EffectsFactory.h b/media/libeffects/factory/EffectsFactory.h
index fcc0dba..c1d4319 100644
--- a/media/libeffects/factory/EffectsFactory.h
+++ b/media/libeffects/factory/EffectsFactory.h
@@ -26,13 +26,6 @@
extern "C" {
#endif
-#define AUDIO_EFFECT_DEFAULT_CONFIG_FILE "/system/etc/audio_effects.conf"
-#define AUDIO_EFFECT_VENDOR_CONFIG_FILE "/vendor/etc/audio_effects.conf"
-#define EFFECTS_TAG "effects"
-#define LIBRARIES_TAG "libraries"
-#define PATH_TAG "path"
-#define LIBRARY_TAG "library"
-#define UUID_TAG "uuid"
typedef struct list_elem_s {
void *object;
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
new file mode 100755
index 0000000..77d40b6
--- /dev/null
+++ b/media/libeffects/preprocessing/Android.mk
@@ -0,0 +1,32 @@
+LOCAL_PATH:= $(call my-dir)
+
+# audio preprocessing wrapper
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libaudiopreprocessing
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx
+
+LOCAL_SRC_FILES:= \
+ PreProcessing.cpp
+
+LOCAL_C_INCLUDES += \
+ external/webrtc/src \
+ external/webrtc/src/modules/interface \
+ external/webrtc/src/modules/audio_processing/main/interface \
+ system/media/audio_effects/include
+
+LOCAL_C_INCLUDES += $(call include-path-for, speex)
+
+LOCAL_SHARED_LIBRARIES := \
+ libwebrtc_audio_preprocessing \
+ libspeexresampler \
+ libutils
+
+ifeq ($(TARGET_SIMULATOR),true)
+LOCAL_LDLIBS += -ldl
+else
+LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
new file mode 100755
index 0000000..ba286a1
--- /dev/null
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -0,0 +1,1609 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#define LOG_TAG "PreProcessing"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+#include <utils/Timers.h>
+#include <hardware/audio_effect.h>
+#include <audio_effects/effect_aec.h>
+#include <audio_effects/effect_agc.h>
+#include <audio_effects/effect_ns.h>
+#include "modules/interface/module_common_types.h"
+#include "modules/audio_processing/main/interface/audio_processing.h"
+#include "speex/speex_resampler.h"
+
+
+//------------------------------------------------------------------------------
+// local definitions
+//------------------------------------------------------------------------------
+
+// maximum number of sessions
+#define PREPROC_NUM_SESSIONS 8
+
+// types of pre processing modules
+enum preproc_id
+{
+ PREPROC_AGC, // Automatic Gain Control
+ PREPROC_AEC, // Acoustic Echo Canceler
+ PREPROC_NS, // Noise Suppressor
+ PREPROC_NUM_EFFECTS
+};
+
+// Session state
+enum preproc_session_state {
+ PREPROC_SESSION_STATE_INIT, // initialized
+ PREPROC_SESSION_STATE_CONFIG // configuration received
+};
+
+// Effect/Preprocessor state
+enum preproc_effect_state {
+ PREPROC_EFFECT_STATE_INIT, // initialized
+ PREPROC_EFFECT_STATE_CREATED, // webRTC engine created
+ PREPROC_EFFECT_STATE_CONFIG, // configuration received/disabled
+ PREPROC_EFFECT_STATE_ACTIVE // active/enabled
+};
+
+// handle on webRTC engine
+typedef void* preproc_fx_handle_t;
+
+typedef struct preproc_session_s preproc_session_t;
+typedef struct preproc_effect_s preproc_effect_t;
+typedef struct preproc_ops_s preproc_ops_t;
+
+// Effect operation table. Functions for all pre processors are declared in sPreProcOps[] table.
+// Function pointer can be null if no action required.
+struct preproc_ops_s {
+ int (* create)(preproc_effect_t *fx);
+ int (* init)(preproc_effect_t *fx);
+ int (* reset)(preproc_effect_t *fx);
+ void (* enable)(preproc_effect_t *fx);
+ void (* disable)(preproc_effect_t *fx);
+ int (* set_parameter)(preproc_effect_t *fx, void *param, void *value);
+ int (* get_parameter)(preproc_effect_t *fx, void *param, size_t *size, void *value);
+ int (* set_device)(preproc_effect_t *fx, uint32_t device);
+};
+
+// Effect context
+struct preproc_effect_s {
+ const struct effect_interface_s *itfe;
+ uint32_t procId; // type of pre processor (enum preproc_id)
+ uint32_t state; // current state (enum preproc_effect_state)
+ preproc_session_t *session; // session the effect is on
+ const preproc_ops_t *ops; // effect ops table
+ preproc_fx_handle_t engine; // handle on webRTC engine
+};
+
+// Session context
+struct preproc_session_s {
+ struct preproc_effect_s effects[PREPROC_NUM_EFFECTS]; // effects in this session
+ uint32_t state; // current state (enum preproc_session_state)
+ int id; // audio session ID
+ int io; // handle of input stream this session is on
+ webrtc::AudioProcessing* apm; // handle on webRTC audio processing module (APM)
+ size_t apmFrameCount; // buffer size for webRTC process (10 ms)
+ uint32_t apmSamplingRate; // webRTC APM sampling rate (8/16 or 32 kHz)
+ size_t frameCount; // buffer size before input resampler ( <=> apmFrameCount)
+ uint32_t samplingRate; // sampling rate at effect process interface
+ uint32_t inChannelCount; // input channel count
+ uint32_t outChannelCount; // output channel count
+    uint32_t createdMsk;              // bit field containing IDs of created pre processors
+ uint32_t enabledMsk; // bit field containing IDs of enabled pre processors
+ uint32_t processedMsk; // bit field containing IDs of pre processors already
+ // processed in current round
+ webrtc::AudioFrame *procFrame; // audio frame passed to webRTC AMP ProcessStream()
+ int16_t *inBuf; // input buffer used when resampling
+ size_t inBufSize; // input buffer size in frames
+ size_t framesIn; // number of frames in input buffer
+ SpeexResamplerState *inResampler; // handle on input speex resampler
+ int16_t *outBuf; // output buffer used when resampling
+ size_t outBufSize; // output buffer size in frames
+ size_t framesOut; // number of frames in output buffer
+ SpeexResamplerState *outResampler; // handle on output speex resampler
+ uint32_t revChannelCount; // number of channels on reverse stream
+ uint32_t revEnabledMsk; // bit field containing IDs of enabled pre processors
+ // with reverse channel
+ uint32_t revProcessedMsk; // bit field containing IDs of pre processors with reverse
+ // channel already processed in current round
+ webrtc::AudioFrame *revFrame; // audio frame passed to webRTC AMP AnalyzeReverseStream()
+ int16_t *revBuf; // reverse channel input buffer
+ size_t revBufSize; // reverse channel input buffer size
+ size_t framesRev; // number of frames in reverse channel input buffer
+ SpeexResamplerState *revResampler; // handle on reverse channel input speex resampler
+};
+
+//------------------------------------------------------------------------------
+// Effect descriptors
+//------------------------------------------------------------------------------
+
+// UUIDs for effect types have been generated from http://www.itu.int/ITU-T/asn1/uuid.html
+// as the pre processing effects are not defined by OpenSL ES
+
+// Automatic Gain Control
+static const effect_descriptor_t sAgcDescriptor = {
+ { 0x0a8abfe0, 0x654c, 0x11e0, 0xba26, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
+ { 0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ EFFECT_CONTROL_API_VERSION,
+ (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
+ 0, //FIXME indicate CPU load
+ 0, //FIXME indicate memory usage
+ "Automatic Gain Control",
+ "The Android Open Source Project"
+};
+
+// Acoustic Echo Cancellation
+static const effect_descriptor_t sAecDescriptor = {
+ { 0x7b491460, 0x8d4d, 0x11e0, 0xbd61, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
+ { 0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ EFFECT_CONTROL_API_VERSION,
+ (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
+ 0, //FIXME indicate CPU load
+ 0, //FIXME indicate memory usage
+ "Acoustic Echo Canceler",
+ "The Android Open Source Project"
+};
+
+// Noise suppression
+static const effect_descriptor_t sNsDescriptor = {
+ { 0x58b4b260, 0x8e06, 0x11e0, 0xaa8e, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
+ { 0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ EFFECT_CONTROL_API_VERSION,
+ (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
+ 0, //FIXME indicate CPU load
+ 0, //FIXME indicate memory usage
+ "Noise Suppression",
+ "The Android Open Source Project"
+};
+
+
+static const effect_descriptor_t *sDescriptors[PREPROC_NUM_EFFECTS] = {
+ &sAgcDescriptor,
+ &sAecDescriptor,
+ &sNsDescriptor
+};
+
+//------------------------------------------------------------------------------
+// Helper functions
+//------------------------------------------------------------------------------
+
+const effect_uuid_t * const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {
+ FX_IID_AGC,
+ FX_IID_AEC,
+ FX_IID_NS
+};
+
+
+const effect_uuid_t * ProcIdToUuid(int procId)
+{
+ if (procId >= PREPROC_NUM_EFFECTS) {
+ return EFFECT_UUID_NULL;
+ }
+ return sUuidToPreProcTable[procId];
+}
+
+uint32_t UuidToProcId(const effect_uuid_t * uuid)
+{
+ size_t i;
+ for (i = 0; i < PREPROC_NUM_EFFECTS; i++) {
+ if (memcmp(uuid, sUuidToPreProcTable[i], sizeof(*uuid)) == 0) {
+ break;
+ }
+ }
+ return i;
+}
+
+bool HasReverseStream(uint32_t procId)
+{
+ if (procId == PREPROC_AEC) {
+ return true;
+ }
+ return false;
+}
+
+
+//------------------------------------------------------------------------------
+// Automatic Gain Control (AGC)
+//------------------------------------------------------------------------------
+
+static const int kAgcDefaultTargetLevel = 0;
+static const int kAgcDefaultCompGain = 90;
+static const bool kAgcDefaultLimiter = true;
+
+int AgcInit (preproc_effect_t *effect)
+{
+ LOGV("AgcInit");
+ webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ agc->set_mode(webrtc::GainControl::kFixedDigital);
+ agc->set_target_level_dbfs(kAgcDefaultTargetLevel);
+ agc->set_compression_gain_db(kAgcDefaultCompGain);
+ agc->enable_limiter(kAgcDefaultLimiter);
+ return 0;
+}
+
+int AgcCreate(preproc_effect_t *effect)
+{
+ webrtc::GainControl *agc = effect->session->apm->gain_control();
+ LOGV("AgcCreate got agc %p", agc);
+ if (agc == NULL) {
+ LOGW("AgcCreate Error");
+ return -ENOMEM;
+ }
+ effect->engine = static_cast<preproc_fx_handle_t>(agc);
+ AgcInit(effect);
+ return 0;
+}
+
+int AgcGetParameter(preproc_effect_t *effect,
+ void *pParam,
+ size_t *pValueSize,
+ void *pValue)
+{
+ int status = 0;
+ uint32_t param = *(uint32_t *)pParam;
+ t_agc_settings *pProperties = (t_agc_settings *)pValue;
+ webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+
+ switch (param) {
+ case AGC_PARAM_TARGET_LEVEL:
+ case AGC_PARAM_COMP_GAIN:
+ if (*pValueSize < sizeof(int16_t)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ if (*pValueSize < sizeof(bool)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
+ case AGC_PARAM_PROPERTIES:
+ if (*pValueSize < sizeof(t_agc_settings)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ LOGW("AgcGetParameter() unknown param %08x", param);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (param) {
+ case AGC_PARAM_TARGET_LEVEL:
+ *(int16_t *) pValue = (int16_t)(agc->target_level_dbfs() * -100);
+ LOGV("AgcGetParameter() target level %d milliBels", *(int16_t *) pValue);
+ break;
+ case AGC_PARAM_COMP_GAIN:
+ *(int16_t *) pValue = (int16_t)(agc->compression_gain_db() * 100);
+ LOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t *) pValue);
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ *(bool *) pValue = (bool)agc->is_limiter_enabled();
+ LOGV("AgcGetParameter() limiter enabled %s",
+ (*(int16_t *) pValue != 0) ? "true" : "false");
+ break;
+ case AGC_PARAM_PROPERTIES:
+ pProperties->targetLevel = (int16_t)(agc->target_level_dbfs() * -100);
+ pProperties->compGain = (int16_t)(agc->compression_gain_db() * 100);
+ pProperties->limiterEnabled = (bool)agc->is_limiter_enabled();
+ break;
+ default:
+ LOGW("AgcGetParameter() unknown param %d", param);
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+
+int AgcSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
+{
+ int status = 0;
+ uint32_t param = *(uint32_t *)pParam;
+ t_agc_settings *pProperties = (t_agc_settings *)pValue;
+ webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+
+ switch (param) {
+ case AGC_PARAM_TARGET_LEVEL:
+ LOGV("AgcSetParameter() target level %d milliBels", *(int16_t *)pValue);
+ status = agc->set_target_level_dbfs(-(*(int16_t *)pValue / 100));
+ break;
+ case AGC_PARAM_COMP_GAIN:
+ LOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t *)pValue);
+ status = agc->set_compression_gain_db(*(int16_t *)pValue / 100);
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ LOGV("AgcSetParameter() limiter enabled %s", *(bool *)pValue ? "true" : "false");
+ status = agc->enable_limiter(*(bool *)pValue);
+ break;
+ case AGC_PARAM_PROPERTIES:
+ LOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
+ pProperties->targetLevel,
+ pProperties->compGain,
+ pProperties->limiterEnabled);
+ status = agc->set_target_level_dbfs(-(pProperties->targetLevel / 100));
+ if (status != 0) break;
+ status = agc->set_compression_gain_db(pProperties->compGain / 100);
+ if (status != 0) break;
+ status = agc->enable_limiter(pProperties->limiterEnabled);
+ break;
+ default:
+ LOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
+ status = -EINVAL;
+ break;
+ }
+
+ LOGV("AgcSetParameter() done status %d", status);
+
+ return status;
+}
+
+void AgcEnable(preproc_effect_t *effect)
+{
+ webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ LOGV("AgcEnable agc %p", agc);
+ agc->Enable(true);
+}
+
+void AgcDisable(preproc_effect_t *effect)
+{
+ LOGV("AgcDisable");
+ webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ agc->Enable(false);
+}
+
+
+static const preproc_ops_t sAgcOps = {
+ AgcCreate,
+ AgcInit,
+ NULL,
+ AgcEnable,
+ AgcDisable,
+ AgcSetParameter,
+ AgcGetParameter,
+ NULL
+};
+
+
+//------------------------------------------------------------------------------
+// Acoustic Echo Canceler (AEC)
+//------------------------------------------------------------------------------
+
+static const webrtc::EchoControlMobile::RoutingMode kAecDefaultMode =
+ webrtc::EchoControlMobile::kEarpiece;
+static const bool kAecDefaultComfortNoise = true;
+
+int AecInit (preproc_effect_t *effect)
+{
+ LOGV("AecInit");
+ webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
+ aec->set_routing_mode(kAecDefaultMode);
+ aec->enable_comfort_noise(kAecDefaultComfortNoise);
+ return 0;
+}
+
+int AecCreate(preproc_effect_t *effect)
+{
+ webrtc::EchoControlMobile *aec = effect->session->apm->echo_control_mobile();
+ LOGV("AecCreate got aec %p", aec);
+ if (aec == NULL) {
+        LOGW("AecCreate Error");
+ return -ENOMEM;
+ }
+ effect->engine = static_cast<preproc_fx_handle_t>(aec);
+ AecInit (effect);
+ return 0;
+}
+
+int AecGetParameter(preproc_effect_t *effect,
+ void *pParam,
+ size_t *pValueSize,
+ void *pValue)
+{
+ int status = 0;
+ uint32_t param = *(uint32_t *)pParam;
+
+ if (*pValueSize < sizeof(uint32_t)) {
+ return -EINVAL;
+ }
+ switch (param) {
+ case AEC_PARAM_ECHO_DELAY:
+ case AEC_PARAM_PROPERTIES:
+ *(uint32_t *)pValue = 1000 * effect->session->apm->stream_delay_ms();
+ LOGV("AecGetParameter() echo delay %d us", *(uint32_t *)pValue);
+ break;
+ default:
+ LOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+
+int AecSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
+{
+ int status = 0;
+ uint32_t param = *(uint32_t *)pParam;
+ uint32_t value = *(uint32_t *)pValue;
+
+ switch (param) {
+ case AEC_PARAM_ECHO_DELAY:
+ case AEC_PARAM_PROPERTIES:
+ status = effect->session->apm->set_stream_delay_ms(value/1000);
+ LOGV("AecSetParameter() echo delay %d us, status %d", value, status);
+ break;
+ default:
+ LOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+
+void AecEnable(preproc_effect_t *effect)
+{
+ webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
+ LOGV("AecEnable aec %p", aec);
+ aec->Enable(true);
+}
+
+void AecDisable(preproc_effect_t *effect)
+{
+ LOGV("AecDisable");
+ webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
+ aec->Enable(false);
+}
+
+int AecSetDevice(preproc_effect_t *effect, uint32_t device)
+{
+ LOGV("AecSetDevice %08x", device);
+ webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
+ webrtc::EchoControlMobile::RoutingMode mode = webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
+
+ switch(device) {
+ case AUDIO_DEVICE_OUT_EARPIECE:
+ mode = webrtc::EchoControlMobile::kEarpiece;
+ break;
+ case AUDIO_DEVICE_OUT_SPEAKER:
+ mode = webrtc::EchoControlMobile::kSpeakerphone;
+ break;
+ case AUDIO_DEVICE_OUT_WIRED_HEADSET:
+ case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+ default:
+ break;
+ }
+ aec->set_routing_mode(mode);
+ return 0;
+}
+
+static const preproc_ops_t sAecOps = {
+ AecCreate,
+ AecInit,
+ NULL,
+ AecEnable,
+ AecDisable,
+ AecSetParameter,
+ AecGetParameter,
+ AecSetDevice
+};
+
+//------------------------------------------------------------------------------
+// Noise Suppression (NS)
+//------------------------------------------------------------------------------
+
+static const webrtc::NoiseSuppression::Level kNsDefaultLevel = webrtc::NoiseSuppression::kModerate;
+
+int NsInit (preproc_effect_t *effect)
+{
+ LOGV("NsInit");
+ webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
+ ns->set_level(kNsDefaultLevel);
+ return 0;
+}
+
+int NsCreate(preproc_effect_t *effect)
+{
+ webrtc::NoiseSuppression *ns = effect->session->apm->noise_suppression();
+ LOGV("NsCreate got ns %p", ns);
+ if (ns == NULL) {
+        LOGW("NsCreate Error");
+ return -ENOMEM;
+ }
+ effect->engine = static_cast<preproc_fx_handle_t>(ns);
+ NsInit (effect);
+ return 0;
+}
+
+int NsGetParameter(preproc_effect_t *effect,
+ void *pParam,
+ size_t *pValueSize,
+ void *pValue)
+{
+ int status = 0;
+ return status;
+}
+
+int NsSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
+{
+ int status = 0;
+ return status;
+}
+
+void NsEnable(preproc_effect_t *effect)
+{
+ webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
+ LOGV("NsEnable ns %p", ns);
+ ns->Enable(true);
+}
+
+void NsDisable(preproc_effect_t *effect)
+{
+ LOGV("NsDisable");
+ webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
+ ns->Enable(false);
+}
+
+static const preproc_ops_t sNsOps = {
+ NsCreate,
+ NsInit,
+ NULL,
+ NsEnable,
+ NsDisable,
+ NsSetParameter,
+ NsGetParameter,
+ NULL
+};
+
+
+static const preproc_ops_t *sPreProcOps[PREPROC_NUM_EFFECTS] = {
+ &sAgcOps,
+ &sAecOps,
+ &sNsOps
+};
+
+
+//------------------------------------------------------------------------------
+// Effect functions
+//------------------------------------------------------------------------------
+
+void Session_SetProcEnabled(preproc_session_t *session, uint32_t procId, bool enabled);
+
+extern "C" const struct effect_interface_s sEffectInterface;
+extern "C" const struct effect_interface_s sEffectInterfaceReverse;
+
+#define BAD_STATE_ABORT(from, to) \
+ LOG_ALWAYS_FATAL("Bad state transition from %d to %d", from, to);
+
+int Effect_SetState(preproc_effect_t *effect, uint32_t state)
+{
+ int status = 0;
+ LOGV("Effect_SetState proc %d, new %d old %d", effect->procId, state, effect->state);
+ switch(state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ switch(effect->state) {
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ effect->ops->disable(effect);
+ Session_SetProcEnabled(effect->session, effect->procId, false);
+ case PREPROC_EFFECT_STATE_CONFIG:
+ case PREPROC_EFFECT_STATE_CREATED:
+ case PREPROC_EFFECT_STATE_INIT:
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
+ break;
+ case PREPROC_EFFECT_STATE_CREATED:
+ switch(effect->state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ status = effect->ops->create(effect);
+ break;
+ case PREPROC_EFFECT_STATE_CREATED:
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ case PREPROC_EFFECT_STATE_CONFIG:
+ LOGE("Effect_SetState invalid transition");
+ status = -ENOSYS;
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
+ break;
+ case PREPROC_EFFECT_STATE_CONFIG:
+ switch(effect->state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ LOGE("Effect_SetState invalid transition");
+ status = -ENOSYS;
+ break;
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ effect->ops->disable(effect);
+ Session_SetProcEnabled(effect->session, effect->procId, false);
+ break;
+ case PREPROC_EFFECT_STATE_CREATED:
+ case PREPROC_EFFECT_STATE_CONFIG:
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
+ break;
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ switch(effect->state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ case PREPROC_EFFECT_STATE_CREATED:
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ LOGE("Effect_SetState invalid transition");
+ status = -ENOSYS;
+ break;
+ case PREPROC_EFFECT_STATE_CONFIG:
+ effect->ops->enable(effect);
+ Session_SetProcEnabled(effect->session, effect->procId, true);
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
+ if (status == 0) {
+ effect->state = state;
+ }
+ return status;
+}
+
+int Effect_Init(preproc_effect_t *effect, uint32_t procId)
+{
+ if (HasReverseStream(procId)) {
+ effect->itfe = &sEffectInterfaceReverse;
+ } else {
+ effect->itfe = &sEffectInterface;
+ }
+ effect->ops = sPreProcOps[procId];
+ effect->procId = procId;
+ effect->state = PREPROC_EFFECT_STATE_INIT;
+ return 0;
+}
+
+int Effect_Create(preproc_effect_t *effect,
+ preproc_session_t *session,
+ effect_handle_t *interface)
+{
+ effect->session = session;
+ *interface = (effect_handle_t)&effect->itfe;
+ return Effect_SetState(effect, PREPROC_EFFECT_STATE_CREATED);
+}
+
+int Effect_Release(preproc_effect_t *effect)
+{
+ return Effect_SetState(effect, PREPROC_EFFECT_STATE_INIT);
+}
+
+
+//------------------------------------------------------------------------------
+// Session functions
+//------------------------------------------------------------------------------
+
+#define RESAMPLER_QUALITY SPEEX_RESAMPLER_QUALITY_VOIP
+
+static const int kPreprocDefaultSr = 16000;
+static const int kPreProcDefaultCnl = 1;
+
+int Session_Init(preproc_session_t *session)
+{
+ size_t i;
+ int status = 0;
+
+ session->state = PREPROC_SESSION_STATE_INIT;
+ session->id = 0;
+ session->io = 0;
+ session->createdMsk = 0;
+ session->apm = NULL;
+ for (i = 0; i < PREPROC_NUM_EFFECTS && status == 0; i++) {
+ status = Effect_Init(&session->effects[i], i);
+ }
+ return status;
+}
+
+
+extern "C" int Session_CreateEffect(preproc_session_t *session,
+ int32_t procId,
+ effect_handle_t *interface)
+{
+ int status = -ENOMEM;
+
+ LOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
+
+ if (session->createdMsk == 0) {
+ session->apm = webrtc::AudioProcessing::Create(session->io);
+ if (session->apm == NULL) {
+ LOGW("Session_CreateEffect could not get apm engine");
+ goto error;
+ }
+ session->apm->set_sample_rate_hz(kPreprocDefaultSr);
+ session->apm->set_num_channels(kPreProcDefaultCnl, kPreProcDefaultCnl);
+ session->apm->set_num_reverse_channels(kPreProcDefaultCnl);
+ session->procFrame = new webrtc::AudioFrame();
+ if (session->procFrame == NULL) {
+ LOGW("Session_CreateEffect could not allocate audio frame");
+ goto error;
+ }
+ session->revFrame = new webrtc::AudioFrame();
+ if (session->revFrame == NULL) {
+ LOGW("Session_CreateEffect could not allocate reverse audio frame");
+ goto error;
+ }
+ session->apmSamplingRate = kPreprocDefaultSr;
+ session->apmFrameCount = (kPreprocDefaultSr) / 100;
+ session->frameCount = session->apmFrameCount;
+ session->samplingRate = kPreprocDefaultSr;
+ session->inChannelCount = kPreProcDefaultCnl;
+ session->outChannelCount = kPreProcDefaultCnl;
+ session->procFrame->_frequencyInHz = kPreprocDefaultSr;
+ session->procFrame->_audioChannel = kPreProcDefaultCnl;
+ session->revChannelCount = kPreProcDefaultCnl;
+ session->revFrame->_frequencyInHz = kPreprocDefaultSr;
+ session->revFrame->_audioChannel = kPreProcDefaultCnl;
+ session->enabledMsk = 0;
+ session->processedMsk = 0;
+ session->revEnabledMsk = 0;
+ session->revProcessedMsk = 0;
+ session->inResampler = NULL;
+ session->inBuf = NULL;
+ session->inBufSize = 0;
+ session->outResampler = NULL;
+ session->outBuf = NULL;
+ session->outBufSize = 0;
+ session->revResampler = NULL;
+ session->revBuf = NULL;
+ session->revBufSize = 0;
+ }
+ status = Effect_Create(&session->effects[procId], session, interface);
+ if (status < 0) {
+ goto error;
+ }
+ LOGV("Session_CreateEffect OK");
+ session->createdMsk |= (1<<procId);
+ return status;
+
+error:
+ if (session->createdMsk == 0) {
+ delete session->revFrame;
+ session->revFrame = NULL;
+ delete session->procFrame;
+ session->procFrame = NULL;
+ webrtc::AudioProcessing::Destroy(session->apm);
+ session->apm = NULL;
+ }
+ return status;
+}
+
+int Session_ReleaseEffect(preproc_session_t *session,
+ preproc_effect_t *fx)
+{
+ LOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
+ session->createdMsk &= ~(1<<fx->procId);
+ if (session->createdMsk == 0) {
+ webrtc::AudioProcessing::Destroy(session->apm);
+ session->apm = NULL;
+ delete session->procFrame;
+ session->procFrame = NULL;
+ delete session->revFrame;
+ session->revFrame = NULL;
+ if (session->inResampler != NULL) {
+ speex_resampler_destroy(session->inResampler);
+ session->inResampler = NULL;
+ }
+ if (session->outResampler != NULL) {
+ speex_resampler_destroy(session->outResampler);
+ session->outResampler = NULL;
+ }
+ if (session->revResampler != NULL) {
+ speex_resampler_destroy(session->revResampler);
+ session->revResampler = NULL;
+ }
+ delete session->inBuf;
+ session->inBuf = NULL;
+ delete session->outBuf;
+ session->outBuf = NULL;
+ delete session->revBuf;
+ session->revBuf = NULL;
+
+ session->io = 0;
+ }
+
+ return 0;
+}
+
+
+int Session_SetConfig(preproc_session_t *session, effect_config_t *config)
+{
+ uint32_t sr;
+ uint32_t inCnl = popcount(config->inputCfg.channels);
+ uint32_t outCnl = popcount(config->outputCfg.channels);
+
+ if (config->inputCfg.samplingRate != config->outputCfg.samplingRate ||
+ config->inputCfg.format != config->outputCfg.format ||
+ config->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
+ return -EINVAL;
+ }
+
+ LOGV("Session_SetConfig sr %d cnl %08x",
+ config->inputCfg.samplingRate, config->inputCfg.channels);
+ int status;
+
+ // AEC implementation is limited to 16kHz
+ if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
+ session->apmSamplingRate = 32000;
+ } else
+ if (config->inputCfg.samplingRate >= 16000) {
+ session->apmSamplingRate = 16000;
+ } else if (config->inputCfg.samplingRate >= 8000) {
+ session->apmSamplingRate = 8000;
+ }
+ status = session->apm->set_sample_rate_hz(session->apmSamplingRate);
+ if (status < 0) {
+ return -EINVAL;
+ }
+ status = session->apm->set_num_channels(inCnl, outCnl);
+ if (status < 0) {
+ return -EINVAL;
+ }
+ status = session->apm->set_num_reverse_channels(inCnl);
+ if (status < 0) {
+ return -EINVAL;
+ }
+
+ session->samplingRate = config->inputCfg.samplingRate;
+ session->apmFrameCount = session->apmSamplingRate / 100;
+ if (session->samplingRate == session->apmSamplingRate) {
+ session->frameCount = session->apmFrameCount;
+ } else {
+ session->frameCount = (session->apmFrameCount * session->samplingRate) /
+ session->apmSamplingRate + 1;
+ }
+ session->inChannelCount = inCnl;
+ session->outChannelCount = outCnl;
+ session->procFrame->_audioChannel = inCnl;
+ session->procFrame->_frequencyInHz = session->apmSamplingRate;
+
+ session->revChannelCount = inCnl;
+ session->revFrame->_audioChannel = inCnl;
+ session->revFrame->_frequencyInHz = session->apmSamplingRate;
+
+ if (session->inResampler != NULL) {
+ speex_resampler_destroy(session->inResampler);
+ session->inResampler = NULL;
+ }
+ if (session->outResampler != NULL) {
+ speex_resampler_destroy(session->outResampler);
+ session->outResampler = NULL;
+ }
+ if (session->revResampler != NULL) {
+ speex_resampler_destroy(session->revResampler);
+ session->revResampler = NULL;
+ }
+ if (session->samplingRate != session->apmSamplingRate) {
+ int error;
+ session->inResampler = speex_resampler_init(session->inChannelCount,
+ session->samplingRate,
+ session->apmSamplingRate,
+ RESAMPLER_QUALITY,
+ &error);
+ if (session->inResampler == NULL) {
+ LOGW("Session_SetConfig Cannot create speex resampler: %s",
+ speex_resampler_strerror(error));
+ return -EINVAL;
+ }
+ session->outResampler = speex_resampler_init(session->outChannelCount,
+ session->apmSamplingRate,
+ session->samplingRate,
+ RESAMPLER_QUALITY,
+ &error);
+ if (session->outResampler == NULL) {
+ LOGW("Session_SetConfig Cannot create speex resampler: %s",
+ speex_resampler_strerror(error));
+ speex_resampler_destroy(session->inResampler);
+ session->inResampler = NULL;
+ return -EINVAL;
+ }
+ session->revResampler = speex_resampler_init(session->inChannelCount,
+ session->samplingRate,
+ session->apmSamplingRate,
+ RESAMPLER_QUALITY,
+ &error);
+ if (session->revResampler == NULL) {
+ LOGW("Session_SetConfig Cannot create speex resampler: %s",
+ speex_resampler_strerror(error));
+ speex_resampler_destroy(session->inResampler);
+ session->inResampler = NULL;
+ speex_resampler_destroy(session->outResampler);
+ session->outResampler = NULL;
+ return -EINVAL;
+ }
+ }
+
+ session->state = PREPROC_SESSION_STATE_CONFIG;
+ return 0;
+}
+
+int Session_SetReverseConfig(preproc_session_t *session, effect_config_t *config)
+{
+ if (config->inputCfg.samplingRate != config->outputCfg.samplingRate ||
+ config->inputCfg.format != config->outputCfg.format ||
+ config->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
+ return -EINVAL;
+ }
+
+ LOGV("Session_SetReverseConfig sr %d cnl %08x",
+ config->inputCfg.samplingRate, config->inputCfg.channels);
+
+ if (session->state < PREPROC_SESSION_STATE_CONFIG) {
+ return -ENOSYS;
+ }
+ if (config->inputCfg.samplingRate != session->samplingRate ||
+ config->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
+ return -EINVAL;
+ }
+ uint32_t inCnl = popcount(config->inputCfg.channels);
+ int status = session->apm->set_num_reverse_channels(inCnl);
+ if (status < 0) {
+ return -EINVAL;
+ }
+ session->revChannelCount = inCnl;
+ session->revFrame->_audioChannel = inCnl;
+ session->revFrame->_frequencyInHz = session->apmSamplingRate;
+ return 0;
+}
+
+void Session_SetProcEnabled(preproc_session_t *session, uint32_t procId, bool enabled)
+{
+ if (enabled) {
+ if(session->enabledMsk == 0) {
+ session->framesIn = 0;
+ if (session->inResampler != NULL) {
+ speex_resampler_reset_mem(session->inResampler);
+ }
+ session->framesOut = 0;
+ if (session->outResampler != NULL) {
+ speex_resampler_reset_mem(session->outResampler);
+ }
+ }
+ session->enabledMsk |= (1 << procId);
+ if (HasReverseStream(procId)) {
+ session->framesRev = 0;
+ if (session->revResampler != NULL) {
+ speex_resampler_reset_mem(session->revResampler);
+ }
+ session->revEnabledMsk |= (1 << procId);
+ }
+ } else {
+ session->enabledMsk &= ~(1 << procId);
+ if (HasReverseStream(procId)) {
+ session->revEnabledMsk &= ~(1 << procId);
+ }
+ }
+ LOGV("Session_SetProcEnabled proc %d, enabled %d enabledMsk %08x revEnabledMsk %08x",
+ procId, enabled, session->enabledMsk, session->revEnabledMsk);
+ session->processedMsk = 0;
+ if (HasReverseStream(procId)) {
+ session->revProcessedMsk = 0;
+ }
+}
+
+//------------------------------------------------------------------------------
+// Bundle functions
+//------------------------------------------------------------------------------
+
+static int sInitStatus = 1;
+static preproc_session_t sSessions[PREPROC_NUM_SESSIONS];
+
+preproc_session_t *PreProc_GetSession(int32_t procId, int32_t sessionId, int32_t ioId)
+{
+ size_t i;
+ int free = -1;
+ for (i = 0; i < PREPROC_NUM_SESSIONS; i++) {
+ if (sSessions[i].io == ioId) {
+ if (sSessions[i].createdMsk & (1 << procId)) {
+ return NULL;
+ }
+ return &sSessions[i];
+ }
+ }
+ for (i = 0; i < PREPROC_NUM_SESSIONS; i++) {
+ if (sSessions[i].io == 0) {
+ sSessions[i].id = sessionId;
+ sSessions[i].io = ioId;
+ return &sSessions[i];
+ }
+ }
+ return NULL;
+}
+
+
+int PreProc_Init() {
+ size_t i;
+ int status = 0;
+
+ if (sInitStatus <= 0) {
+ return sInitStatus;
+ }
+ for (i = 0; i < PREPROC_NUM_SESSIONS && status == 0; i++) {
+ status = Session_Init(&sSessions[i]);
+ }
+ sInitStatus = status;
+ return sInitStatus;
+}
+
+const effect_descriptor_t *PreProc_GetDescriptor(effect_uuid_t *uuid)
+{
+ size_t i;
+ for (i = 0; i < PREPROC_NUM_EFFECTS; i++) {
+ if (memcmp(&sDescriptors[i]->uuid, uuid, sizeof(effect_uuid_t)) == 0) {
+ return sDescriptors[i];
+ }
+ }
+ return NULL;
+}
+
+
+extern "C" {
+
+//------------------------------------------------------------------------------
+// Effect Control Interface Implementation
+//------------------------------------------------------------------------------
+
+int PreProcessingFx_Process(effect_handle_t self,
+ audio_buffer_t *inBuffer,
+ audio_buffer_t *outBuffer)
+{
+ preproc_effect_t * effect = (preproc_effect_t *)self;
+ int status = 0;
+
+ if (effect == NULL){
+ LOGV("PreProcessingFx_Process() ERROR effect == NULL");
+ return -EINVAL;
+ }
+ preproc_session_t * session = (preproc_session_t *)effect->session;
+
+ if (inBuffer == NULL || inBuffer->raw == NULL ||
+ outBuffer == NULL || outBuffer->raw == NULL){
+ LOGW("PreProcessingFx_Process() ERROR bad pointer");
+ return -EINVAL;
+ }
+
+ session->processedMsk |= (1<<effect->procId);
+
+// LOGV("PreProcessingFx_Process In %d frames enabledMsk %08x processedMsk %08x",
+// inBuffer->frameCount, session->enabledMsk, session->processedMsk);
+
+ if ((session->processedMsk & session->enabledMsk) == session->enabledMsk) {
+ effect->session->processedMsk = 0;
+ size_t framesRq = outBuffer->frameCount;
+ size_t framesWr = 0;
+ if (session->framesOut) {
+ size_t fr = session->framesOut;
+ if (outBuffer->frameCount < fr) {
+ fr = outBuffer->frameCount;
+ }
+ memcpy(outBuffer->s16,
+ session->outBuf,
+ fr * session->outChannelCount * sizeof(int16_t));
+ memcpy(session->outBuf,
+ session->outBuf + fr * session->outChannelCount,
+ (session->framesOut - fr) * session->outChannelCount * sizeof(int16_t));
+ session->framesOut -= fr;
+ framesWr += fr;
+ }
+ outBuffer->frameCount = framesWr;
+ if (framesWr == framesRq) {
+ inBuffer->frameCount = 0;
+ return 0;
+ }
+
+ if (session->inResampler != NULL) {
+ size_t fr = session->frameCount - session->framesIn;
+ if (inBuffer->frameCount < fr) {
+ fr = inBuffer->frameCount;
+ }
+ if (session->inBufSize < session->framesIn + fr) {
+ session->inBufSize = session->framesIn + fr;
+ session->inBuf = (int16_t *)realloc(session->inBuf,
+ session->inBufSize * session->inChannelCount * sizeof(int16_t));
+ }
+ memcpy(session->inBuf + session->framesIn * session->inChannelCount,
+ inBuffer->s16,
+ fr * session->inChannelCount * sizeof(int16_t));
+
+ session->framesIn += fr;
+ inBuffer->frameCount = fr;
+ if (session->framesIn < session->frameCount) {
+ return 0;
+ }
+ size_t frIn = session->framesIn;
+ size_t frOut = session->apmFrameCount;
+ if (session->inChannelCount == 1) {
+ speex_resampler_process_int(session->inResampler,
+ 0,
+ session->inBuf,
+ &frIn,
+ session->procFrame->_payloadData,
+ &frOut);
+ } else {
+ speex_resampler_process_interleaved_int(session->inResampler,
+ session->inBuf,
+ &frIn,
+ session->procFrame->_payloadData,
+ &frOut);
+ }
+ memcpy(session->inBuf,
+ session->inBuf + frIn * session->inChannelCount,
+ (session->framesIn - frIn) * session->inChannelCount * sizeof(int16_t));
+ session->framesIn -= frIn;
+ } else {
+ size_t fr = session->frameCount - session->framesIn;
+ if (inBuffer->frameCount < fr) {
+ fr = inBuffer->frameCount;
+ }
+ memcpy(session->procFrame->_payloadData + session->framesIn * session->inChannelCount,
+ inBuffer->s16,
+ fr * session->inChannelCount * sizeof(int16_t));
+ session->framesIn += fr;
+ inBuffer->frameCount = fr;
+ if (session->framesIn < session->frameCount) {
+ return 0;
+ }
+ session->framesIn = 0;
+ }
+ session->procFrame->_payloadDataLengthInSamples =
+ session->apmFrameCount * session->inChannelCount;
+
+ effect->session->apm->ProcessStream(session->procFrame);
+
+ if (session->outBufSize < session->framesOut + session->frameCount) {
+ session->outBufSize = session->framesOut + session->frameCount;
+ session->outBuf = (int16_t *)realloc(session->outBuf,
+ session->outBufSize * session->outChannelCount * sizeof(int16_t));
+ }
+
+ if (session->outResampler != NULL) {
+ size_t frIn = session->apmFrameCount;
+ size_t frOut = session->frameCount;
+ if (session->inChannelCount == 1) {
+ speex_resampler_process_int(session->outResampler,
+ 0,
+ session->procFrame->_payloadData,
+ &frIn,
+ session->outBuf + session->framesOut * session->outChannelCount,
+ &frOut);
+ } else {
+ speex_resampler_process_interleaved_int(session->outResampler,
+ session->procFrame->_payloadData,
+ &frIn,
+ session->outBuf + session->framesOut * session->outChannelCount,
+ &frOut);
+ }
+ session->framesOut += frOut;
+ } else {
+ memcpy(session->outBuf + session->framesOut * session->outChannelCount,
+ session->procFrame->_payloadData,
+ session->frameCount * session->outChannelCount * sizeof(int16_t));
+ session->framesOut += session->frameCount;
+ }
+ size_t fr = session->framesOut;
+ if (framesRq - framesWr < fr) {
+ fr = framesRq - framesWr;
+ }
+ memcpy(outBuffer->s16 + framesWr * session->outChannelCount,
+ session->outBuf,
+ fr * session->outChannelCount * sizeof(int16_t));
+ memcpy(session->outBuf,
+ session->outBuf + fr * session->outChannelCount,
+ (session->framesOut - fr) * session->outChannelCount * sizeof(int16_t));
+ session->framesOut -= fr;
+ outBuffer->frameCount += fr;
+
+ return 0;
+ } else {
+ return -ENODATA;
+ }
+}
+
+int PreProcessingFx_Command(effect_handle_t self,
+ uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t *replySize,
+ void *pReplyData)
+{
+ preproc_effect_t * effect = (preproc_effect_t *) self;
+ int retsize;
+ int status;
+
+ if (effect == NULL){
+ return -EINVAL;
+ }
+
+ //LOGV("PreProcessingFx_Command: command %d cmdSize %d",cmdCode, cmdSize);
+
+ switch (cmdCode){
+ case EFFECT_CMD_INIT:
+ if (pReplyData == NULL || *replySize != sizeof(int)){
+ return -EINVAL;
+ }
+ if (effect->ops->init) {
+ effect->ops->init(effect);
+ }
+ *(int *)pReplyData = 0;
+ break;
+
+ case EFFECT_CMD_CONFIGURE:
+ if (pCmdData == NULL||
+ cmdSize != sizeof(effect_config_t)||
+ pReplyData == NULL||
+ *replySize != sizeof(int)){
+ LOGV("PreProcessingFx_Command cmdCode Case: "
+ "EFFECT_CMD_CONFIGURE: ERROR");
+ return -EINVAL;
+ }
+ *(int *)pReplyData = Session_SetConfig(effect->session, (effect_config_t *)pCmdData);
+ if (*(int *)pReplyData != 0) {
+ break;
+ }
+ *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
+ break;
+
+ case EFFECT_CMD_CONFIGURE_REVERSE:
+ if (pCmdData == NULL||
+ cmdSize != sizeof(effect_config_t)||
+ pReplyData == NULL||
+ *replySize != sizeof(int)){
+ LOGV("PreProcessingFx_Command cmdCode Case: "
+ "EFFECT_CMD_CONFIGURE_REVERSE: ERROR");
+ return -EINVAL;
+ }
+ *(int *)pReplyData = Session_SetReverseConfig(effect->session,
+ (effect_config_t *)pCmdData);
+ if (*(int *)pReplyData != 0) {
+ break;
+ }
+ break;
+
+ case EFFECT_CMD_RESET:
+ if (effect->ops->reset) {
+ effect->ops->reset(effect);
+ }
+ break;
+
+ case EFFECT_CMD_GET_PARAM:{
+ if (pCmdData == NULL ||
+ cmdSize < (int)sizeof(effect_param_t) ||
+ pReplyData == NULL ||
+ *replySize < (int)sizeof(effect_param_t)){
+ LOGV("PreProcessingFx_Command cmdCode Case: "
+ "EFFECT_CMD_GET_PARAM: ERROR");
+ return -EINVAL;
+ }
+ effect_param_t *p = (effect_param_t *)pCmdData;
+
+ memcpy(pReplyData, pCmdData, sizeof(effect_param_t) + p->psize);
+
+ p = (effect_param_t *)pReplyData;
+
+ int voffset = ((p->psize - 1) / sizeof(int32_t) + 1) * sizeof(int32_t);
+
+ if (effect->ops->get_parameter) {
+ p->status = effect->ops->get_parameter(effect, p->data,
+ (size_t *)&p->vsize,
+ p->data + voffset);
+ *replySize = sizeof(effect_param_t) + voffset + p->vsize;
+ }
+ } break;
+
+ case EFFECT_CMD_SET_PARAM:{
+ if (pCmdData == NULL||
+ cmdSize < (int)sizeof(effect_param_t) ||
+ pReplyData == NULL ||
+ *replySize != sizeof(int32_t)){
+ LOGV("PreProcessingFx_Command cmdCode Case: "
+ "EFFECT_CMD_SET_PARAM: ERROR");
+ return -EINVAL;
+ }
+ effect_param_t *p = (effect_param_t *) pCmdData;
+
+ if (p->psize != sizeof(int32_t)){
+ LOGV("PreProcessingFx_Command cmdCode Case: "
+ "EFFECT_CMD_SET_PARAM: ERROR, psize is not sizeof(int32_t)");
+ return -EINVAL;
+ }
+ if (effect->ops->set_parameter) {
+ *(int *)pReplyData = effect->ops->set_parameter(effect,
+ (void *)p->data,
+ p->data + p->psize);
+ }
+ } break;
+
+ case EFFECT_CMD_ENABLE:
+ if (pReplyData == NULL || *replySize != sizeof(int)){
+ LOGV("PreProcessingFx_Command cmdCode Case: EFFECT_CMD_ENABLE: ERROR");
+ return -EINVAL;
+ }
+ *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_ACTIVE);
+ break;
+
+ case EFFECT_CMD_DISABLE:
+ if (pReplyData == NULL || *replySize != sizeof(int)){
+ LOGV("PreProcessingFx_Command cmdCode Case: EFFECT_CMD_DISABLE: ERROR");
+ return -EINVAL;
+ }
+ *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
+ break;
+
+ case EFFECT_CMD_SET_DEVICE:
+ case EFFECT_CMD_SET_INPUT_DEVICE:
+ if (pCmdData == NULL ||
+ cmdSize != sizeof(uint32_t)) {
+ LOGV("PreProcessingFx_Command cmdCode Case: EFFECT_CMD_SET_DEVICE: ERROR");
+ return -EINVAL;
+ }
+
+ if (effect->ops->set_device) {
+ effect->ops->set_device(effect, *(uint32_t *)pCmdData);
+ }
+ break;
+
+ case EFFECT_CMD_SET_VOLUME:
+ case EFFECT_CMD_SET_AUDIO_MODE:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+int PreProcessingFx_GetDescriptor(effect_handle_t self,
+ effect_descriptor_t *pDescriptor)
+{
+ preproc_effect_t * effect = (preproc_effect_t *) self;
+
+ if (effect == NULL || pDescriptor == NULL) {
+ return -EINVAL;
+ }
+
+ memcpy(pDescriptor, sDescriptors[effect->procId], sizeof(effect_descriptor_t));
+
+ return 0;
+}
+
+int PreProcessingFx_ProcessReverse(effect_handle_t self,
+ audio_buffer_t *inBuffer,
+ audio_buffer_t *outBuffer)
+{
+ preproc_effect_t * effect = (preproc_effect_t *)self;
+ int status = 0;
+
+ if (effect == NULL){
+ LOGW("PreProcessingFx_ProcessReverse() ERROR effect == NULL");
+ return -EINVAL;
+ }
+ preproc_session_t * session = (preproc_session_t *)effect->session;
+
+ if (inBuffer == NULL || inBuffer->raw == NULL){
+ LOGW("PreProcessingFx_ProcessReverse() ERROR bad pointer");
+ return -EINVAL;
+ }
+
+ session->revProcessedMsk |= (1<<effect->procId);
+
+// LOGV("PreProcessingFx_ProcessReverse In %d frames revEnabledMsk %08x revProcessedMsk %08x",
+// inBuffer->frameCount, session->revEnabledMsk, session->revProcessedMsk);
+
+
+ if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
+ effect->session->revProcessedMsk = 0;
+ if (session->revResampler != NULL) {
+ size_t fr = session->frameCount - session->framesRev;
+ if (inBuffer->frameCount < fr) {
+ fr = inBuffer->frameCount;
+ }
+ if (session->revBufSize < session->framesRev + fr) {
+ session->revBufSize = session->framesRev + fr;
+ session->revBuf = (int16_t *)realloc(session->revBuf,
+ session->revBufSize * session->inChannelCount * sizeof(int16_t));
+ }
+ memcpy(session->revBuf + session->framesRev * session->inChannelCount,
+ inBuffer->s16,
+ fr * session->inChannelCount * sizeof(int16_t));
+
+ session->framesRev += fr;
+ inBuffer->frameCount = fr;
+ if (session->framesRev < session->frameCount) {
+ return 0;
+ }
+ size_t frIn = session->framesRev;
+ size_t frOut = session->apmFrameCount;
+ if (session->inChannelCount == 1) {
+ speex_resampler_process_int(session->revResampler,
+ 0,
+ session->revBuf,
+ &frIn,
+ session->revFrame->_payloadData,
+ &frOut);
+ } else {
+ speex_resampler_process_interleaved_int(session->revResampler,
+ session->revBuf,
+ &frIn,
+ session->revFrame->_payloadData,
+ &frOut);
+ }
+ memcpy(session->revBuf,
+ session->revBuf + frIn * session->inChannelCount,
+ (session->framesRev - frIn) * session->inChannelCount * sizeof(int16_t));
+ session->framesRev -= frIn;
+ } else {
+ size_t fr = session->frameCount - session->framesRev;
+ if (inBuffer->frameCount < fr) {
+ fr = inBuffer->frameCount;
+ }
+ memcpy(session->revFrame->_payloadData + session->framesRev * session->inChannelCount,
+ inBuffer->s16,
+ fr * session->inChannelCount * sizeof(int16_t));
+ session->framesRev += fr;
+ inBuffer->frameCount = fr;
+ if (session->framesRev < session->frameCount) {
+ return 0;
+ }
+ session->framesRev = 0;
+ }
+ session->revFrame->_payloadDataLengthInSamples =
+ session->apmFrameCount * session->inChannelCount;
+ effect->session->apm->AnalyzeReverseStream(session->revFrame);
+ return 0;
+ } else {
+ return -ENODATA;
+ }
+}
+
+
+// effect_handle_t interface implementation for effect
+const struct effect_interface_s sEffectInterface = {
+ PreProcessingFx_Process,
+ PreProcessingFx_Command,
+ PreProcessingFx_GetDescriptor,
+ NULL
+};
+
+const struct effect_interface_s sEffectInterfaceReverse = {
+ PreProcessingFx_Process,
+ PreProcessingFx_Command,
+ PreProcessingFx_GetDescriptor,
+ PreProcessingFx_ProcessReverse
+};
+
+//------------------------------------------------------------------------------
+// Effect Library Interface Implementation
+//------------------------------------------------------------------------------
+
+int PreProcessingLib_QueryNumberEffects(uint32_t *pNumEffects)
+{
+ if (PreProc_Init() != 0) {
+ return sInitStatus;
+ }
+ if (pNumEffects == NULL) {
+ return -EINVAL;
+ }
+ *pNumEffects = PREPROC_NUM_EFFECTS;
+ return sInitStatus;
+}
+
+int PreProcessingLib_QueryEffect(uint32_t index, effect_descriptor_t *pDescriptor)
+{
+ if (PreProc_Init() != 0) {
+ return sInitStatus;
+ }
+ if (index >= PREPROC_NUM_EFFECTS) {
+ return -EINVAL;
+ }
+ memcpy(pDescriptor, sDescriptors[index], sizeof(effect_descriptor_t));
+ return 0;
+}
+
+int PreProcessingLib_Create(effect_uuid_t *uuid,
+ int32_t sessionId,
+ int32_t ioId,
+ effect_handle_t *pInterface)
+{
+ LOGV("EffectCreate: uuid: %08x session %d IO: %d", uuid->timeLow, sessionId, ioId);
+
+ int status;
+ const effect_descriptor_t *desc;
+ preproc_session_t *session;
+ uint32_t procId;
+
+ if (PreProc_Init() != 0) {
+ return sInitStatus;
+ }
+ desc = PreProc_GetDescriptor(uuid);
+ if (desc == NULL) {
+ LOGW("EffectCreate: fx not found uuid: %08x", uuid->timeLow);
+ return -EINVAL;
+ }
+ procId = UuidToProcId(&desc->type);
+
+ session = PreProc_GetSession(procId, sessionId, ioId);
+ if (session == NULL) {
+ LOGW("EffectCreate: no more session available");
+ return -EINVAL;
+ }
+
+ status = Session_CreateEffect(session, procId, pInterface);
+
+ if (status < 0 && session->createdMsk == 0) {
+ session->io = 0;
+ }
+ return status;
+}
+
+int PreProcessingLib_Release(effect_handle_t interface)
+{
+ int status;
+ LOGV("EffectRelease start %p", interface);
+ if (PreProc_Init() != 0) {
+ return sInitStatus;
+ }
+
+ preproc_effect_t *fx = (preproc_effect_t *)interface;
+
+ if (fx->session->io == 0) {
+ return -EINVAL;
+ }
+ return Session_ReleaseEffect(fx->session, fx);
+}
+
+int PreProcessingLib_GetDescriptor(effect_uuid_t *uuid,
+ effect_descriptor_t *pDescriptor) {
+
+ if (pDescriptor == NULL || uuid == NULL){
+ return -EINVAL;
+ }
+
+ const effect_descriptor_t *desc = PreProc_GetDescriptor(uuid);
+ if (desc == NULL) {
+ LOGV("PreProcessingLib_GetDescriptor() not found");
+ return -EINVAL;
+ }
+
+ LOGV("PreProcessingLib_GetDescriptor() got fx %s", desc->name);
+
+ memcpy(pDescriptor, desc, sizeof(effect_descriptor_t));
+ return 0;
+}
+
+audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
+ tag : AUDIO_EFFECT_LIBRARY_TAG,
+ version : EFFECT_LIBRARY_API_VERSION,
+ name : "Audio Preprocessing Library",
+ implementor : "The Android Open Source Project",
+ query_num_effects : PreProcessingLib_QueryNumberEffects,
+ query_effect : PreProcessingLib_QueryEffect,
+ create_effect : PreProcessingLib_Create,
+ release_effect : PreProcessingLib_Release,
+ get_descriptor : PreProcessingLib_GetDescriptor
+};
+
+}; // extern "C"
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 8d98900..3919551 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -47,11 +47,11 @@
effect_callback_t cbf,
void* user,
int sessionId,
- audio_io_handle_t output
+ audio_io_handle_t io
)
: mStatus(NO_INIT)
{
- mStatus = set(type, uuid, priority, cbf, user, sessionId, output);
+ mStatus = set(type, uuid, priority, cbf, user, sessionId, io);
}
AudioEffect::AudioEffect(const char *typeStr,
@@ -60,7 +60,7 @@
effect_callback_t cbf,
void* user,
int sessionId,
- audio_io_handle_t output
+ audio_io_handle_t io
)
: mStatus(NO_INIT)
{
@@ -83,7 +83,7 @@
}
}
- mStatus = set(pType, pUuid, priority, cbf, user, sessionId, output);
+ mStatus = set(pType, pUuid, priority, cbf, user, sessionId, io);
}
status_t AudioEffect::set(const effect_uuid_t *type,
@@ -92,13 +92,13 @@
effect_callback_t cbf,
void* user,
int sessionId,
- audio_io_handle_t output)
+ audio_io_handle_t io)
{
sp<IEffect> iEffect;
sp<IMemory> cblk;
int enabled;
- LOGV("set %p mUserData: %p", this, user);
+ LOGV("set %p mUserData: %p uuid: %p timeLow %08x", this, user, type, type ? type->timeLow : 0);
if (mIEffect != 0) {
LOGW("Effect already in use");
@@ -135,7 +135,7 @@
mIEffectClient = new EffectClient(this);
iEffect = audioFlinger->createEffect(getpid(), (effect_descriptor_t *)&mDescriptor,
- mIEffectClient, priority, output, mSessionId, &mStatus, &mId, &enabled);
+ mIEffectClient, priority, io, mSessionId, &mStatus, &mId, &enabled);
if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) {
LOGE("set(): AudioFlinger could not create effect, status: %d", mStatus);
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 4c4aad0..1ec596e 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -162,8 +162,19 @@
int channelCount = popcount(channelMask);
+ if (sessionId == 0 ) {
+ mSessionId = AudioSystem::newAudioSessionId();
+ } else {
+ mSessionId = sessionId;
+ }
+ LOGV("set(): mSessionId %d", mSessionId);
+
audio_io_handle_t input = AudioSystem::getInput(inputSource,
- sampleRate, format, channelMask, (audio_in_acoustics_t)flags);
+ sampleRate,
+ format,
+ channelMask,
+ (audio_in_acoustics_t)flags,
+ mSessionId);
if (input == 0) {
LOGE("Could not get audio input for record source %d", inputSource);
return BAD_VALUE;
@@ -187,8 +198,6 @@
notificationFrames = frameCount/2;
}
- mSessionId = sessionId;
-
// create the IAudioRecord
status = openRecord_l(sampleRate, format, channelMask,
frameCount, flags, input);
@@ -589,8 +598,10 @@
{
mInput = AudioSystem::getInput(mInputSource,
mCblk->sampleRate,
- mFormat, mChannelMask,
- (audio_in_acoustics_t)mFlags);
+ mFormat,
+ mChannelMask,
+ (audio_in_acoustics_t)mFlags,
+ mSessionId);
return mInput;
}
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 6cb3847..5009957 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -605,11 +605,12 @@
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- audio_in_acoustics_t acoustics)
+ audio_in_acoustics_t acoustics,
+ int sessionId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
- return aps->getInput(inputSource, samplingRate, format, channels, acoustics);
+ return aps->getInput(inputSource, samplingRate, format, channels, acoustics, sessionId);
}
status_t AudioSystem::startInput(audio_io_handle_t input)
@@ -678,14 +679,14 @@
}
status_t AudioSystem::registerEffect(effect_descriptor_t *desc,
- audio_io_handle_t output,
+ audio_io_handle_t io,
uint32_t strategy,
int session,
int id)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->registerEffect(desc, output, strategy, session, id);
+ return aps->registerEffect(desc, io, strategy, session, id);
}
status_t AudioSystem::unregisterEffect(int id)
@@ -695,9 +696,11 @@
return aps->unregisterEffect(id);
}
-status_t AudioSystem::isStreamActive(int stream, bool* state, uint32_t inPastMs) {
+status_t AudioSystem::isStreamActive(int stream, bool* state, uint32_t inPastMs)
+{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
+ if (state == NULL) return BAD_VALUE;
*state = aps->isStreamActive(stream, inPastMs);
return NO_ERROR;
}
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 9fbcee0..49d410f 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -184,7 +184,8 @@
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- audio_in_acoustics_t acoustics)
+ audio_in_acoustics_t acoustics,
+ int audioSession)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -193,6 +194,7 @@
data.writeInt32(static_cast <uint32_t>(format));
data.writeInt32(channels);
data.writeInt32(static_cast <uint32_t>(acoustics));
+ data.writeInt32(audioSession);
remote()->transact(GET_INPUT, data, &reply);
return static_cast <audio_io_handle_t> (reply.readInt32());
}
@@ -285,7 +287,7 @@
}
virtual status_t registerEffect(effect_descriptor_t *desc,
- audio_io_handle_t output,
+ audio_io_handle_t io,
uint32_t strategy,
int session,
int id)
@@ -293,7 +295,7 @@
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.write(desc, sizeof(effect_descriptor_t));
- data.writeInt32(output);
+ data.writeInt32(io);
data.writeInt32(strategy);
data.writeInt32(session);
data.writeInt32(id);
@@ -439,11 +441,13 @@
uint32_t channels = data.readInt32();
audio_in_acoustics_t acoustics =
static_cast <audio_in_acoustics_t>(data.readInt32());
+ int audioSession = data.readInt32();
audio_io_handle_t input = getInput(inputSource,
samplingRate,
format,
channels,
- acoustics);
+ acoustics,
+ audioSession);
reply->writeInt32(static_cast <int>(input));
return NO_ERROR;
} break;
@@ -528,12 +532,12 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
effect_descriptor_t desc;
data.read(&desc, sizeof(effect_descriptor_t));
- audio_io_handle_t output = data.readInt32();
+ audio_io_handle_t io = data.readInt32();
uint32_t strategy = data.readInt32();
int session = data.readInt32();
int id = data.readInt32();
reply->writeInt32(static_cast <int32_t>(registerEffect(&desc,
- output,
+ io,
strategy,
session,
id)));
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index a44ef5a..7e44c29 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -23,14 +23,17 @@
#include <camera/ICamera.h>
#include <media/IMediaRecorderClient.h>
#include <media/IMediaRecorder.h>
+#include <gui/ISurfaceTexture.h>
#include <unistd.h>
+
namespace android {
enum {
RELEASE = IBinder::FIRST_CALL_TRANSACTION,
INIT,
CLOSE,
+ QUERY_SURFACE_MEDIASOURCE,
RESET,
STOP,
START,
@@ -71,6 +74,19 @@
return reply.readInt32();
}
+ sp<ISurfaceTexture> querySurfaceMediaSource()
+ {
+ LOGV("Query SurfaceMediaSource");
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ remote()->transact(QUERY_SURFACE_MEDIASOURCE, data, &reply);
+ int returnedNull = reply.readInt32();
+ if (returnedNull) {
+ return NULL;
+ }
+ return interface_cast<ISurfaceTexture>(reply.readStrongBinder());
+ }
+
status_t setPreviewSurface(const sp<Surface>& surface)
{
LOGV("setPreviewSurface(%p)", surface.get());
@@ -440,6 +456,20 @@
reply->writeInt32(setCamera(camera, proxy));
return NO_ERROR;
} break;
+ case QUERY_SURFACE_MEDIASOURCE: {
+ LOGV("QUERY_SURFACE_MEDIASOURCE");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+            // Ask the mediaserver-side recorder implementation to create
+            // a SurfaceMediaSource and return its ISurfaceTexture binder
+ sp<ISurfaceTexture> surfaceMediaSource = querySurfaceMediaSource();
+ // The mediaserver might have failed to create a source
+ int returnedNull= (surfaceMediaSource == NULL) ? 1 : 0 ;
+ reply->writeInt32(returnedNull);
+ if (!returnedNull) {
+ reply->writeStrongBinder(surfaceMediaSource->asBinder());
+ }
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 069bbb7..f0f07a2 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -132,6 +132,16 @@
LOGV("codec = %d", cap.mCodec);
}
+/*static*/ void
+MediaProfiles::logVideoEditorCap(const MediaProfiles::VideoEditorCap& cap)
+{
+ LOGV("videoeditor cap:");
+ LOGV("mMaxInputFrameWidth = %d", cap.mMaxInputFrameWidth);
+ LOGV("mMaxInputFrameHeight = %d", cap.mMaxInputFrameHeight);
+ LOGV("mMaxOutputFrameWidth = %d", cap.mMaxOutputFrameWidth);
+ LOGV("mMaxOutputFrameHeight = %d", cap.mMaxOutputFrameHeight);
+}
+
/*static*/ int
MediaProfiles::findTagForName(const MediaProfiles::NameToTagMap *map, size_t nMappings, const char *name)
{
@@ -368,6 +378,24 @@
mStartTimeOffsets.replaceValueFor(cameraId, offsetTimeMs);
}
+/*static*/ MediaProfiles::VideoEditorCap*
+MediaProfiles::createVideoEditorCap(const char **atts, MediaProfiles *profiles)
+{
+ CHECK(!strcmp("maxInputFrameWidth", atts[0]) &&
+ !strcmp("maxInputFrameHeight", atts[2]) &&
+ !strcmp("maxOutputFrameWidth", atts[4]) &&
+ !strcmp("maxOutputFrameHeight", atts[6]));
+
+ MediaProfiles::VideoEditorCap *pVideoEditorCap =
+ new MediaProfiles::VideoEditorCap(atoi(atts[1]), atoi(atts[3]),
+ atoi(atts[5]), atoi(atts[7]));
+
+ logVideoEditorCap(*pVideoEditorCap);
+ profiles->mVideoEditorCap = pVideoEditorCap;
+
+ return pVideoEditorCap;
+}
+
/*static*/ void
MediaProfiles::startElementHandler(void *userData, const char *name, const char **atts)
{
@@ -398,6 +426,8 @@
createCamcorderProfile(profiles->mCurrentCameraId, atts, profiles->mCameraIds));
} else if (strcmp("ImageEncoding", name) == 0) {
profiles->addImageEncodingQualityLevel(profiles->mCurrentCameraId, atts);
+ } else if (strcmp("VideoEditorCap", name) == 0) {
+ createVideoEditorCap(atts, profiles);
}
}
@@ -790,6 +820,17 @@
profiles->mImageEncodingQualityLevels.add(levels);
}
+/*static*/ void
+MediaProfiles::createDefaultVideoEditorCap(MediaProfiles *profiles)
+{
+ profiles->mVideoEditorCap =
+ new MediaProfiles::VideoEditorCap(
+ VIDEOEDITOR_DEFAULT_MAX_INPUT_FRAME_WIDTH,
+ VIDEOEDITOR_DEFUALT_MAX_INPUT_FRAME_HEIGHT,
+ VIDEOEDITOR_DEFAULT_MAX_OUTPUT_FRAME_WIDTH,
+ VIDEOEDITOR_DEFUALT_MAX_OUTPUT_FRAME_HEIGHT);
+}
+
/*static*/ MediaProfiles*
MediaProfiles::createDefaultInstance()
{
@@ -801,6 +842,7 @@
createDefaultAudioDecoders(profiles);
createDefaultEncoderOutputFileFormats(profiles);
createDefaultImageEncodingQualityLevels(profiles);
+ createDefaultVideoEditorCap(profiles);
return profiles;
}
@@ -899,6 +941,28 @@
return -1;
}
+int MediaProfiles::getVideoEditorCapParamByName(const char *name) const
+{
+ LOGV("getVideoEditorCapParamByName: %s", name);
+
+ if (mVideoEditorCap == NULL) {
+ LOGE("The mVideoEditorCap is not created, then create default cap.");
+ createDefaultVideoEditorCap(sInstance);
+ }
+
+ if (!strcmp("videoeditor.input.width.max", name))
+ return mVideoEditorCap->mMaxInputFrameWidth;
+ if (!strcmp("videoeditor.input.height.max", name))
+ return mVideoEditorCap->mMaxInputFrameHeight;
+ if (!strcmp("videoeditor.output.width.max", name))
+ return mVideoEditorCap->mMaxOutputFrameWidth;
+ if (!strcmp("videoeditor.output.height.max", name))
+ return mVideoEditorCap->mMaxOutputFrameHeight;
+
+ LOGE("The given video editor param name %s is not found", name);
+ return -1;
+}
+
Vector<audio_encoder> MediaProfiles::getAudioEncoders() const
{
Vector<audio_encoder> encoders;
diff --git a/media/libmedia/MediaScanner.cpp b/media/libmedia/MediaScanner.cpp
index 45bdff4..41f8593 100644
--- a/media/libmedia/MediaScanner.cpp
+++ b/media/libmedia/MediaScanner.cpp
@@ -47,16 +47,15 @@
return mLocale;
}
-status_t MediaScanner::processDirectory(
- const char *path, MediaScannerClient &client,
- ExceptionCheck exceptionCheck, void *exceptionEnv) {
+MediaScanResult MediaScanner::processDirectory(
+ const char *path, MediaScannerClient &client) {
int pathLength = strlen(path);
if (pathLength >= PATH_MAX) {
- return UNKNOWN_ERROR;
+ return MEDIA_SCAN_RESULT_SKIPPED;
}
char* pathBuffer = (char *)malloc(PATH_MAX + 1);
if (!pathBuffer) {
- return UNKNOWN_ERROR;
+ return MEDIA_SCAN_RESULT_ERROR;
}
int pathRemaining = PATH_MAX - pathLength;
@@ -69,21 +68,18 @@
client.setLocale(locale());
- status_t result =
- doProcessDirectory(pathBuffer, pathRemaining, client, false, exceptionCheck, exceptionEnv);
+ MediaScanResult result = doProcessDirectory(pathBuffer, pathRemaining, client, false);
free(pathBuffer);
return result;
}
-status_t MediaScanner::doProcessDirectory(
- char *path, int pathRemaining, MediaScannerClient &client,
- bool noMedia, ExceptionCheck exceptionCheck, void *exceptionEnv) {
+MediaScanResult MediaScanner::doProcessDirectory(
+ char *path, int pathRemaining, MediaScannerClient &client, bool noMedia) {
// place to copy file or directory name
char* fileSpot = path + strlen(path);
struct dirent* entry;
- struct stat statbuf;
// Treat all files as non-media in directories that contain a ".nomedia" file
if (pathRemaining >= 8 /* strlen(".nomedia") */ ) {
@@ -99,76 +95,88 @@
DIR* dir = opendir(path);
if (!dir) {
- LOGD("opendir %s failed, errno: %d", path, errno);
- return UNKNOWN_ERROR;
+ LOGW("Error opening directory '%s', skipping: %s.", path, strerror(errno));
+ return MEDIA_SCAN_RESULT_SKIPPED;
}
+ MediaScanResult result = MEDIA_SCAN_RESULT_OK;
while ((entry = readdir(dir))) {
- const char* name = entry->d_name;
-
- // ignore "." and ".."
- if (name[0] == '.' && (name[1] == 0 || (name[1] == '.' && name[2] == 0))) {
- continue;
+ if (doProcessDirectoryEntry(path, pathRemaining, client, noMedia, entry, fileSpot)
+ == MEDIA_SCAN_RESULT_ERROR) {
+ result = MEDIA_SCAN_RESULT_ERROR;
+ break;
}
+ }
+ closedir(dir);
+ return result;
+}
- int nameLength = strlen(name);
- if (nameLength + 1 > pathRemaining) {
- // path too long!
- continue;
+MediaScanResult MediaScanner::doProcessDirectoryEntry(
+ char *path, int pathRemaining, MediaScannerClient &client, bool noMedia,
+ struct dirent* entry, char* fileSpot) {
+ struct stat statbuf;
+ const char* name = entry->d_name;
+
+ // ignore "." and ".."
+ if (name[0] == '.' && (name[1] == 0 || (name[1] == '.' && name[2] == 0))) {
+ return MEDIA_SCAN_RESULT_SKIPPED;
+ }
+
+ int nameLength = strlen(name);
+ if (nameLength + 1 > pathRemaining) {
+ // path too long!
+ return MEDIA_SCAN_RESULT_SKIPPED;
+ }
+ strcpy(fileSpot, name);
+
+ int type = entry->d_type;
+ if (type == DT_UNKNOWN) {
+ // If the type is unknown, stat() the file instead.
+ // This is sometimes necessary when accessing NFS mounted filesystems, but
+ // could be needed in other cases as well.
+ if (stat(path, &statbuf) == 0) {
+ if (S_ISREG(statbuf.st_mode)) {
+ type = DT_REG;
+ } else if (S_ISDIR(statbuf.st_mode)) {
+ type = DT_DIR;
+ }
+ } else {
+ LOGD("stat() failed for %s: %s", path, strerror(errno) );
}
- strcpy(fileSpot, name);
+ }
+ if (type == DT_DIR) {
+ bool childNoMedia = noMedia;
+ // set noMedia flag on directories with a name that starts with '.'
+ // for example, the Mac ".Trashes" directory
+ if (name[0] == '.')
+ childNoMedia = true;
- int type = entry->d_type;
- if (type == DT_UNKNOWN) {
- // If the type is unknown, stat() the file instead.
- // This is sometimes necessary when accessing NFS mounted filesystems, but
- // could be needed in other cases well.
- if (stat(path, &statbuf) == 0) {
- if (S_ISREG(statbuf.st_mode)) {
- type = DT_REG;
- } else if (S_ISDIR(statbuf.st_mode)) {
- type = DT_DIR;
- }
- } else {
- LOGD("stat() failed for %s: %s", path, strerror(errno) );
+ // report the directory to the client
+ if (stat(path, &statbuf) == 0) {
+ status_t status = client.scanFile(path, statbuf.st_mtime, 0,
+ true /*isDirectory*/, childNoMedia);
+ if (status) {
+ return MEDIA_SCAN_RESULT_ERROR;
}
}
- if (type == DT_REG || type == DT_DIR) {
- if (type == DT_DIR) {
- bool childNoMedia = noMedia;
- // set noMedia flag on directories with a name that starts with '.'
- // for example, the Mac ".Trashes" directory
- if (name[0] == '.')
- childNoMedia = true;
- // report the directory to the client
- if (stat(path, &statbuf) == 0) {
- client.scanFile(path, statbuf.st_mtime, 0, true, childNoMedia);
- }
-
- // and now process its contents
- strcat(fileSpot, "/");
- int err = doProcessDirectory(path, pathRemaining - nameLength - 1, client,
- childNoMedia, exceptionCheck, exceptionEnv);
- if (err) {
- // pass exceptions up - ignore other errors
- if (exceptionCheck && exceptionCheck(exceptionEnv)) goto failure;
- LOGE("Error processing '%s' - skipping\n", path);
- continue;
- }
- } else {
- stat(path, &statbuf);
- client.scanFile(path, statbuf.st_mtime, statbuf.st_size, false, noMedia);
- if (exceptionCheck && exceptionCheck(exceptionEnv)) goto failure;
- }
+ // and now process its contents
+ strcat(fileSpot, "/");
+ MediaScanResult result = doProcessDirectory(path, pathRemaining - nameLength - 1,
+ client, childNoMedia);
+ if (result == MEDIA_SCAN_RESULT_ERROR) {
+ return MEDIA_SCAN_RESULT_ERROR;
+ }
+ } else if (type == DT_REG) {
+ stat(path, &statbuf);
+ status_t status = client.scanFile(path, statbuf.st_mtime, statbuf.st_size,
+ false /*isDirectory*/, noMedia);
+ if (status) {
+ return MEDIA_SCAN_RESULT_ERROR;
}
}
- closedir(dir);
- return OK;
-failure:
- closedir(dir);
- return -1;
+ return MEDIA_SCAN_RESULT_OK;
}
} // namespace android
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index bd3596e..7a7aeb6 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -62,7 +62,7 @@
mValues = new StringArray;
}
-bool MediaScannerClient::addStringTag(const char* name, const char* value)
+status_t MediaScannerClient::addStringTag(const char* name, const char* value)
{
if (mLocaleEncoding != kEncodingNone) {
// don't bother caching strings that are all ASCII.
@@ -212,8 +212,10 @@
// finally, push all name/value pairs to the client
for (int i = 0; i < mNames->size(); i++) {
- if (!handleStringTag(mNames->getEntry(i), mValues->getEntry(i)))
+ status_t status = handleStringTag(mNames->getEntry(i), mValues->getEntry(i));
+ if (status) {
break;
+ }
}
}
// else addStringTag() has done all the work so we have nothing to do
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 7b7ba74..a11fb80 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -27,6 +27,8 @@
#include <binder/IServiceManager.h>
#include <binder/IPCThreadState.h>
+#include <gui/SurfaceTextureClient.h>
+
#include <media/mediaplayer.h>
#include <media/AudioTrack.h>
@@ -38,6 +40,7 @@
#include <utils/String8.h>
#include <system/audio.h>
+#include <system/window.h>
namespace android {
@@ -81,6 +84,8 @@
if (p != 0) {
p->disconnect();
}
+
+ disconnectNativeWindow();
}
// always call with lock held
@@ -194,13 +199,63 @@
return mPlayer->getMetadata(update_only, apply_filter, metadata);
}
+void MediaPlayer::disconnectNativeWindow() {
+ if (mConnectedWindow != NULL) {
+ status_t err = native_window_disconnect(mConnectedWindow.get(),
+ NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ LOGW("native_window_disconnect returned an error: %s (%d)",
+ strerror(-err), err);
+ }
+ }
+ mConnectedWindow.clear();
+}
+
status_t MediaPlayer::setVideoSurface(const sp<Surface>& surface)
{
LOGV("setVideoSurface");
Mutex::Autolock _l(mLock);
if (mPlayer == 0) return NO_INIT;
- return mPlayer->setVideoSurface(surface);
+ sp<IBinder> binder(surface == NULL ? NULL : surface->asBinder());
+ if (mConnectedWindowBinder == binder) {
+ return OK;
+ }
+
+ if (surface != NULL) {
+ status_t err = native_window_connect(surface.get(),
+ NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ LOGE("setVideoSurface failed: %d", err);
+ // Note that we must do the reset before disconnecting from the ANW.
+ // Otherwise queue/dequeue calls could be made on the disconnected
+ // ANW, which may result in errors.
+ reset_l();
+
+ disconnectNativeWindow();
+
+ return err;
+ }
+ }
+
+ // Note that we must set the player's new surface before disconnecting the
+ // old one. Otherwise queue/dequeue calls could be made on the disconnected
+ // ANW, which may result in errors.
+ status_t err = mPlayer->setVideoSurface(surface);
+
+ disconnectNativeWindow();
+
+ mConnectedWindow = surface;
+
+ if (err == OK) {
+ mConnectedWindowBinder = binder;
+ } else {
+ disconnectNativeWindow();
+ }
+
+ return err;
}
status_t MediaPlayer::setVideoSurfaceTexture(
@@ -210,7 +265,47 @@
Mutex::Autolock _l(mLock);
if (mPlayer == 0) return NO_INIT;
- return mPlayer->setVideoSurfaceTexture(surfaceTexture);
+ sp<IBinder> binder(surfaceTexture == NULL ? NULL :
+ surfaceTexture->asBinder());
+ if (mConnectedWindowBinder == binder) {
+ return OK;
+ }
+
+ sp<ANativeWindow> anw;
+ if (surfaceTexture != NULL) {
+ anw = new SurfaceTextureClient(surfaceTexture);
+ status_t err = native_window_connect(anw.get(),
+ NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ LOGE("setVideoSurfaceTexture failed: %d", err);
+ // Note that we must do the reset before disconnecting from the ANW.
+ // Otherwise queue/dequeue calls could be made on the disconnected
+ // ANW, which may result in errors.
+ reset_l();
+
+ disconnectNativeWindow();
+
+ return err;
+ }
+ }
+
+ // Note that we must set the player's new SurfaceTexture before
+ // disconnecting the old one. Otherwise queue/dequeue calls could be made
+ // on the disconnected ANW, which may result in errors.
+ status_t err = mPlayer->setVideoSurfaceTexture(surfaceTexture);
+
+ disconnectNativeWindow();
+
+ mConnectedWindow = anw;
+
+ if (err == OK) {
+ mConnectedWindowBinder = binder;
+ } else {
+ disconnectNativeWindow();
+ }
+
+ return err;
}
// must call with lock held
@@ -434,10 +529,8 @@
return result;
}
-status_t MediaPlayer::reset()
+status_t MediaPlayer::reset_l()
{
- LOGV("reset");
- Mutex::Autolock _l(mLock);
mLoop = false;
if (mCurrentState == MEDIA_PLAYER_IDLE) return NO_ERROR;
mPrepareSync = false;
@@ -458,6 +551,13 @@
return NO_ERROR;
}
+status_t MediaPlayer::reset()
+{
+ LOGV("reset");
+ Mutex::Autolock _l(mLock);
+ return reset_l();
+}
+
status_t MediaPlayer::setAudioStreamType(int type)
{
LOGV("MediaPlayer::setAudioStreamType");
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 9e4edd0..fab674c 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -25,6 +25,7 @@
#include <media/IMediaPlayerService.h>
#include <media/IMediaRecorder.h>
#include <media/mediaplayer.h> // for MEDIA_ERROR_SERVER_DIED
+#include <gui/ISurfaceTexture.h>
namespace android {
@@ -127,7 +128,9 @@
return INVALID_OPERATION;
}
+ // following call is made over the Binder Interface
status_t ret = mMediaRecorder->setVideoSource(vs);
+
if (OK != ret) {
LOGV("setVideoSource failed: %d", ret);
mCurrentState = MEDIA_RECORDER_ERROR;
@@ -357,7 +360,7 @@
return INVALID_OPERATION;
}
if (!mIsVideoSourceSet) {
- LOGE("try to set video size without setting video source first");
+ LOGE("Cannot set video size without setting video source first");
return INVALID_OPERATION;
}
@@ -367,9 +370,27 @@
mCurrentState = MEDIA_RECORDER_ERROR;
return ret;
}
+
return ret;
}
+// Query a SurfaceMediaSource through the Mediaserver, over the
+// binder interface. This is used by the Filter Framework (MediaEncoder)
+// to get an <ISurfaceTexture> object to hook up to ANativeWindow.
+sp<ISurfaceTexture> MediaRecorder::
+ querySurfaceMediaSourceFromMediaServer()
+{
+ Mutex::Autolock _l(mLock);
+ mSurfaceMediaSource =
+ mMediaRecorder->querySurfaceMediaSource();
+ if (mSurfaceMediaSource == NULL) {
+ LOGE("SurfaceMediaSource could not be initialized!");
+ }
+ return mSurfaceMediaSource;
+}
+
+
+
status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
{
LOGV("setVideoFrameRate(%d)", frames_per_second);
@@ -382,7 +403,7 @@
return INVALID_OPERATION;
}
if (!mIsVideoSourceSet) {
- LOGE("try to set video frame rate without setting video source first");
+ LOGE("Cannot set video frame rate without setting video source first");
return INVALID_OPERATION;
}
@@ -621,7 +642,7 @@
return INVALID_OPERATION;
}
-MediaRecorder::MediaRecorder()
+MediaRecorder::MediaRecorder() : mSurfaceMediaSource(NULL)
{
LOGV("constructor");
@@ -632,6 +653,8 @@
if (mMediaRecorder != NULL) {
mCurrentState = MEDIA_RECORDER_IDLE;
}
+
+
doCleanUp();
}
@@ -646,6 +669,10 @@
if (mMediaRecorder != NULL) {
mMediaRecorder.clear();
}
+
+ if (mSurfaceMediaSource != NULL) {
+ mSurfaceMediaSource.clear();
+ }
}
status_t MediaRecorder::setListener(const sp<MediaRecorderListener>& listener)
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 115db1a..905b885 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -41,6 +41,7 @@
#include "MediaPlayerService.h"
#include "StagefrightRecorder.h"
+#include <gui/ISurfaceTexture.h>
namespace android {
@@ -57,6 +58,20 @@
return ok;
}
+
+sp<ISurfaceTexture> MediaRecorderClient::querySurfaceMediaSource()
+{
+ LOGV("Query SurfaceMediaSource");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder == NULL) {
+ LOGE("recorder is not initialized");
+ return NULL;
+ }
+ return mRecorder->querySurfaceMediaSource();
+}
+
+
+
status_t MediaRecorderClient::setCamera(const sp<ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy)
{
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index bbca529..c87a3c0 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -25,45 +25,51 @@
class MediaRecorderBase;
class MediaPlayerService;
class ICameraRecordingProxy;
+class ISurfaceTexture;
class MediaRecorderClient : public BnMediaRecorder
{
public:
- virtual status_t setCamera(const sp<ICamera>& camera,
- const sp<ICameraRecordingProxy>& proxy);
- virtual status_t setPreviewSurface(const sp<Surface>& surface);
- virtual status_t setVideoSource(int vs);
- virtual status_t setAudioSource(int as);
- virtual status_t setOutputFormat(int of);
- virtual status_t setVideoEncoder(int ve);
- virtual status_t setAudioEncoder(int ae);
- virtual status_t setOutputFile(const char* path);
- virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
- virtual status_t setOutputFileAuxiliary(int fd);
- virtual status_t setVideoSize(int width, int height);
- virtual status_t setVideoFrameRate(int frames_per_second);
- virtual status_t setParameters(const String8& params);
- virtual status_t setListener(const sp<IMediaRecorderClient>& listener);
- virtual status_t prepare();
- virtual status_t getMaxAmplitude(int* max);
- virtual status_t start();
- virtual status_t stop();
- virtual status_t reset();
- virtual status_t init();
- virtual status_t close();
- virtual status_t release();
+ virtual status_t setCamera(const sp<ICamera>& camera,
+ const sp<ICameraRecordingProxy>& proxy);
+ virtual status_t setPreviewSurface(const sp<Surface>& surface);
+ virtual status_t setVideoSource(int vs);
+ virtual status_t setAudioSource(int as);
+ virtual status_t setOutputFormat(int of);
+ virtual status_t setVideoEncoder(int ve);
+ virtual status_t setAudioEncoder(int ae);
+ virtual status_t setOutputFile(const char* path);
+ virtual status_t setOutputFile(int fd, int64_t offset,
+ int64_t length);
+ virtual status_t setOutputFileAuxiliary(int fd);
+ virtual status_t setVideoSize(int width, int height);
+ virtual status_t setVideoFrameRate(int frames_per_second);
+ virtual status_t setParameters(const String8& params);
+ virtual status_t setListener(
+ const sp<IMediaRecorderClient>& listener);
+ virtual status_t prepare();
+ virtual status_t getMaxAmplitude(int* max);
+ virtual status_t start();
+ virtual status_t stop();
+ virtual status_t reset();
+ virtual status_t init();
+ virtual status_t close();
+ virtual status_t release();
+ virtual status_t dump(int fd, const Vector<String16>& args) const;
+ virtual sp<ISurfaceTexture> querySurfaceMediaSource();
- virtual status_t dump(int fd, const Vector<String16>& args) const;
private:
- friend class MediaPlayerService; // for accessing private constructor
+ friend class MediaPlayerService; // for accessing private constructor
- MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid);
- virtual ~MediaRecorderClient();
+ MediaRecorderClient(
+ const sp<MediaPlayerService>& service,
+ pid_t pid);
+ virtual ~MediaRecorderClient();
- pid_t mPid;
- Mutex mLock;
- MediaRecorderBase *mRecorder;
- sp<MediaPlayerService> mMediaPlayerService;
+ pid_t mPid;
+ Mutex mLock;
+ MediaRecorderBase *mRecorder;
+ sp<MediaPlayerService> mMediaPlayerService;
};
}; // namespace android
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 223e0be..6427bb7 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -38,10 +38,12 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/SurfaceMediaSource.h>
#include <media/MediaProfiles.h>
#include <camera/ICamera.h>
#include <camera/CameraParameters.h>
#include <surfaceflinger/Surface.h>
+
#include <utils/Errors.h>
#include <sys/types.h>
#include <ctype.h>
@@ -69,7 +71,7 @@
mOutputFd(-1), mOutputFdAux(-1),
mAudioSource(AUDIO_SOURCE_CNT),
mVideoSource(VIDEO_SOURCE_LIST_END),
- mStarted(false) {
+ mStarted(false), mSurfaceMediaSource(NULL) {
LOGV("Constructor");
reset();
@@ -85,6 +87,14 @@
return OK;
}
+// The client side of mediaserver asks it to create a SurfaceMediaSource
+// and return an interface reference. The client side will use that
+// while encoding GL Frames.
+sp<ISurfaceTexture> StagefrightRecorder::querySurfaceMediaSource() const {
+ LOGV("Get SurfaceMediaSource");
+ return mSurfaceMediaSource;
+}
+
status_t StagefrightRecorder::setAudioSource(audio_source_t as) {
LOGV("setAudioSource: %d", as);
if (as < AUDIO_SOURCE_DEFAULT ||
@@ -1006,13 +1016,13 @@
source = createAudioSource();
} else {
- sp<CameraSource> cameraSource;
- status_t err = setupCameraSource(&cameraSource);
+ sp<MediaSource> mediaSource;
+ status_t err = setupMediaSource(&mediaSource);
if (err != OK) {
return err;
}
- err = setupVideoEncoder(cameraSource, mVideoBitRate, &source);
+ err = setupVideoEncoder(mediaSource, mVideoBitRate, &source);
if (err != OK) {
return err;
}
@@ -1042,20 +1052,19 @@
}
}
- if (mVideoSource == VIDEO_SOURCE_DEFAULT
- || mVideoSource == VIDEO_SOURCE_CAMERA) {
+ if (mVideoSource < VIDEO_SOURCE_LIST_END) {
if (mVideoEncoder != VIDEO_ENCODER_H264) {
return ERROR_UNSUPPORTED;
}
- sp<CameraSource> cameraSource;
- status_t err = setupCameraSource(&cameraSource);
+ sp<MediaSource> mediaSource;
+ status_t err = setupMediaSource(&mediaSource);
if (err != OK) {
return err;
}
sp<MediaSource> encoder;
- err = setupVideoEncoder(cameraSource, mVideoBitRate, &encoder);
+ err = setupVideoEncoder(mediaSource, mVideoBitRate, &encoder);
if (err != OK) {
return err;
@@ -1289,6 +1298,60 @@
}
}
+// Set up the appropriate MediaSource depending on the chosen option
+status_t StagefrightRecorder::setupMediaSource(
+ sp<MediaSource> *mediaSource) {
+ if (mVideoSource == VIDEO_SOURCE_DEFAULT
+ || mVideoSource == VIDEO_SOURCE_CAMERA) {
+ sp<CameraSource> cameraSource;
+ status_t err = setupCameraSource(&cameraSource);
+ if (err != OK) {
+ return err;
+ }
+ *mediaSource = cameraSource;
+ } else if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
+ // If using GRAlloc buffers, setup surfacemediasource.
+ // Later a handle to that will be passed
+ // to the client side when queried
+ status_t err = setupSurfaceMediaSource();
+ if (err != OK) {
+ return err;
+ }
+ *mediaSource = mSurfaceMediaSource;
+ } else {
+ return INVALID_OPERATION;
+ }
+ return OK;
+}
+
+// setupSurfaceMediaSource creates a source with the given
+// width and height and framerate.
+// TODO: This could go in a static function inside SurfaceMediaSource
+// similar to that in CameraSource
+status_t StagefrightRecorder::setupSurfaceMediaSource() {
+ status_t err = OK;
+ mSurfaceMediaSource = new SurfaceMediaSource(mVideoWidth, mVideoHeight);
+ if (mSurfaceMediaSource == NULL) {
+ return NO_INIT;
+ }
+
+ if (mFrameRate == -1) {
+ int32_t frameRate = 0;
+ CHECK (mSurfaceMediaSource->getFormat()->findInt32(
+ kKeyFrameRate, &frameRate));
+ LOGI("Frame rate is not explicitly set. Use the current frame "
+ "rate (%d fps)", frameRate);
+ mFrameRate = frameRate;
+ } else {
+ err = mSurfaceMediaSource->setFrameRate(mFrameRate);
+ }
+ CHECK(mFrameRate != -1);
+
+ mIsMetaDataStoredInVideoBuffers =
+ mSurfaceMediaSource->isMetaDataStoredInVideoBuffers();
+ return err;
+}
+
status_t StagefrightRecorder::setupCameraSource(
sp<CameraSource> *cameraSource) {
status_t err = OK;
@@ -1465,29 +1528,37 @@
status_t err = OK;
sp<MediaWriter> writer = new MPEG4Writer(outputFd);
- if (mVideoSource == VIDEO_SOURCE_DEFAULT
- || mVideoSource == VIDEO_SOURCE_CAMERA) {
+ if (mVideoSource < VIDEO_SOURCE_LIST_END) {
- sp<MediaSource> cameraMediaSource;
+ sp<MediaSource> mediaSource;
if (useSplitCameraSource) {
+ // TODO: Check if there is a better way to handle this
+ if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
+ LOGE("Cannot use split camera when encoding frames");
+ return INVALID_OPERATION;
+ }
LOGV("Using Split camera source");
- cameraMediaSource = mCameraSourceSplitter->createClient();
+ mediaSource = mCameraSourceSplitter->createClient();
} else {
- sp<CameraSource> cameraSource;
- err = setupCameraSource(&cameraSource);
- cameraMediaSource = cameraSource;
+ err = setupMediaSource(&mediaSource);
}
+
if ((videoWidth != mVideoWidth) || (videoHeight != mVideoHeight)) {
+ // TODO: Might be able to handle downsampling even if using GRAlloc
+ if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
+ LOGE("Cannot change size or Downsample when encoding frames");
+ return INVALID_OPERATION;
+ }
// Use downsampling from the original source.
- cameraMediaSource =
- new VideoSourceDownSampler(cameraMediaSource, videoWidth, videoHeight);
+ mediaSource =
+ new VideoSourceDownSampler(mediaSource, videoWidth, videoHeight);
}
if (err != OK) {
return err;
}
sp<MediaSource> encoder;
- err = setupVideoEncoder(cameraMediaSource, videoBitRate, &encoder);
+ err = setupVideoEncoder(mediaSource, videoBitRate, &encoder);
if (err != OK) {
return err;
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 034b373..1618b92 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -36,6 +36,8 @@
class MetaData;
struct AudioSource;
class MediaProfiles;
+class ISurfaceTexture;
+class SurfaceMediaSource;
struct StagefrightRecorder : public MediaRecorderBase {
StagefrightRecorder();
@@ -64,6 +66,8 @@
virtual status_t reset();
virtual status_t getMaxAmplitude(int *max);
virtual status_t dump(int fd, const Vector<String16>& args) const;
+ // Querying a SurfaceMediaSource
+ virtual sp<ISurfaceTexture> querySurfaceMediaSource() const;
private:
sp<ICamera> mCamera;
@@ -109,12 +113,18 @@
sp<MediaSourceSplitter> mCameraSourceSplitter;
sp<CameraSourceTimeLapse> mCameraSourceTimeLapse;
+
String8 mParams;
bool mIsMetaDataStoredInVideoBuffers;
MediaProfiles *mEncoderProfiles;
bool mStarted;
+ // Needed when GLFrames are encoded.
+ // An <ISurfaceTexture> pointer
+ // will be sent to the client side using which the
+ // frame buffers will be queued and dequeued
+ sp<SurfaceMediaSource> mSurfaceMediaSource;
status_t setupMPEG4Recording(
bool useSplitCameraSource,
@@ -134,7 +144,14 @@
sp<MediaSource> createAudioSource();
status_t checkVideoEncoderCapabilities();
status_t checkAudioEncoderCapabilities();
+ // Generic MediaSource set-up. Returns the appropriate
+ // source (CameraSource or SurfaceMediaSource)
+ // depending on the videosource type
+ status_t setupMediaSource(sp<MediaSource> *mediaSource);
status_t setupCameraSource(sp<CameraSource> *cameraSource);
+ // setup the surfacemediasource for the encoder
+ status_t setupSurfaceMediaSource();
+
status_t setupAudioEncoder(const sp<MediaWriter>& writer);
status_t setupVideoEncoder(
sp<MediaSource> cameraSource,
@@ -176,6 +193,7 @@
void clipNumberOfAudioChannels();
void setDefaultProfileIfNecessary();
+
StagefrightRecorder(const StagefrightRecorder &);
StagefrightRecorder &operator=(const StagefrightRecorder &);
};
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 5a5330d..0251baf 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -58,8 +58,10 @@
}
NuPlayer::HTTPLiveSource::~HTTPLiveSource() {
- mLiveSession->disconnect();
- mLiveLooper->stop();
+ if (mLiveSession != NULL) {
+ mLiveSession->disconnect();
+ mLiveLooper->stop();
+ }
}
void NuPlayer::HTTPLiveSource::start() {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 7cd8b6c..c6fca2c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -118,9 +118,15 @@
mPlayer->start();
if (mStartupSeekTimeUs >= 0) {
- mPlayer->seekToAsync(mStartupSeekTimeUs);
+ if (mStartupSeekTimeUs == 0) {
+ notifySeekComplete();
+ } else {
+ mPlayer->seekToAsync(mStartupSeekTimeUs);
+ }
+
mStartupSeekTimeUs = -1;
}
+
break;
}
case PLAYING:
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d4d07b2..174ec92 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -445,6 +445,13 @@
return err;
}
+ err = native_window_set_scaling_mode(mNativeWindow.get(),
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+
+ if (err != OK) {
+ return err;
+ }
+
err = native_window_set_buffers_geometry(
mNativeWindow.get(),
def.format.video.nFrameWidth,
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index e17e1e8..3a3c082 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -42,6 +42,7 @@
SampleTable.cpp \
StagefrightMediaScanner.cpp \
StagefrightMetadataRetriever.cpp \
+ SurfaceMediaSource.cpp \
ThrottledSource.cpp \
TimeSource.cpp \
TimedEventQueue.cpp \
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 1bc2fb9..de66d99 100755
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -179,9 +179,6 @@
if (camera == 0) {
mCamera = Camera::connect(cameraId);
if (mCamera == 0) return -EBUSY;
- // If proxy is not passed in by applications, still use the proxy of
- // our own Camera to simplify the code.
- mCameraRecordingProxy = mCamera->getRecordingProxy();
mCameraFlags &= ~FLAGS_HOT_CAMERA;
} else {
// We get the proxy from Camera, not ICamera. We need to get the proxy
@@ -192,12 +189,12 @@
if (mCamera == 0) return -EBUSY;
mCameraRecordingProxy = proxy;
mCameraFlags |= FLAGS_HOT_CAMERA;
+ mDeathNotifier = new DeathNotifier();
+ // isBinderAlive needs linkToDeath to work.
+ mCameraRecordingProxy->asBinder()->linkToDeath(mDeathNotifier);
}
mCamera->lock();
- mDeathNotifier = new DeathNotifier();
- // isBinderAlive needs linkToDeath to work.
- mCameraRecordingProxy->asBinder()->linkToDeath(mDeathNotifier);
return OK;
}
@@ -292,7 +289,7 @@
CameraParameters* params,
int32_t width, int32_t height,
int32_t frameRate) {
-
+ LOGV("configureCamera");
Vector<Size> sizes;
bool isSetVideoSizeSupportedByCamera = true;
getSupportedVideoSizes(*params, &isSetVideoSizeSupportedByCamera, sizes);
@@ -368,6 +365,7 @@
const CameraParameters& params,
int32_t width, int32_t height) {
+ LOGV("checkVideoSize");
// The actual video size is the same as the preview size
// if the camera hal does not support separate video and
// preview output. In this case, we retrieve the video
@@ -419,6 +417,7 @@
const CameraParameters& params,
int32_t frameRate) {
+ LOGV("checkFrameRate");
int32_t frameRateActual = params.getPreviewFrameRate();
if (frameRateActual < 0) {
LOGE("Failed to retrieve preview frame rate (%d)", frameRateActual);
@@ -464,6 +463,7 @@
int32_t frameRate,
bool storeMetaDataInVideoBuffers) {
+ LOGV("init");
status_t err = OK;
int64_t token = IPCThreadState::self()->clearCallingIdentity();
err = initWithCameraAccess(camera, proxy, cameraId,
@@ -480,6 +480,7 @@
Size videoSize,
int32_t frameRate,
bool storeMetaDataInVideoBuffers) {
+ LOGV("initWithCameraAccess");
status_t err = OK;
if ((err = isCameraAvailable(camera, proxy, cameraId)) != OK) {
@@ -552,17 +553,25 @@
}
void CameraSource::startCameraRecording() {
+ LOGV("startCameraRecording");
// Reset the identity to the current thread because media server owns the
// camera and recording is started by the applications. The applications
// will connect to the camera in ICameraRecordingProxy::startRecording.
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->unlock();
- mCamera.clear();
+ if (mCameraFlags & FLAGS_HOT_CAMERA) {
+ mCamera->unlock();
+ mCamera.clear();
+ CHECK_EQ(OK, mCameraRecordingProxy->startRecording(new ProxyListener(this)));
+ } else {
+ mCamera->setListener(new CameraSourceListener(this));
+ mCamera->startRecording();
+ CHECK(mCamera->recordingEnabled());
+ }
IPCThreadState::self()->restoreCallingIdentity(token);
- CHECK_EQ(OK, mCameraRecordingProxy->startRecording(new ProxyListener(this)));
}
status_t CameraSource::start(MetaData *meta) {
+ LOGV("start");
CHECK(!mStarted);
if (mInitCheck != OK) {
LOGE("CameraSource is not initialized yet");
@@ -588,7 +597,13 @@
}
void CameraSource::stopCameraRecording() {
- mCameraRecordingProxy->stopRecording();
+ LOGV("stopCameraRecording");
+ if (mCameraFlags & FLAGS_HOT_CAMERA) {
+ mCameraRecordingProxy->stopRecording();
+ } else {
+ mCamera->setListener(NULL);
+ mCamera->stopRecording();
+ }
}
void CameraSource::releaseCamera() {
@@ -599,11 +614,10 @@
LOGV("Camera was cold when we started, stopping preview");
mCamera->stopPreview();
mCamera->disconnect();
- } else {
- // Unlock the camera so the application can lock it back.
- mCamera->unlock();
}
+ mCamera->unlock();
mCamera.clear();
+ mCamera = 0;
IPCThreadState::self()->restoreCallingIdentity(token);
}
if (mCameraRecordingProxy != 0) {
@@ -646,8 +660,13 @@
}
void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
+ LOGV("releaseRecordingFrame");
if (mCameraRecordingProxy != NULL) {
mCameraRecordingProxy->releaseRecordingFrame(frame);
+ } else {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ mCamera->releaseRecordingFrame(frame);
+ IPCThreadState::self()->restoreCallingIdentity(token);
}
}
@@ -707,7 +726,8 @@
while (mStarted && mFramesReceived.empty()) {
if (NO_ERROR !=
mFrameAvailableCondition.waitRelative(mLock, 1000000000LL)) {
- if (!mCameraRecordingProxy->asBinder()->isBinderAlive()) {
+ if (mCameraRecordingProxy != 0 &&
+ !mCameraRecordingProxy->asBinder()->isBinderAlive()) {
LOGW("camera recording proxy is gone");
return ERROR_END_OF_STREAM;
}
diff --git a/media/libstagefright/ESDS.cpp b/media/libstagefright/ESDS.cpp
index b7c8e0c..1f7ee25 100644
--- a/media/libstagefright/ESDS.cpp
+++ b/media/libstagefright/ESDS.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ESDS"
+#include <utils/Log.h>
+
#include "include/ESDS.h"
#include <string.h>
@@ -87,6 +91,8 @@
}
while (more);
+ LOGV("tag=0x%02x data_size=%d", *tag, *data_size);
+
if (*data_size > size) {
return ERROR_MALFORMED;
}
@@ -146,8 +152,20 @@
if (OCRstreamFlag) {
offset += 2;
size -= 2;
+
+ if ((offset >= size || mData[offset] != kTag_DecoderConfigDescriptor)
+ && offset - 2 < size
+ && mData[offset - 2] == kTag_DecoderConfigDescriptor) {
+ // Content found "in the wild" had OCRstreamFlag set but was
+ // missing OCR_ES_Id, the decoder config descriptor immediately
+ // followed instead.
+ offset -= 2;
+ size += 2;
+
+ LOGW("Found malformed 'esds' atom, ignoring missing OCR_ES_Id.");
+ }
}
-
+
if (offset >= size) {
return ERROR_MALFORMED;
}
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 77a6602..4edb613 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -185,7 +185,8 @@
mFinalStatus(OK),
mLastAccessPos(0),
mFetching(true),
- mLastFetchTimeUs(-1) {
+ mLastFetchTimeUs(-1),
+ mNumRetriesLeft(kMaxNumRetries) {
mLooper->setName("NuCachedSource2");
mLooper->registerHandler(mReflector);
mLooper->start();
@@ -254,7 +255,27 @@
void NuCachedSource2::fetchInternal() {
LOGV("fetchInternal");
- CHECK_EQ(mFinalStatus, (status_t)OK);
+ {
+ Mutex::Autolock autoLock(mLock);
+ CHECK(mFinalStatus == OK || mNumRetriesLeft > 0);
+
+ if (mFinalStatus != OK) {
+ --mNumRetriesLeft;
+
+ status_t err =
+ mSource->reconnectAtOffset(mCacheOffset + mCache->totalSize());
+
+ if (err == ERROR_UNSUPPORTED) {
+ mNumRetriesLeft = 0;
+ return;
+ } else if (err != OK) {
+ LOGI("The attempt to reconnect failed, %d retries remaining",
+ mNumRetriesLeft);
+
+ return;
+ }
+ }
+ }
PageCache::Page *page = mCache->acquirePage();
@@ -264,14 +285,23 @@
Mutex::Autolock autoLock(mLock);
if (n < 0) {
- LOGE("source returned error %ld", n);
+ LOGE("source returned error %ld, %d retries left", n, mNumRetriesLeft);
mFinalStatus = n;
mCache->releasePage(page);
} else if (n == 0) {
LOGI("ERROR_END_OF_STREAM");
+
+ mNumRetriesLeft = 0;
mFinalStatus = ERROR_END_OF_STREAM;
+
mCache->releasePage(page);
} else {
+ if (mFinalStatus != OK) {
+ LOGI("retrying a previously failed read succeeded.");
+ }
+ mNumRetriesLeft = kMaxNumRetries;
+ mFinalStatus = OK;
+
page->mSize = n;
mCache->appendPage(page);
}
@@ -280,7 +310,7 @@
void NuCachedSource2::onFetch() {
LOGV("onFetch");
- if (mFinalStatus != OK) {
+ if (mFinalStatus != OK && mNumRetriesLeft == 0) {
LOGV("EOS reached, done prefetching for now");
mFetching = false;
}
@@ -308,8 +338,19 @@
restartPrefetcherIfNecessary_l();
}
- (new AMessage(kWhatFetchMore, mReflector->id()))->post(
- mFetching ? 0 : 100000ll);
+ int64_t delayUs;
+ if (mFetching) {
+ if (mFinalStatus != OK && mNumRetriesLeft > 0) {
+ // We failed this time and will try again in 3 seconds.
+ delayUs = 3000000ll;
+ } else {
+ delayUs = 0;
+ }
+ } else {
+ delayUs = 100000ll;
+ }
+
+ (new AMessage(kWhatFetchMore, mReflector->id()))->post(delayUs);
}
void NuCachedSource2::onRead(const sp<AMessage> &msg) {
@@ -345,7 +386,7 @@
bool ignoreLowWaterThreshold, bool force) {
static const size_t kGrayArea = 1024 * 1024;
- if (mFetching || mFinalStatus != OK) {
+ if (mFetching || (mFinalStatus != OK && mNumRetriesLeft == 0)) {
return;
}
@@ -427,6 +468,12 @@
size_t NuCachedSource2::approxDataRemaining_l(status_t *finalStatus) {
*finalStatus = mFinalStatus;
+
+ if (mFinalStatus != OK && mNumRetriesLeft > 0) {
+ // Pretend that everything is fine until we're out of retries.
+ *finalStatus = OK;
+ }
+
off64_t lastBytePosCached = mCacheOffset + mCache->totalSize();
if (mLastAccessPos < lastBytePosCached) {
return lastBytePosCached - mLastAccessPos;
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 78349a6..ac73351 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1769,6 +1769,13 @@
return err;
}
+ err = native_window_set_scaling_mode(mNativeWindow.get(),
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+
+ if (err != OK) {
+ return err;
+ }
+
err = native_window_set_buffers_geometry(
mNativeWindow.get(),
def.format.video.nFrameWidth,
@@ -4343,26 +4350,19 @@
status_t QueryCodecs(
const sp<IOMX> &omx,
- const char *mime, bool queryDecoders,
+ const char *mime, bool queryDecoders, bool hwCodecOnly,
Vector<CodecCapabilities> *results) {
+ Vector<String8> matchingCodecs;
results->clear();
- for (int index = 0;; ++index) {
- const char *componentName;
+ OMXCodec::findMatchingCodecs(mime,
+ !queryDecoders /*createEncoder*/,
+ NULL /*matchComponentName*/,
+ hwCodecOnly ? OMXCodec::kHardwareCodecsOnly : 0 /*flags*/,
+ &matchingCodecs);
- if (!queryDecoders) {
- componentName = GetCodec(
- kEncoderInfo, sizeof(kEncoderInfo) / sizeof(kEncoderInfo[0]),
- mime, index);
- } else {
- componentName = GetCodec(
- kDecoderInfo, sizeof(kDecoderInfo) / sizeof(kDecoderInfo[0]),
- mime, index);
- }
-
- if (!componentName) {
- return OK;
- }
+ for (size_t c = 0; c < matchingCodecs.size(); c++) {
+ const char *componentName = matchingCodecs.itemAt(c).string();
if (strncmp(componentName, "OMX.", 4)) {
// Not an OpenMax component but a software codec.
@@ -4424,6 +4424,15 @@
CHECK_EQ(omx->freeNode(node), (status_t)OK);
}
+
+ return OK;
+}
+
+status_t QueryCodecs(
+ const sp<IOMX> &omx,
+ const char *mimeType, bool queryDecoders,
+ Vector<CodecCapabilities> *results) {
+ return QueryCodecs(omx, mimeType, queryDecoders, false /*hwCodecOnly*/, results);
}
void OMXCodec::restorePatchedDataPointer(BufferInfo *info) {
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index 89faff7..571e8be 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -52,13 +52,13 @@
return false;
}
-static status_t HandleMIDI(
+static MediaScanResult HandleMIDI(
const char *filename, MediaScannerClient *client) {
// get the library configuration and do sanity check
const S_EAS_LIB_CONFIG* pLibConfig = EAS_Config();
if ((pLibConfig == NULL) || (LIB_VERSION != pLibConfig->libVersion)) {
LOGE("EAS library/header mismatch\n");
- return UNKNOWN_ERROR;
+ return MEDIA_SCAN_RESULT_ERROR;
}
EAS_I32 temp;
@@ -88,34 +88,41 @@
}
if (result != EAS_SUCCESS) {
- return UNKNOWN_ERROR;
+ return MEDIA_SCAN_RESULT_SKIPPED;
}
char buffer[20];
sprintf(buffer, "%ld", temp);
- if (!client->addStringTag("duration", buffer)) return UNKNOWN_ERROR;
-
- return OK;
+ status_t status = client->addStringTag("duration", buffer);
+ if (status) {
+ return MEDIA_SCAN_RESULT_ERROR;
+ }
+ return MEDIA_SCAN_RESULT_OK;
}
-status_t StagefrightMediaScanner::processFile(
+MediaScanResult StagefrightMediaScanner::processFile(
const char *path, const char *mimeType,
MediaScannerClient &client) {
LOGV("processFile '%s'.", path);
client.setLocale(locale());
client.beginFile();
+ MediaScanResult result = processFileInternal(path, mimeType, client);
+ client.endFile();
+ return result;
+}
+MediaScanResult StagefrightMediaScanner::processFileInternal(
+ const char *path, const char *mimeType,
+ MediaScannerClient &client) {
const char *extension = strrchr(path, '.');
if (!extension) {
- return UNKNOWN_ERROR;
+ return MEDIA_SCAN_RESULT_SKIPPED;
}
if (!FileHasAcceptableExtension(extension)) {
- client.endFile();
-
- return UNKNOWN_ERROR;
+ return MEDIA_SCAN_RESULT_SKIPPED;
}
if (!strcasecmp(extension, ".mid")
@@ -127,53 +134,57 @@
|| !strcasecmp(extension, ".rtx")
|| !strcasecmp(extension, ".ota")
|| !strcasecmp(extension, ".mxmf")) {
- status_t status = HandleMIDI(path, &client);
- if (status != OK) {
- return status;
+ return HandleMIDI(path, &client);
+ }
+
+ sp<MediaMetadataRetriever> mRetriever(new MediaMetadataRetriever);
+
+ status_t status = mRetriever->setDataSource(path);
+ if (status) {
+ return MEDIA_SCAN_RESULT_ERROR;
+ }
+
+ const char *value;
+ if ((value = mRetriever->extractMetadata(
+ METADATA_KEY_MIMETYPE)) != NULL) {
+ status = client.setMimeType(value);
+ if (status) {
+ return MEDIA_SCAN_RESULT_ERROR;
}
- } else {
- sp<MediaMetadataRetriever> mRetriever(new MediaMetadataRetriever);
+ }
- if (mRetriever->setDataSource(path) == OK) {
- const char *value;
- if ((value = mRetriever->extractMetadata(
- METADATA_KEY_MIMETYPE)) != NULL) {
- client.setMimeType(value);
- }
+ struct KeyMap {
+ const char *tag;
+ int key;
+ };
+ static const KeyMap kKeyMap[] = {
+ { "tracknumber", METADATA_KEY_CD_TRACK_NUMBER },
+ { "discnumber", METADATA_KEY_DISC_NUMBER },
+ { "album", METADATA_KEY_ALBUM },
+ { "artist", METADATA_KEY_ARTIST },
+ { "albumartist", METADATA_KEY_ALBUMARTIST },
+ { "composer", METADATA_KEY_COMPOSER },
+ { "genre", METADATA_KEY_GENRE },
+ { "title", METADATA_KEY_TITLE },
+ { "year", METADATA_KEY_YEAR },
+ { "duration", METADATA_KEY_DURATION },
+ { "writer", METADATA_KEY_WRITER },
+ { "compilation", METADATA_KEY_COMPILATION },
+ { "isdrm", METADATA_KEY_IS_DRM },
+ };
+ static const size_t kNumEntries = sizeof(kKeyMap) / sizeof(kKeyMap[0]);
- struct KeyMap {
- const char *tag;
- int key;
- };
- static const KeyMap kKeyMap[] = {
- { "tracknumber", METADATA_KEY_CD_TRACK_NUMBER },
- { "discnumber", METADATA_KEY_DISC_NUMBER },
- { "album", METADATA_KEY_ALBUM },
- { "artist", METADATA_KEY_ARTIST },
- { "albumartist", METADATA_KEY_ALBUMARTIST },
- { "composer", METADATA_KEY_COMPOSER },
- { "genre", METADATA_KEY_GENRE },
- { "title", METADATA_KEY_TITLE },
- { "year", METADATA_KEY_YEAR },
- { "duration", METADATA_KEY_DURATION },
- { "writer", METADATA_KEY_WRITER },
- { "compilation", METADATA_KEY_COMPILATION },
- { "isdrm", METADATA_KEY_IS_DRM },
- };
- static const size_t kNumEntries = sizeof(kKeyMap) / sizeof(kKeyMap[0]);
-
- for (size_t i = 0; i < kNumEntries; ++i) {
- const char *value;
- if ((value = mRetriever->extractMetadata(kKeyMap[i].key)) != NULL) {
- client.addStringTag(kKeyMap[i].tag, value);
- }
+ for (size_t i = 0; i < kNumEntries; ++i) {
+ const char *value;
+ if ((value = mRetriever->extractMetadata(kKeyMap[i].key)) != NULL) {
+ status = client.addStringTag(kKeyMap[i].tag, value);
+ if (status) {
+ return MEDIA_SCAN_RESULT_ERROR;
}
}
}
- client.endFile();
-
- return OK;
+ return MEDIA_SCAN_RESULT_OK;
}
char *StagefrightMediaScanner::extractAlbumArt(int fd) {
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
new file mode 100644
index 0000000..ff4b08f
--- /dev/null
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -0,0 +1,756 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SurfaceMediaSource"
+
+#include <media/stagefright/SurfaceMediaSource.h>
+#include <ui/GraphicBuffer.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/openmax/OMX_IVCommon.h>
+
+#include <surfaceflinger/ISurfaceComposer.h>
+#include <surfaceflinger/SurfaceComposerClient.h>
+#include <surfaceflinger/IGraphicBufferAlloc.h>
+#include <OMX_Component.h>
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+SurfaceMediaSource::SurfaceMediaSource(uint32_t bufW, uint32_t bufH) :
+ mDefaultWidth(bufW),
+ mDefaultHeight(bufH),
+ mPixelFormat(0),
+ mBufferCount(MIN_ASYNC_BUFFER_SLOTS),
+ mClientBufferCount(0),
+ mServerBufferCount(MIN_ASYNC_BUFFER_SLOTS),
+ mCurrentSlot(INVALID_BUFFER_SLOT),
+ mCurrentTimestamp(0),
+ mSynchronousMode(true),
+ mConnectedApi(NO_CONNECTED_API),
+ mFrameRate(30),
+ mStarted(false) {
+ LOGV("SurfaceMediaSource::SurfaceMediaSource");
+ sp<ISurfaceComposer> composer(ComposerService::getComposerService());
+ mGraphicBufferAlloc = composer->createGraphicBufferAlloc();
+}
+
+SurfaceMediaSource::~SurfaceMediaSource() {
+ LOGV("SurfaceMediaSource::~SurfaceMediaSource");
+ if (mStarted) {
+ stop();
+ }
+ freeAllBuffers();
+}
+
+size_t SurfaceMediaSource::getQueuedCount() const {
+ Mutex::Autolock lock(mMutex);
+ return mQueue.size();
+}
+
+status_t SurfaceMediaSource::setBufferCountServerLocked(int bufferCount) {
+ if (bufferCount > NUM_BUFFER_SLOTS)
+ return BAD_VALUE;
+
+ // special-case, nothing to do
+ if (bufferCount == mBufferCount)
+ return OK;
+
+ if (!mClientBufferCount &&
+ bufferCount >= mBufferCount) {
+ // easy, we just have more buffers
+ mBufferCount = bufferCount;
+ mServerBufferCount = bufferCount;
+ mDequeueCondition.signal();
+ } else {
+ // we're here because we're either
+ // - reducing the number of available buffers
+ // - or there is a client-buffer-count in effect
+
+ // less than 2 buffers is never allowed
+ if (bufferCount < 2)
+ return BAD_VALUE;
+
+ // when there is non client-buffer-count in effect, the client is not
+ // allowed to dequeue more than one buffer at a time,
+ // so the next time they dequeue a buffer, we know that they don't
+ // own one. the actual resizing will happen during the next
+ // dequeueBuffer.
+
+ mServerBufferCount = bufferCount;
+ }
+ return OK;
+}
+
+// Called from the consumer side
+status_t SurfaceMediaSource::setBufferCountServer(int bufferCount) {
+ Mutex::Autolock lock(mMutex);
+ return setBufferCountServerLocked(bufferCount);
+}
+
+status_t SurfaceMediaSource::setBufferCount(int bufferCount) {
+ LOGV("SurfaceMediaSource::setBufferCount");
+ if (bufferCount > NUM_BUFFER_SLOTS) {
+ LOGE("setBufferCount: bufferCount is larger than the number of buffer slots");
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMutex);
+ // Error out if the user has dequeued buffers
+ for (int i = 0 ; i < mBufferCount ; i++) {
+ if (mSlots[i].mBufferState == BufferSlot::DEQUEUED) {
+ LOGE("setBufferCount: client owns some buffers");
+ return INVALID_OPERATION;
+ }
+ }
+
+ if (bufferCount == 0) {
+ const int minBufferSlots = mSynchronousMode ?
+ MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS;
+ mClientBufferCount = 0;
+ bufferCount = (mServerBufferCount >= minBufferSlots) ?
+ mServerBufferCount : minBufferSlots;
+ return setBufferCountServerLocked(bufferCount);
+ }
+
+ // We don't allow the client to set a buffer-count less than
+ // MIN_ASYNC_BUFFER_SLOTS (3), there is no reason for it.
+ if (bufferCount < MIN_ASYNC_BUFFER_SLOTS) {
+ return BAD_VALUE;
+ }
+
+ // here we're guaranteed that the client doesn't have dequeued buffers
+ // and will release all of its buffer references.
+ freeAllBuffers();
+ mBufferCount = bufferCount;
+ mClientBufferCount = bufferCount;
+ mCurrentSlot = INVALID_BUFFER_SLOT;
+ mQueue.clear();
+ mDequeueCondition.signal();
+ return OK;
+}
+
+status_t SurfaceMediaSource::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
+ LOGV("SurfaceMediaSource::requestBuffer");
+ Mutex::Autolock lock(mMutex);
+ if (slot < 0 || mBufferCount <= slot) {
+ LOGE("requestBuffer: slot index out of range [0, %d]: %d",
+ mBufferCount, slot);
+ return BAD_VALUE;
+ }
+ mSlots[slot].mRequestBufferCalled = true;
+ *buf = mSlots[slot].mGraphicBuffer;
+ return NO_ERROR;
+}
+
+status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
+ uint32_t format, uint32_t usage) {
+ LOGV("dequeueBuffer");
+
+
+ // Check for the buffer size- the client should just use the
+ // default width and height, and not try to set those.
+ // This is needed since
+ // the getFormat() returns mDefaultWidth/ Height for the OMX. It is
+ // queried by OMX in the beginning and not every time a frame comes.
+ // Not sure if there is a way to update the
+ // frame size while recording. So as of now, the client side
+ // sets the default values via the constructor, and the encoder is
+ // setup to encode frames of that size
+ // The design might need to change in the future.
+ // TODO: Currently just uses mDefaultWidth/Height. In the future
+ // we might declare mHeight and mWidth and check against those here.
+ if ((w != 0) || (h != 0)) {
+ LOGE("dequeuebuffer: invalid buffer size! Req: %dx%d, Found: %dx%d",
+ mDefaultWidth, mDefaultHeight, w, h);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMutex);
+
+ status_t returnFlags(OK);
+
+ int found, foundSync;
+ int dequeuedCount = 0;
+ bool tryAgain = true;
+ while (tryAgain) {
+ // We need to wait for the FIFO to drain if the number of buffer
+ // needs to change.
+ //
+ // The condition "number of buffer needs to change" is true if
+ // - the client doesn't care about how many buffers there are
+ // - AND the actual number of buffer is different from what was
+ // set in the last setBufferCountServer()
+ // - OR -
+ // setBufferCountServer() was set to a value incompatible with
+ // the synchronization mode (for instance because the sync mode
+ // changed since)
+ //
+ // As long as this condition is true AND the FIFO is not empty, we
+ // wait on mDequeueCondition.
+
+ int minBufferCountNeeded = mSynchronousMode ?
+ MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS;
+
+ if (!mClientBufferCount &&
+ ((mServerBufferCount != mBufferCount) ||
+ (mServerBufferCount < minBufferCountNeeded))) {
+ // wait for the FIFO to drain
+ while (!mQueue.isEmpty()) {
+ LOGV("Waiting for the FIFO to drain");
+ mDequeueCondition.wait(mMutex);
+ }
+ // need to check again since the mode could have changed
+ // while we were waiting
+ minBufferCountNeeded = mSynchronousMode ?
+ MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS;
+ }
+
+ if (!mClientBufferCount &&
+ ((mServerBufferCount != mBufferCount) ||
+ (mServerBufferCount < minBufferCountNeeded))) {
+ // here we're guaranteed that mQueue is empty
+ freeAllBuffers();
+ mBufferCount = mServerBufferCount;
+ if (mBufferCount < minBufferCountNeeded)
+ mBufferCount = minBufferCountNeeded;
+ mCurrentSlot = INVALID_BUFFER_SLOT;
+ returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS;
+ }
+
+ // look for a free buffer to give to the client
+ found = INVALID_BUFFER_SLOT;
+ foundSync = INVALID_BUFFER_SLOT;
+ dequeuedCount = 0;
+ for (int i = 0; i < mBufferCount; i++) {
+ const int state = mSlots[i].mBufferState;
+ if (state == BufferSlot::DEQUEUED) {
+ dequeuedCount++;
+ continue; // won't be continuing if could
+ // dequeue a non 'FREE' current slot like
+ // that in SurfaceTexture
+ }
+ // In case of Encoding, we do not deque the mCurrentSlot buffer
+ // since we follow synchronous mode (unlike possibly in
+ // SurfaceTexture that could be using the asynch mode
+ // or has some mechanism in GL to be able to wait till the
+ // currentslot is done using the data)
+ // Here, we have to wait for the MPEG4Writer(or equiv)
+ // to tell us when it's done using the current buffer
+ if (state == BufferSlot::FREE) {
+ foundSync = i;
+ // Unlike that in SurfaceTexture,
+ // We don't need to worry if it is the
+ // currentslot or not as it is in state FREE
+ found = i;
+ break;
+ }
+ }
+
+ // clients are not allowed to dequeue more than one buffer
+ // if they didn't set a buffer count.
+ if (!mClientBufferCount && dequeuedCount) {
+ return -EINVAL;
+ }
+
+ // See whether a buffer has been queued since the last setBufferCount so
+ // we know whether to perform the MIN_UNDEQUEUED_BUFFERS check below.
+ bool bufferHasBeenQueued = mCurrentSlot != INVALID_BUFFER_SLOT;
+ if (bufferHasBeenQueued) {
+ // make sure the client is not trying to dequeue more buffers
+ // than allowed.
+ const int avail = mBufferCount - (dequeuedCount+1);
+ if (avail < (MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode))) {
+ LOGE("dequeueBuffer: MIN_UNDEQUEUED_BUFFERS=%d exceeded (dequeued=%d)",
+ MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode),
+ dequeuedCount);
+ return -EBUSY;
+ }
+ }
+
+ // we're in synchronous mode and didn't find a buffer, we need to wait
+ // for for some buffers to be consumed
+ tryAgain = mSynchronousMode && (foundSync == INVALID_BUFFER_SLOT);
+ if (tryAgain) {
+ LOGW("Waiting..In synchronous mode and no buffer to dQ");
+ mDequeueCondition.wait(mMutex);
+ }
+ }
+
+ if (mSynchronousMode && found == INVALID_BUFFER_SLOT) {
+ // foundSync guaranteed to be != INVALID_BUFFER_SLOT
+ found = foundSync;
+ }
+
+ if (found == INVALID_BUFFER_SLOT) {
+ return -EBUSY;
+ }
+
+ const int buf = found;
+ *outBuf = found;
+
+ const bool useDefaultSize = !w && !h;
+ if (useDefaultSize) {
+ // use the default size
+ w = mDefaultWidth;
+ h = mDefaultHeight;
+ }
+
+ const bool updateFormat = (format != 0);
+ if (!updateFormat) {
+ // keep the current (or default) format
+ format = mPixelFormat;
+ }
+
+ // buffer is now in DEQUEUED (but can also be current at the same time,
+ // if we're in synchronous mode)
+ mSlots[buf].mBufferState = BufferSlot::DEQUEUED;
+
+ const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
+ if ((buffer == NULL) ||
+ (uint32_t(buffer->width) != w) ||
+ (uint32_t(buffer->height) != h) ||
+ (uint32_t(buffer->format) != format) ||
+ ((uint32_t(buffer->usage) & usage) != usage)) {
+ usage |= GraphicBuffer::USAGE_HW_TEXTURE;
+ status_t error;
+ sp<GraphicBuffer> graphicBuffer(
+ mGraphicBufferAlloc->createGraphicBuffer(
+ w, h, format, usage, &error));
+ if (graphicBuffer == 0) {
+ LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer failed");
+ return error;
+ }
+ if (updateFormat) {
+ mPixelFormat = format;
+ }
+ mSlots[buf].mGraphicBuffer = graphicBuffer;
+ mSlots[buf].mRequestBufferCalled = false;
+ returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
+ }
+ return returnFlags;
+}
+
+status_t SurfaceMediaSource::setSynchronousMode(bool enabled) {
+ Mutex::Autolock lock(mMutex);
+
+ status_t err = OK;
+ if (!enabled) {
+ // going to asynchronous mode, drain the queue
+ while (mSynchronousMode != enabled && !mQueue.isEmpty()) {
+ mDequeueCondition.wait(mMutex);
+ }
+ }
+
+ if (mSynchronousMode != enabled) {
+ // - if we're going to asynchronous mode, the queue is guaranteed to be
+ // empty here
+ // - if the client set the number of buffers, we're guaranteed that
+ // we have at least 3 (because we don't allow less)
+ mSynchronousMode = enabled;
+ mDequeueCondition.signal();
+ }
+ return err;
+}
+
+status_t SurfaceMediaSource::connect(int api) {
+ LOGV("SurfaceMediaSource::connect");
+ Mutex::Autolock lock(mMutex);
+ status_t err = NO_ERROR;
+ switch (api) {
+ case NATIVE_WINDOW_API_EGL:
+ case NATIVE_WINDOW_API_CPU:
+ case NATIVE_WINDOW_API_MEDIA:
+ case NATIVE_WINDOW_API_CAMERA:
+ if (mConnectedApi != NO_CONNECTED_API) {
+ err = -EINVAL;
+ } else {
+ mConnectedApi = api;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+status_t SurfaceMediaSource::disconnect(int api) {
+ LOGV("SurfaceMediaSource::disconnect");
+ Mutex::Autolock lock(mMutex);
+ status_t err = NO_ERROR;
+ switch (api) {
+ case NATIVE_WINDOW_API_EGL:
+ case NATIVE_WINDOW_API_CPU:
+ case NATIVE_WINDOW_API_MEDIA:
+ case NATIVE_WINDOW_API_CAMERA:
+ if (mConnectedApi == api) {
+ mConnectedApi = NO_CONNECTED_API;
+ } else {
+ err = -EINVAL;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+status_t SurfaceMediaSource::queueBuffer(int buf, int64_t timestamp,
+ uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform) {
+ LOGV("queueBuffer");
+
+ Mutex::Autolock lock(mMutex);
+ if (buf < 0 || buf >= mBufferCount) {
+ LOGE("queueBuffer: slot index out of range [0, %d]: %d",
+ mBufferCount, buf);
+ return -EINVAL;
+ } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) {
+ LOGE("queueBuffer: slot %d is not owned by the client (state=%d)",
+ buf, mSlots[buf].mBufferState);
+ return -EINVAL;
+ } else if (!mSlots[buf].mRequestBufferCalled) {
+ LOGE("queueBuffer: slot %d was enqueued without requesting a "
+ "buffer", buf);
+ return -EINVAL;
+ }
+
+ if (mSynchronousMode) {
+ // in synchronous mode we queue all buffers in a FIFO
+ mQueue.push_back(buf);
+ LOGV("Client queued buffer on slot: %d, Q size = %d",
+ buf, mQueue.size());
+ } else {
+ // in asynchronous mode we only keep the most recent buffer
+ if (mQueue.empty()) {
+ mQueue.push_back(buf);
+ } else {
+ Fifo::iterator front(mQueue.begin());
+ // buffer currently queued is freed
+ mSlots[*front].mBufferState = BufferSlot::FREE;
+ // and we record the new buffer index in the queued list
+ *front = buf;
+ }
+ }
+
+ mSlots[buf].mBufferState = BufferSlot::QUEUED;
+ mSlots[buf].mTimestamp = timestamp;
+ // TODO: (Confirm) Don't want to signal dequeue here.
+ // May be just in asynchronous mode?
+ // mDequeueCondition.signal();
+
+ // Once the queuing is done, we need to let the listener
+ // and signal the buffer consumer (encoder) know that a
+ // buffer is available
+ onFrameReceivedLocked();
+
+ *outWidth = mDefaultWidth;
+ *outHeight = mDefaultHeight;
+ *outTransform = 0;
+
+ return OK;
+}
+
+
+// onFrameReceivedLocked informs the buffer consumers (StageFrightRecorder)
+// or listeners that a frame has been received
+// It is supposed to be called only from queuebuffer.
+// The buffer is NOT made available for dequeueing immediately. We need to
+// wait to hear from StageFrightRecorder to set the buffer FREE
+// Make sure this is called when the mutex is locked
+status_t SurfaceMediaSource::onFrameReceivedLocked() {
+ LOGV("On Frame Received");
+ // Signal the encoder that a new frame has arrived
+ mFrameAvailableCondition.signal();
+
+ // call back the listener
+ // TODO: The listener may not be needed in SurfaceMediaSource at all.
+ // This can be made a SurfaceTexture specific thing
+ sp<FrameAvailableListener> listener;
+ if (mSynchronousMode || mQueue.empty()) {
+ listener = mFrameAvailableListener;
+ }
+
+ if (listener != 0) {
+ listener->onFrameAvailable();
+ }
+ return OK;
+}
+
+
+void SurfaceMediaSource::cancelBuffer(int buf) {
+ LOGV("SurfaceMediaSource::cancelBuffer");
+ Mutex::Autolock lock(mMutex);
+ if (buf < 0 || buf >= mBufferCount) {
+ LOGE("cancelBuffer: slot index out of range [0, %d]: %d",
+ mBufferCount, buf);
+ return;
+ } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) {
+ LOGE("cancelBuffer: slot %d is not owned by the client (state=%d)",
+ buf, mSlots[buf].mBufferState);
+ return;
+ }
+ mSlots[buf].mBufferState = BufferSlot::FREE;
+ mDequeueCondition.signal();
+}
+
+nsecs_t SurfaceMediaSource::getTimestamp() {
+ LOGV("SurfaceMediaSource::getTimestamp");
+ Mutex::Autolock lock(mMutex);
+ return mCurrentTimestamp;
+}
+
+
+void SurfaceMediaSource::setFrameAvailableListener(
+ const sp<FrameAvailableListener>& listener) {
+ LOGV("SurfaceMediaSource::setFrameAvailableListener");
+ Mutex::Autolock lock(mMutex);
+ mFrameAvailableListener = listener;
+}
+
+void SurfaceMediaSource::freeAllBuffers() {
+ LOGV("freeAllBuffers");
+ for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
+ mSlots[i].mGraphicBuffer = 0;
+ mSlots[i].mBufferState = BufferSlot::FREE;
+ }
+}
+
+sp<GraphicBuffer> SurfaceMediaSource::getCurrentBuffer() const {
+ Mutex::Autolock lock(mMutex);
+ return mCurrentBuf;
+}
+
+int SurfaceMediaSource::query(int what, int* outValue)
+{
+ LOGV("query");
+ Mutex::Autolock lock(mMutex);
+ int value;
+ switch (what) {
+ case NATIVE_WINDOW_WIDTH:
+ value = mDefaultWidth;
+ if (!mDefaultWidth && !mDefaultHeight && mCurrentBuf != 0)
+ value = mCurrentBuf->width;
+ break;
+ case NATIVE_WINDOW_HEIGHT:
+ value = mDefaultHeight;
+ if (!mDefaultWidth && !mDefaultHeight && mCurrentBuf != 0)
+ value = mCurrentBuf->height;
+ break;
+ case NATIVE_WINDOW_FORMAT:
+ value = mPixelFormat;
+ break;
+ case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
+ value = mSynchronousMode ?
+ (MIN_UNDEQUEUED_BUFFERS-1) : MIN_UNDEQUEUED_BUFFERS;
+ break;
+ default:
+ return BAD_VALUE;
+ }
+ outValue[0] = value;
+ return NO_ERROR;
+}
+
+void SurfaceMediaSource::dump(String8& result) const
+{
+ char buffer[1024];
+ dump(result, "", buffer, 1024);
+}
+
+void SurfaceMediaSource::dump(String8& result, const char* prefix,
+ char* buffer, size_t SIZE) const
+{
+ Mutex::Autolock _l(mMutex);
+ snprintf(buffer, SIZE,
+ "%smBufferCount=%d, mSynchronousMode=%d, default-size=[%dx%d], "
+ "mPixelFormat=%d, \n",
+ prefix, mBufferCount, mSynchronousMode, mDefaultWidth, mDefaultHeight,
+ mPixelFormat);
+ result.append(buffer);
+
+ String8 fifo;
+ int fifoSize = 0;
+ Fifo::const_iterator i(mQueue.begin());
+ while (i != mQueue.end()) {
+ snprintf(buffer, SIZE, "%02d ", *i++);
+ fifoSize++;
+ fifo.append(buffer);
+ }
+
+ result.append(buffer);
+
+ struct {
+ const char * operator()(int state) const {
+ switch (state) {
+ case BufferSlot::DEQUEUED: return "DEQUEUED";
+ case BufferSlot::QUEUED: return "QUEUED";
+ case BufferSlot::FREE: return "FREE";
+ default: return "Unknown";
+ }
+ }
+ } stateName;
+
+ for (int i = 0; i < mBufferCount; i++) {
+ const BufferSlot& slot(mSlots[i]);
+ snprintf(buffer, SIZE,
+ "%s%s[%02d] state=%-8s, "
+ "timestamp=%lld\n",
+ prefix, (i==mCurrentSlot)?">":" ", i, stateName(slot.mBufferState),
+ slot.mTimestamp
+ );
+ result.append(buffer);
+ }
+}
+
+status_t SurfaceMediaSource::setFrameRate(int32_t fps)
+{
+ Mutex::Autolock lock(mMutex);
+ const int MAX_FRAME_RATE = 60;
+ if (fps < 0 || fps > MAX_FRAME_RATE) {
+ return BAD_VALUE;
+ }
+ mFrameRate = fps;
+ return OK;
+}
+
+bool SurfaceMediaSource::isMetaDataStoredInVideoBuffers() const {
+ LOGV("isMetaDataStoredInVideoBuffers");
+ return true;
+}
+
+int32_t SurfaceMediaSource::getFrameRate( ) const {
+ Mutex::Autolock lock(mMutex);
+ return mFrameRate;
+}
+
+status_t SurfaceMediaSource::start(MetaData *params)
+{
+ LOGV("start");
+ Mutex::Autolock lock(mMutex);
+ CHECK(!mStarted);
+ mStarted = true;
+ return OK;
+}
+
+
+status_t SurfaceMediaSource::stop()
+{
+ LOGV("Stop");
+
+ Mutex::Autolock lock(mMutex);
+ // TODO: Add waiting on mFrameCompletedCondition here?
+ mStarted = false;
+ mFrameAvailableCondition.signal();
+
+ return OK;
+}
+
+sp<MetaData> SurfaceMediaSource::getFormat()
+{
+ LOGV("getFormat");
+ Mutex::Autolock autoLock(mMutex);
+ sp<MetaData> meta = new MetaData;
+
+ meta->setInt32(kKeyWidth, mDefaultWidth);
+ meta->setInt32(kKeyHeight, mDefaultHeight);
+ // The encoder format is set as an opaque colorformat
+ // The encoder will later find out the actual colorformat
+ // from the GL Frames itself.
+ meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatAndroidOpaque);
+ meta->setInt32(kKeyStride, mDefaultWidth);
+ meta->setInt32(kKeySliceHeight, mDefaultHeight);
+ meta->setInt32(kKeyFrameRate, mFrameRate);
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
+ return meta;
+}
+
+status_t SurfaceMediaSource::read( MediaBuffer **buffer,
+ const ReadOptions *options)
+{
+ LOGV("Read. Size of queued buffer: %d", mQueue.size());
+ *buffer = NULL;
+
+ Mutex::Autolock autoLock(mMutex) ;
+ // If the recording has started and the queue is empty, then just
+ // wait here till the frames come in from the client side
+ while (mStarted && mQueue.empty()) {
+ LOGV("NO FRAMES! Recorder waiting for FrameAvailableCondition");
+ mFrameAvailableCondition.wait(mMutex);
+ }
+
+ // If the loop was exited as a result of stopping the recording,
+ // it is OK
+ if (!mStarted) {
+ return OK;
+ }
+
+ // Update the current buffer info
+ // TODO: mCurrentSlot can be made a bufferstate since there
+ // can be more than one "current" slots.
+ Fifo::iterator front(mQueue.begin());
+ mCurrentSlot = *front;
+ mCurrentBuf = mSlots[mCurrentSlot].mGraphicBuffer;
+ mCurrentTimestamp = mSlots[mCurrentSlot].mTimestamp;
+
+ // Pass the data to the MediaBuffer
+ // TODO: Change later to pass in only the metadata
+ *buffer = new MediaBuffer(mCurrentBuf);
+ (*buffer)->setObserver(this);
+ (*buffer)->add_ref();
+ (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp);
+
+ return OK;
+}
+
+void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) {
+ LOGV("signalBufferReturned");
+
+ bool foundBuffer = false;
+ Mutex::Autolock autoLock(mMutex);
+
+ if (!mStarted) {
+ LOGV("started = false. Nothing to do");
+ return;
+ }
+
+ for (Fifo::iterator it = mQueue.begin(); it != mQueue.end(); ++it) {
+ if (mSlots[*it].mGraphicBuffer == buffer->graphicBuffer()) {
+ LOGV("Buffer %d returned. Setting it 'FREE'. New Queue size = %d",
+ *it, mQueue.size()-1);
+ mSlots[*it].mBufferState = BufferSlot::FREE;
+ mQueue.erase(it);
+ buffer->setObserver(0);
+ buffer->release();
+ mDequeueCondition.signal();
+ mFrameCompleteCondition.signal();
+ foundBuffer = true;
+ break;
+ }
+ }
+
+ if (!foundBuffer) {
+ CHECK_EQ(0, "signalBufferReturned: bogus buffer");
+ }
+}
+
+
+
+} // end of namespace android
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index bf978d7..c406964 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -370,7 +370,9 @@
int16_t *dst = (int16_t *)tmp->data();
const uint8_t *src = (const uint8_t *)buffer->data();
- while (n-- > 0) {
+ ssize_t numBytes = n;
+
+ while (numBytes-- > 0) {
*dst++ = ((int16_t)(*src) - 128) * 256;
++src;
}
diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
index 588a74d..07a9eb8 100644
--- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
+++ b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
@@ -25,6 +25,8 @@
#include "support.h"
+#include <cutils/properties.h> // for property_get
+
namespace android {
ChromiumHTTPDataSource::ChromiumHTTPDataSource(uint32_t flags)
@@ -111,7 +113,7 @@
mState = DISCONNECTED;
mCondition.broadcast();
- mURI.clear();
+ // mURI.clear();
mIOResult = err;
@@ -150,9 +152,19 @@
Mutex::Autolock autoLock(mLock);
if (mState != CONNECTED) {
- return ERROR_NOT_CONNECTED;
+ return INVALID_OPERATION;
}
+#if 0
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("media.stagefright.disable-net", value, 0)
+ && (!strcasecmp(value, "true") || !strcmp(value, "1"))) {
+ LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Simulating that the network is down.");
+ disconnect_l();
+ return ERROR_IO;
+ }
+#endif
+
if (offset != mCurrentOffset) {
AString tmp = mURI;
KeyedVector<String8, String8> tmpHeaders = mHeaders;
@@ -236,7 +248,7 @@
CHECK_EQ((int)mState, (int)DISCONNECTING);
mState = DISCONNECTED;
- mURI.clear();
+ // mURI.clear();
mCondition.broadcast();
@@ -299,5 +311,21 @@
}
}
+status_t ChromiumHTTPDataSource::reconnectAtOffset(off64_t offset) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mURI.empty()) {
+ return INVALID_OPERATION;
+ }
+
+ LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnecting...");
+ status_t err = connect_l(mURI.c_str(), &mHeaders, offset);
+ if (err != OK) {
+ LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnect failed w/ err 0x%08x", err);
+ }
+
+ return err;
+}
+
} // namespace android
diff --git a/media/libstagefright/codecs/aacenc/src/bit_cnt.c b/media/libstagefright/codecs/aacenc/src/bit_cnt.c
index dd0b9b4..8853efc 100644
--- a/media/libstagefright/codecs/aacenc/src/bit_cnt.c
+++ b/media/libstagefright/codecs/aacenc/src/bit_cnt.c
@@ -496,7 +496,7 @@
{
Word32 i, t0, t1, t2, t3, t00, t01;
- Word16 codeWord, codeLength;
+ UWord16 codeWord, codeLength;
Word16 sign, signLength;
diff --git a/media/libstagefright/codecs/aacenc/src/memalign.c b/media/libstagefright/codecs/aacenc/src/memalign.c
index 7d20352..44dd4ba 100644
--- a/media/libstagefright/codecs/aacenc/src/memalign.c
+++ b/media/libstagefright/codecs/aacenc/src/memalign.c
@@ -23,6 +23,11 @@
#include "memalign.h"
+#ifdef _MSC_VER
+#include <stddef.h>
+#else
+#include <stdint.h>
+#endif
/*****************************************************************************
*
@@ -66,8 +71,8 @@
pMemop->Set(CodecID, tmp, 0, size + alignment);
mem_ptr =
- (unsigned char *) ((unsigned int) (tmp + alignment - 1) &
- (~((unsigned int) (alignment - 1))));
+ (unsigned char *) ((intptr_t) (tmp + alignment - 1) &
+ (~((intptr_t) (alignment - 1))));
if (mem_ptr == tmp)
mem_ptr += alignment;
diff --git a/media/libstagefright/codecs/amrwbenc/src/cmnMemory.c b/media/libstagefright/codecs/amrwbenc/src/cmnMemory.c
deleted file mode 100644
index dd7c26d..0000000
--- a/media/libstagefright/codecs/amrwbenc/src/cmnMemory.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- ** Copyright 2003-2010, VisualOn, Inc.
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-/*******************************************************************************
- File: cmnMemory.c
-
- Content: sample code for memory operator implementation
-
-*******************************************************************************/
-#include "cmnMemory.h"
-
-#include <malloc.h>
-#if defined LINUX
-#include <string.h>
-#endif
-
-//VO_MEM_OPERATOR g_memOP;
-
-VO_U32 cmnMemAlloc (VO_S32 uID, VO_MEM_INFO * pMemInfo)
-{
- if (!pMemInfo)
- return VO_ERR_INVALID_ARG;
-
- pMemInfo->VBuffer = malloc (pMemInfo->Size);
- return 0;
-}
-
-VO_U32 cmnMemFree (VO_S32 uID, VO_PTR pMem)
-{
- free (pMem);
- return 0;
-}
-
-VO_U32 cmnMemSet (VO_S32 uID, VO_PTR pBuff, VO_U8 uValue, VO_U32 uSize)
-{
- memset (pBuff, uValue, uSize);
- return 0;
-}
-
-VO_U32 cmnMemCopy (VO_S32 uID, VO_PTR pDest, VO_PTR pSource, VO_U32 uSize)
-{
- memcpy (pDest, pSource, uSize);
- return 0;
-}
-
-VO_U32 cmnMemCheck (VO_S32 uID, VO_PTR pBuffer, VO_U32 uSize)
-{
- return 0;
-}
-
-VO_S32 cmnMemCompare (VO_S32 uID, VO_PTR pBuffer1, VO_PTR pBuffer2, VO_U32 uSize)
-{
- return memcmp(pBuffer1, pBuffer2, uSize);
-}
-
-VO_U32 cmnMemMove (VO_S32 uID, VO_PTR pDest, VO_PTR pSource, VO_U32 uSize)
-{
- memmove (pDest, pSource, uSize);
- return 0;
-}
-
diff --git a/media/libstagefright/codecs/avc/dec/Android.mk b/media/libstagefright/codecs/avc/dec/Android.mk
deleted file mode 100644
index 2949a04..0000000
--- a/media/libstagefright/codecs/avc/dec/Android.mk
+++ /dev/null
@@ -1,55 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- src/avcdec_api.cpp \
- src/avc_bitstream.cpp \
- src/header.cpp \
- src/itrans.cpp \
- src/pred_inter.cpp \
- src/pred_intra.cpp \
- src/residual.cpp \
- src/slice.cpp \
- src/vlc.cpp
-
-LOCAL_MODULE := libstagefright_avcdec
-
-LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/src \
- $(LOCAL_PATH)/include \
- $(LOCAL_PATH)/../common/include \
- $(TOP)/frameworks/base/media/libstagefright/include \
- frameworks/base/include/media/stagefright/openmax \
-
-LOCAL_CFLAGS := -DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
-
-include $(BUILD_STATIC_LIBRARY)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- SoftAVC.cpp
-
-LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/src \
- $(LOCAL_PATH)/include \
- $(LOCAL_PATH)/../common/include \
- frameworks/base/media/libstagefright/include \
- frameworks/base/include/media/stagefright/openmax \
-
-LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
-
-LOCAL_STATIC_LIBRARIES := \
- libstagefright_avcdec
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright_avc_common \
- libstagefright libstagefright_omx libstagefright_foundation libutils
-
-LOCAL_MODULE := libstagefright_soft_avcdec
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/media/libstagefright/codecs/avc/dec/SoftAVC.cpp b/media/libstagefright/codecs/avc/dec/SoftAVC.cpp
deleted file mode 100644
index 6a476f6..0000000
--- a/media/libstagefright/codecs/avc/dec/SoftAVC.cpp
+++ /dev/null
@@ -1,720 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "SoftAVC"
-#include <utils/Log.h>
-
-#include "SoftAVC.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/IOMX.h>
-
-#include "avcdec_api.h"
-#include "avcdec_int.h"
-
-namespace android {
-
-static const char kStartCode[4] = { 0x00, 0x00, 0x00, 0x01 };
-
-static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1b },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel11 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel12 },
-};
-
-template<class T>
-static void InitOMXParams(T *params) {
- params->nSize = sizeof(T);
- params->nVersion.s.nVersionMajor = 1;
- params->nVersion.s.nVersionMinor = 0;
- params->nVersion.s.nRevision = 0;
- params->nVersion.s.nStep = 0;
-}
-
-static int32_t Malloc(void *userData, int32_t size, int32_t attrs) {
- return reinterpret_cast<int32_t>(malloc(size));
-}
-
-static void Free(void *userData, int32_t ptr) {
- free(reinterpret_cast<void *>(ptr));
-}
-
-SoftAVC::SoftAVC(
- const char *name,
- const OMX_CALLBACKTYPE *callbacks,
- OMX_PTR appData,
- OMX_COMPONENTTYPE **component)
- : SimpleSoftOMXComponent(name, callbacks, appData, component),
- mHandle(new tagAVCHandle),
- mInputBufferCount(0),
- mWidth(160),
- mHeight(120),
- mCropLeft(0),
- mCropTop(0),
- mCropRight(mWidth - 1),
- mCropBottom(mHeight - 1),
- mSPSSeen(false),
- mPPSSeen(false),
- mCurrentTimeUs(-1),
- mEOSStatus(INPUT_DATA_AVAILABLE),
- mOutputPortSettingsChange(NONE) {
- initPorts();
- CHECK_EQ(initDecoder(), (status_t)OK);
-}
-
-SoftAVC::~SoftAVC() {
- PVAVCCleanUpDecoder(mHandle);
-
- delete mHandle;
- mHandle = NULL;
-}
-
-void SoftAVC::initPorts() {
- OMX_PARAM_PORTDEFINITIONTYPE def;
- InitOMXParams(&def);
-
- def.nPortIndex = 0;
- def.eDir = OMX_DirInput;
- def.nBufferCountMin = kNumInputBuffers;
- def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = 8192;
- def.bEnabled = OMX_TRUE;
- def.bPopulated = OMX_FALSE;
- def.eDomain = OMX_PortDomainVideo;
- def.bBuffersContiguous = OMX_FALSE;
- def.nBufferAlignment = 1;
-
- def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_AVC);
- def.format.video.pNativeRender = NULL;
- def.format.video.nFrameWidth = mWidth;
- def.format.video.nFrameHeight = mHeight;
- def.format.video.nStride = def.format.video.nFrameWidth;
- def.format.video.nSliceHeight = def.format.video.nFrameHeight;
- def.format.video.nBitrate = 0;
- def.format.video.xFramerate = 0;
- def.format.video.bFlagErrorConcealment = OMX_FALSE;
- def.format.video.eCompressionFormat = OMX_VIDEO_CodingAVC;
- def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
- def.format.video.pNativeWindow = NULL;
-
- addPort(def);
-
- def.nPortIndex = 1;
- def.eDir = OMX_DirOutput;
- def.nBufferCountMin = kNumOutputBuffers;
- def.nBufferCountActual = def.nBufferCountMin;
- def.bEnabled = OMX_TRUE;
- def.bPopulated = OMX_FALSE;
- def.eDomain = OMX_PortDomainVideo;
- def.bBuffersContiguous = OMX_FALSE;
- def.nBufferAlignment = 2;
-
- def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW);
- def.format.video.pNativeRender = NULL;
- def.format.video.nFrameWidth = mWidth;
- def.format.video.nFrameHeight = mHeight;
- def.format.video.nStride = def.format.video.nFrameWidth;
- def.format.video.nSliceHeight = def.format.video.nFrameHeight;
- def.format.video.nBitrate = 0;
- def.format.video.xFramerate = 0;
- def.format.video.bFlagErrorConcealment = OMX_FALSE;
- def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
- def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
- def.format.video.pNativeWindow = NULL;
-
- def.nBufferSize =
- (def.format.video.nFrameWidth * def.format.video.nFrameHeight * 3) / 2;
-
- addPort(def);
-}
-
-status_t SoftAVC::initDecoder() {
- memset(mHandle, 0, sizeof(tagAVCHandle));
- mHandle->AVCObject = NULL;
- mHandle->userData = this;
- mHandle->CBAVC_DPBAlloc = ActivateSPSWrapper;
- mHandle->CBAVC_FrameBind = BindFrameWrapper;
- mHandle->CBAVC_FrameUnbind = UnbindFrame;
- mHandle->CBAVC_Malloc = Malloc;
- mHandle->CBAVC_Free = Free;
-
- return OK;
-}
-
-OMX_ERRORTYPE SoftAVC::internalGetParameter(
- OMX_INDEXTYPE index, OMX_PTR params) {
- switch (index) {
- case OMX_IndexParamVideoPortFormat:
- {
- OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
- (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
- if (formatParams->nPortIndex > 1) {
- return OMX_ErrorUndefined;
- }
-
- if (formatParams->nIndex != 0) {
- return OMX_ErrorNoMore;
- }
-
- if (formatParams->nPortIndex == 0) {
- formatParams->eCompressionFormat = OMX_VIDEO_CodingAVC;
- formatParams->eColorFormat = OMX_COLOR_FormatUnused;
- formatParams->xFramerate = 0;
- } else {
- CHECK_EQ(formatParams->nPortIndex, 1u);
-
- formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
- formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
- formatParams->xFramerate = 0;
- }
-
- return OMX_ErrorNone;
- }
-
- case OMX_IndexParamVideoProfileLevelQuerySupported:
- {
- OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
- (OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
-
- if (profileLevel->nPortIndex != 0) { // Input port only
- LOGE("Invalid port index: %ld", profileLevel->nPortIndex);
- return OMX_ErrorUnsupportedIndex;
- }
-
- size_t index = profileLevel->nProfileIndex;
- size_t nProfileLevels =
- sizeof(kProfileLevels) / sizeof(kProfileLevels[0]);
- if (index >= nProfileLevels) {
- return OMX_ErrorNoMore;
- }
-
- profileLevel->eProfile = kProfileLevels[index].mProfile;
- profileLevel->eLevel = kProfileLevels[index].mLevel;
- return OMX_ErrorNone;
- }
-
- default:
- return SimpleSoftOMXComponent::internalGetParameter(index, params);
- }
-}
-
-OMX_ERRORTYPE SoftAVC::internalSetParameter(
- OMX_INDEXTYPE index, const OMX_PTR params) {
- switch (index) {
- case OMX_IndexParamStandardComponentRole:
- {
- const OMX_PARAM_COMPONENTROLETYPE *roleParams =
- (const OMX_PARAM_COMPONENTROLETYPE *)params;
-
- if (strncmp((const char *)roleParams->cRole,
- "video_decoder.avc",
- OMX_MAX_STRINGNAME_SIZE - 1)) {
- return OMX_ErrorUndefined;
- }
-
- return OMX_ErrorNone;
- }
-
- case OMX_IndexParamVideoPortFormat:
- {
- OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
- (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
- if (formatParams->nPortIndex > 1) {
- return OMX_ErrorUndefined;
- }
-
- if (formatParams->nIndex != 0) {
- return OMX_ErrorNoMore;
- }
-
- return OMX_ErrorNone;
- }
-
- default:
- return SimpleSoftOMXComponent::internalSetParameter(index, params);
- }
-}
-
-OMX_ERRORTYPE SoftAVC::getConfig(
- OMX_INDEXTYPE index, OMX_PTR params) {
- switch (index) {
- case OMX_IndexConfigCommonOutputCrop:
- {
- OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
-
- if (rectParams->nPortIndex != 1) {
- return OMX_ErrorUndefined;
- }
-
- rectParams->nLeft = mCropLeft;
- rectParams->nTop = mCropTop;
- rectParams->nWidth = mCropRight - mCropLeft + 1;
- rectParams->nHeight = mCropBottom - mCropTop + 1;
-
- return OMX_ErrorNone;
- }
-
- default:
- return OMX_ErrorUnsupportedIndex;
- }
-}
-
-static void findNALFragment(
- const OMX_BUFFERHEADERTYPE *inHeader,
- const uint8_t **fragPtr, size_t *fragSize) {
- const uint8_t *data = inHeader->pBuffer + inHeader->nOffset;
-
- size_t size = inHeader->nFilledLen;
-
- CHECK(size >= 4);
- CHECK(!memcmp(kStartCode, data, 4));
-
- size_t offset = 4;
- while (offset + 3 < size && memcmp(kStartCode, &data[offset], 4)) {
- ++offset;
- }
-
- *fragPtr = &data[4];
- if (offset + 3 >= size) {
- *fragSize = size - 4;
- } else {
- *fragSize = offset - 4;
- }
-}
-
-void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
- if (mOutputPortSettingsChange != NONE) {
- return;
- }
-
- List<BufferInfo *> &inQueue = getPortQueue(0);
- List<BufferInfo *> &outQueue = getPortQueue(1);
-
- if (mEOSStatus == OUTPUT_FRAMES_FLUSHED) {
- return;
- }
-
- while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty())
- && outQueue.size() == kNumOutputBuffers) {
- if (mEOSStatus == INPUT_EOS_SEEN) {
- OMX_BUFFERHEADERTYPE *outHeader;
- if (drainOutputBuffer(&outHeader)) {
- List<BufferInfo *>::iterator it = outQueue.begin();
- while ((*it)->mHeader != outHeader) {
- ++it;
- }
-
- BufferInfo *outInfo = *it;
- outInfo->mOwnedByUs = false;
- outQueue.erase(it);
- outInfo = NULL;
-
- notifyFillBufferDone(outHeader);
- outHeader = NULL;
- return;
- }
-
- BufferInfo *outInfo = *outQueue.begin();
- outHeader = outInfo->mHeader;
-
- outHeader->nOffset = 0;
- outHeader->nFilledLen = 0;
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
- outHeader->nTimeStamp = 0;
-
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
-
- mEOSStatus = OUTPUT_FRAMES_FLUSHED;
- return;
- }
-
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
-
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
-
- mEOSStatus = INPUT_EOS_SEEN;
- continue;
- }
-
- mCurrentTimeUs = inHeader->nTimeStamp;
-
- const uint8_t *fragPtr;
- size_t fragSize;
- findNALFragment(inHeader, &fragPtr, &fragSize);
-
- bool releaseFragment;
- OMX_BUFFERHEADERTYPE *outHeader;
- status_t err = decodeFragment(
- fragPtr, fragSize,
- &releaseFragment, &outHeader);
-
- if (releaseFragment) {
- CHECK_GE(inHeader->nFilledLen, fragSize + 4);
-
- inHeader->nOffset += fragSize + 4;
- inHeader->nFilledLen -= fragSize + 4;
-
- if (inHeader->nFilledLen == 0) {
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
- }
- }
-
- if (outHeader != NULL) {
- List<BufferInfo *>::iterator it = outQueue.begin();
- while ((*it)->mHeader != outHeader) {
- ++it;
- }
-
- BufferInfo *outInfo = *it;
- outInfo->mOwnedByUs = false;
- outQueue.erase(it);
- outInfo = NULL;
-
- notifyFillBufferDone(outHeader);
- outHeader = NULL;
- return;
- }
-
- if (err == INFO_FORMAT_CHANGED) {
- return;
- }
-
- if (err != OK) {
- notify(OMX_EventError, OMX_ErrorUndefined, err, NULL);
- return;
- }
- }
-}
-
-status_t SoftAVC::decodeFragment(
- const uint8_t *fragPtr, size_t fragSize,
- bool *releaseFragment,
- OMX_BUFFERHEADERTYPE **outHeader) {
- *releaseFragment = true;
- *outHeader = NULL;
-
- int nalType;
- int nalRefIdc;
- AVCDec_Status res =
- PVAVCDecGetNALType(
- const_cast<uint8_t *>(fragPtr), fragSize,
- &nalType, &nalRefIdc);
-
- if (res != AVCDEC_SUCCESS) {
- LOGV("cannot determine nal type");
- return ERROR_MALFORMED;
- }
-
- if (nalType != AVC_NALTYPE_SPS && nalType != AVC_NALTYPE_PPS
- && (!mSPSSeen || !mPPSSeen)) {
- // We haven't seen SPS or PPS yet.
- return OK;
- }
-
- switch (nalType) {
- case AVC_NALTYPE_SPS:
- {
- mSPSSeen = true;
-
- res = PVAVCDecSeqParamSet(
- mHandle, const_cast<uint8_t *>(fragPtr),
- fragSize);
-
- if (res != AVCDEC_SUCCESS) {
- return ERROR_MALFORMED;
- }
-
- AVCDecObject *pDecVid = (AVCDecObject *)mHandle->AVCObject;
-
- int32_t width =
- (pDecVid->seqParams[0]->pic_width_in_mbs_minus1 + 1) * 16;
-
- int32_t height =
- (pDecVid->seqParams[0]->pic_height_in_map_units_minus1 + 1) * 16;
-
- int32_t crop_left, crop_right, crop_top, crop_bottom;
- if (pDecVid->seqParams[0]->frame_cropping_flag)
- {
- crop_left = 2 * pDecVid->seqParams[0]->frame_crop_left_offset;
- crop_right =
- width - (2 * pDecVid->seqParams[0]->frame_crop_right_offset + 1);
-
- if (pDecVid->seqParams[0]->frame_mbs_only_flag)
- {
- crop_top = 2 * pDecVid->seqParams[0]->frame_crop_top_offset;
- crop_bottom =
- height -
- (2 * pDecVid->seqParams[0]->frame_crop_bottom_offset + 1);
- }
- else
- {
- crop_top = 4 * pDecVid->seqParams[0]->frame_crop_top_offset;
- crop_bottom =
- height -
- (4 * pDecVid->seqParams[0]->frame_crop_bottom_offset + 1);
- }
- } else {
- crop_bottom = height - 1;
- crop_right = width - 1;
- crop_top = crop_left = 0;
- }
-
- status_t err = OK;
-
- if (mWidth != width || mHeight != height) {
- mWidth = width;
- mHeight = height;
-
- updatePortDefinitions();
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
-
- err = INFO_FORMAT_CHANGED;
- }
-
- if (mCropLeft != crop_left
- || mCropTop != crop_top
- || mCropRight != crop_right
- || mCropBottom != crop_bottom) {
- mCropLeft = crop_left;
- mCropTop = crop_top;
- mCropRight = crop_right;
- mCropBottom = crop_bottom;
-
- notify(OMX_EventPortSettingsChanged,
- 1,
- OMX_IndexConfigCommonOutputCrop,
- NULL);
- }
-
- return err;
- }
-
- case AVC_NALTYPE_PPS:
- {
- mPPSSeen = true;
-
- res = PVAVCDecPicParamSet(
- mHandle, const_cast<uint8_t *>(fragPtr),
- fragSize);
-
- if (res != AVCDEC_SUCCESS) {
- LOGV("PVAVCDecPicParamSet returned error %d", res);
- return ERROR_MALFORMED;
- }
-
- return OK;
- }
-
- case AVC_NALTYPE_SLICE:
- case AVC_NALTYPE_IDR:
- {
- res = PVAVCDecodeSlice(
- mHandle, const_cast<uint8_t *>(fragPtr),
- fragSize);
-
- if (res == AVCDEC_PICTURE_OUTPUT_READY) {
- *releaseFragment = false;
-
- if (!drainOutputBuffer(outHeader)) {
- return UNKNOWN_ERROR;
- }
-
- return OK;
- }
-
- if (res == AVCDEC_PICTURE_READY || res == AVCDEC_SUCCESS) {
- return OK;
- } else {
- LOGV("PVAVCDecodeSlice returned error %d", res);
- return ERROR_MALFORMED;
- }
- }
-
- case AVC_NALTYPE_SEI:
- {
- res = PVAVCDecSEI(
- mHandle, const_cast<uint8_t *>(fragPtr),
- fragSize);
-
- if (res != AVCDEC_SUCCESS) {
- return ERROR_MALFORMED;
- }
-
- return OK;
- }
-
- case AVC_NALTYPE_AUD:
- case AVC_NALTYPE_FILL:
- case AVC_NALTYPE_EOSEQ:
- {
- return OK;
- }
-
- default:
- {
- LOGE("Should not be here, unknown nalType %d", nalType);
-
- return ERROR_MALFORMED;
- }
- }
-
- return OK;
-}
-
-bool SoftAVC::drainOutputBuffer(OMX_BUFFERHEADERTYPE **outHeader) {
- int32_t index;
- int32_t Release;
- AVCFrameIO Output;
- Output.YCbCr[0] = Output.YCbCr[1] = Output.YCbCr[2] = NULL;
- AVCDec_Status status =
- PVAVCDecGetOutput(mHandle, &index, &Release, &Output);
-
- if (status != AVCDEC_SUCCESS) {
- return false;
- }
-
- PortInfo *port = editPortInfo(1);
- CHECK_GE(index, 0);
- CHECK_LT((size_t)index, port->mBuffers.size());
- CHECK(port->mBuffers.editItemAt(index).mOwnedByUs);
-
- *outHeader = port->mBuffers.editItemAt(index).mHeader;
- (*outHeader)->nOffset = 0;
- (*outHeader)->nFilledLen = port->mDef.nBufferSize;
- (*outHeader)->nFlags = 0;
-
- return true;
-}
-
-void SoftAVC::onPortFlushCompleted(OMX_U32 portIndex) {
- if (portIndex == 0) {
- PVAVCDecReset(mHandle);
-
- mEOSStatus = INPUT_DATA_AVAILABLE;
- }
-}
-
-void SoftAVC::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
- if (portIndex != 1) {
- return;
- }
-
- switch (mOutputPortSettingsChange) {
- case NONE:
- break;
-
- case AWAITING_DISABLED:
- {
- CHECK(!enabled);
- mOutputPortSettingsChange = AWAITING_ENABLED;
- break;
- }
-
- default:
- {
- CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
- CHECK(enabled);
- mOutputPortSettingsChange = NONE;
- break;
- }
- }
-}
-
-void SoftAVC::updatePortDefinitions() {
- OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(0)->mDef;
- def->format.video.nFrameWidth = mWidth;
- def->format.video.nFrameHeight = mHeight;
- def->format.video.nStride = def->format.video.nFrameWidth;
- def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
- def = &editPortInfo(1)->mDef;
- def->format.video.nFrameWidth = mWidth;
- def->format.video.nFrameHeight = mHeight;
- def->format.video.nStride = def->format.video.nFrameWidth;
- def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
- def->nBufferSize =
- (def->format.video.nFrameWidth
- * def->format.video.nFrameHeight * 3) / 2;
-}
-
-// static
-int32_t SoftAVC::ActivateSPSWrapper(
- void *userData, unsigned int sizeInMbs, unsigned int numBuffers) {
- return static_cast<SoftAVC *>(userData)->activateSPS(sizeInMbs, numBuffers);
-}
-
-// static
-int32_t SoftAVC::BindFrameWrapper(
- void *userData, int32_t index, uint8_t **yuv) {
- return static_cast<SoftAVC *>(userData)->bindFrame(index, yuv);
-}
-
-// static
-void SoftAVC::UnbindFrame(void *userData, int32_t index) {
-}
-
-int32_t SoftAVC::activateSPS(
- unsigned int sizeInMbs, unsigned int numBuffers) {
- PortInfo *port = editPortInfo(1);
- CHECK_GE(port->mBuffers.size(), numBuffers);
- CHECK_GE(port->mDef.nBufferSize, (sizeInMbs << 7) * 3);
-
- return 1;
-}
-
-int32_t SoftAVC::bindFrame(int32_t index, uint8_t **yuv) {
- PortInfo *port = editPortInfo(1);
-
- CHECK_GE(index, 0);
- CHECK_LT((size_t)index, port->mBuffers.size());
-
- BufferInfo *outBuffer =
- &port->mBuffers.editItemAt(index);
-
- CHECK(outBuffer->mOwnedByUs);
-
- outBuffer->mHeader->nTimeStamp = mCurrentTimeUs;
- *yuv = outBuffer->mHeader->pBuffer;
-
- return 1;
-}
-
-} // namespace android
-
-android::SoftOMXComponent *createSoftOMXComponent(
- const char *name, const OMX_CALLBACKTYPE *callbacks,
- OMX_PTR appData, OMX_COMPONENTTYPE **component) {
- return new android::SoftAVC(name, callbacks, appData, component);
-}
diff --git a/media/libstagefright/codecs/avc/dec/SoftAVC.h b/media/libstagefright/codecs/avc/dec/SoftAVC.h
deleted file mode 100644
index 1594b4d..0000000
--- a/media/libstagefright/codecs/avc/dec/SoftAVC.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SOFT_AVC_H_
-
-#define SOFT_AVC_H_
-
-#include "SimpleSoftOMXComponent.h"
-
-struct tagAVCHandle;
-
-namespace android {
-
-struct SoftAVC : public SimpleSoftOMXComponent {
- SoftAVC(const char *name,
- const OMX_CALLBACKTYPE *callbacks,
- OMX_PTR appData,
- OMX_COMPONENTTYPE **component);
-
-protected:
- virtual ~SoftAVC();
-
- virtual OMX_ERRORTYPE internalGetParameter(
- OMX_INDEXTYPE index, OMX_PTR params);
-
- virtual OMX_ERRORTYPE internalSetParameter(
- OMX_INDEXTYPE index, const OMX_PTR params);
-
- virtual OMX_ERRORTYPE getConfig(OMX_INDEXTYPE index, OMX_PTR params);
-
- virtual void onQueueFilled(OMX_U32 portIndex);
- virtual void onPortFlushCompleted(OMX_U32 portIndex);
- virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
-
-private:
- enum {
- kNumInputBuffers = 4,
- kNumOutputBuffers = 18,
- };
-
- enum EOSStatus {
- INPUT_DATA_AVAILABLE,
- INPUT_EOS_SEEN,
- OUTPUT_FRAMES_FLUSHED,
- };
-
- tagAVCHandle *mHandle;
-
- size_t mInputBufferCount;
-
- int32_t mWidth, mHeight;
- int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
-
- bool mSPSSeen, mPPSSeen;
-
- int64_t mCurrentTimeUs;
-
- EOSStatus mEOSStatus;
-
- enum {
- NONE,
- AWAITING_DISABLED,
- AWAITING_ENABLED
- } mOutputPortSettingsChange;
-
- void initPorts();
- status_t initDecoder();
-
- status_t decodeFragment(
- const uint8_t *fragPtr, size_t fragSize,
- bool *releaseFrames,
- OMX_BUFFERHEADERTYPE **outHeader);
-
- void updatePortDefinitions();
- bool drainOutputBuffer(OMX_BUFFERHEADERTYPE **outHeader);
-
- static int32_t ActivateSPSWrapper(
- void *userData, unsigned int sizeInMbs, unsigned int numBuffers);
-
- static int32_t BindFrameWrapper(
- void *userData, int32_t index, uint8_t **yuv);
-
- static void UnbindFrame(void *userData, int32_t index);
-
- int32_t activateSPS(
- unsigned int sizeInMbs, unsigned int numBuffers);
-
- int32_t bindFrame(int32_t index, uint8_t **yuv);
-
- DISALLOW_EVIL_CONSTRUCTORS(SoftAVC);
-};
-
-} // namespace android
-
-#endif // SOFT_AVC_H_
-
diff --git a/media/libstagefright/codecs/avc/dec/include/avcdec_api.h b/media/libstagefright/codecs/avc/dec/include/avcdec_api.h
deleted file mode 100644
index f6a14b7..0000000
--- a/media/libstagefright/codecs/avc/dec/include/avcdec_api.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains application function interfaces to the AVC decoder library
-and necessary type defitionitions and enumerations.
-@publishedAll
-*/
-
-#ifndef _AVCDEC_API_H_
-#define _AVCDEC_API_H_
-
-#include "avcapi_common.h"
-
-/**
- This enumeration is used for the status returned from the library interface.
-*/
-typedef enum
-{
- /**
- The followings are fail with details. Their values are negative.
- */
- AVCDEC_NO_DATA = -4,
- AVCDEC_PACKET_LOSS = -3,
- /**
- Fail information
- */
- AVCDEC_NO_BUFFER = -2, /* no output picture buffer available */
- AVCDEC_MEMORY_FAIL = -1, /* memory allocation failed */
- AVCDEC_FAIL = 0,
- /**
- Generic success value
- */
- AVCDEC_SUCCESS = 1,
- AVCDEC_PICTURE_OUTPUT_READY = 2,
- AVCDEC_PICTURE_READY = 3,
-
- /**
- The followings are success with warnings. Their values are positive integers.
- */
- AVCDEC_NO_NEXT_SC = 4,
- AVCDEC_REDUNDANT_FRAME = 5,
- AVCDEC_CONCEALED_FRAME = 6 /* detect and conceal the error */
-} AVCDec_Status;
-
-
-/**
-This structure contains sequence parameters information.
-*/
-typedef struct tagAVCDecSPSInfo
-{
- int FrameWidth;
- int FrameHeight;
- uint frame_only_flag;
- int frame_crop_left;
- int frame_crop_right;
- int frame_crop_top;
- int frame_crop_bottom;
-
-} AVCDecSPSInfo;
-
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
- /** THE FOLLOWINGS ARE APIS */
- /**
- This function parses one NAL unit from byte stream format input according to Annex B.
- \param "bitstream" "Pointer to the bitstream buffer."
- \param "nal_unit" "Point to pointer and the location of the start of the first NAL unit
- found in bitstream."
- \param "size" "As input, the pointer to the size of bitstream in bytes. As output,
- the value is changed to be the size of the found NAL unit."
- \return "AVCDEC_SUCCESS if success, AVCDEC_FAIL if no first start code is found, AVCDEC_NO_NEX_SC if
- the first start code is found, but the second start code is missing (potential partial NAL)."
- */
- OSCL_IMPORT_REF AVCDec_Status PVAVCAnnexBGetNALUnit(uint8 *bitstream, uint8 **nal_unit, int *size);
-
- /**
- This function sniffs the nal_unit_type such that users can call corresponding APIs.
- \param "bitstream" "Pointer to the beginning of a NAL unit (start with forbidden_zero_bit, etc.)."
- \param "size" "size of the bitstream (NumBytesInNALunit + 1)."
- \param "nal_unit_type" "Pointer to the return value of nal unit type."
- \return "AVCDEC_SUCCESS if success, AVCDEC_FAIL otherwise."
- */
- OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc);
-
- /**
- This function decodes the sequence parameters set, initializes related parameters and
- allocates memory (reference frames list), must also be compliant with Annex A.
- It is equivalent to decode VOL header of MPEG4.
- \param "avcHandle" "Handle to the AVC decoder library object."
- \param "nal_unit" "Pointer to the buffer containing single NAL unit.
- The content will change due to EBSP-to-RBSP conversion."
- \param "nal_size" "size of the bitstream NumBytesInNALunit."
- \return "AVCDEC_SUCCESS if success,
- AVCDEC_FAIL if profile and level is not supported,
- AVCDEC_MEMORY_FAIL if memory allocations return null."
- */
- OSCL_IMPORT_REF AVCDec_Status PVAVCDecSeqParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size);
-
- /**
- This function returns sequence parameters such as dimension and field flag of the most recently
- decoded SPS. More can be added later or grouped together into a structure. This API can be called
- after PVAVCInitSequence. If no sequence parameter has been decoded yet, it will return AVCDEC_FAIL.
-
- \param "avcHandle" "Handle to the AVC decoder library object."
- \param "seqInfo" "Pointer to the AVCDecSeqParamInfo structure."
- \return "AVCDEC_SUCCESS if success and AVCDEC_FAIL if fail."
- \note "This API can be combined with PVAVCInitSequence if wanted to be consistent with m4vdec lib."
- */
- OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetSeqInfo(AVCHandle *avcHandle, AVCDecSPSInfo *seqInfo);
-
- /**
- This function decodes the picture parameters set and initializes related parameters. Note thate
- the PPS may not be present for every picture.
- \param "avcHandle" "Handle to the AVC decoder library object."
- \param "nal_unit" "Pointer to the buffer containing single NAL unit.
- The content will change due to EBSP-to-RBSP conversion."
- \param "nal_size" "size of the bitstream NumBytesInNALunit."
- \return "AVCDEC_SUCCESS if success, AVCDEC_FAIL if profile and level is not supported."
- */
- OSCL_IMPORT_REF AVCDec_Status PVAVCDecPicParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size);
-
- /**
- This function decodes one NAL unit of bitstream. The type of nal unit is one of the
- followings, 1, 5. (for now, no data partitioning, type 2,3,4).
- \param "avcHandle" "Handle to the AVC decoder library object."
- \param "nal_unit" "Pointer to the buffer containing a single or partial NAL unit.
- The content will change due to EBSP-to-RBSP conversion."
- \param "buf_size" "Size of the buffer (less than or equal nal_size)."
- \param "nal_size" "size of the current NAL unit NumBytesInNALunit."
- \return "AVCDEC_PICTURE_READY for success and an output is ready,
- AVCDEC_SUCCESS for success but no output is ready,
- AVCDEC_PACKET_LOSS is GetData returns AVCDEC_PACKET_LOSS,
- AVCDEC_FAIL if syntax error is detected,
- AVCDEC_MEMORY_FAIL if memory is corrupted.
- AVCDEC_NO_PICTURE if no frame memory to write to (users need to get output and/or return picture).
- AVCDEC_REDUNDANT_PICTURE if error has been detected in the primary picture and redundant picture is available,
- AVCDEC_CONCEALED_PICTURE if error has been detected and decoder has concealed it."
- */
- OSCL_IMPORT_REF AVCDec_Status PVAVCDecSEI(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size);
-
- OSCL_IMPORT_REF AVCDec_Status PVAVCDecodeSlice(AVCHandle *avcHandle, uint8 *buffer, int buf_size);
-
- /**
- Check the availability of the decoded picture in decoding order (frame_num).
- The AVCFrameIO also provide displaying order information such that the application
- can re-order the frame for display. A picture can be retrieved only once.
- \param "avcHandle" "Handle to the AVC decoder library object."
- \param "output" "Pointer to the AVCOutput structure. Note that decoder library will
- not re-used the pixel memory in this structure until it has been returned
- thru PVAVCReleaseOutput API."
- \return "AVCDEC_SUCCESS for success, AVCDEC_FAIL if no picture is available to be displayed,
- AVCDEC_PICTURE_READY if there is another picture to be displayed."
- */
- OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetOutput(AVCHandle *avcHandle, int *indx, int *release_flag, AVCFrameIO *output);
-
- /**
- This function resets the decoder and expects to see the next IDR slice.
- \param "avcHandle" "Handle to the AVC decoder library object."
- */
- OSCL_IMPORT_REF void PVAVCDecReset(AVCHandle *avcHandle);
-
- /**
- This function performs clean up operation including memory deallocation.
- \param "avcHandle" "Handle to the AVC decoder library object."
- */
- OSCL_IMPORT_REF void PVAVCCleanUpDecoder(AVCHandle *avcHandle);
-//AVCDec_Status EBSPtoRBSP(uint8 *nal_unit,int *size);
-
-
-
- /** CALLBACK FUNCTION TO BE IMPLEMENTED BY APPLICATION */
- /** In AVCHandle structure, userData is a pointer to an object with the following
- member functions.
- */
- AVCDec_Status CBAVCDec_GetData(uint32 *userData, unsigned char **buffer, unsigned int *size);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _AVCDEC_API_H_ */
-
diff --git a/media/libstagefright/codecs/avc/dec/include/pvavcdecoder.h b/media/libstagefright/codecs/avc/dec/include/pvavcdecoder.h
deleted file mode 100644
index 6b196de..0000000
--- a/media/libstagefright/codecs/avc/dec/include/pvavcdecoder.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#ifndef PVAVCDECODER_H_INCLUDED
-#define PVAVCDECODER_H_INCLUDED
-
-#ifndef PVAVCDECODERINTERFACE_H_INCLUDED
-#include "pvavcdecoderinterface.h"
-#endif
-
-// AVC video decoder
-class PVAVCDecoder : public PVAVCDecoderInterface
-{
- public:
- virtual ~PVAVCDecoder();
- static PVAVCDecoder* New(void);
- virtual bool InitAVCDecoder(FunctionType_SPS, FunctionType_Alloc, FunctionType_Unbind,
- FunctionType_Malloc, FunctionType_Free, void *);
- virtual void CleanUpAVCDecoder(void);
- virtual void ResetAVCDecoder(void);
- virtual int32 DecodeSPS(uint8 *bitstream, int32 buffer_size);
- virtual int32 DecodePPS(uint8 *bitstream, int32 buffer_size);
- virtual int32 DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size);
- virtual bool GetDecOutput(int *indx, int *release);
- virtual void GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right);
- int AVC_Malloc(int32 size, int attribute);
- void AVC_Free(int mem);
-
- private:
- PVAVCDecoder();
- bool Construct(void);
- void *iAVCHandle;
-};
-
-#endif
diff --git a/media/libstagefright/codecs/avc/dec/include/pvavcdecoderinterface.h b/media/libstagefright/codecs/avc/dec/include/pvavcdecoderinterface.h
deleted file mode 100644
index 027212d..0000000
--- a/media/libstagefright/codecs/avc/dec/include/pvavcdecoderinterface.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#ifndef PVAVCDECODERINTERFACE_H_INCLUDED
-#define PVAVCDECODERINTERFACE_H_INCLUDED
-
-typedef void (*FunctionType_Unbind)(void *, int);
-typedef int (*FunctionType_Alloc)(void *, int, uint8 **);
-typedef int (*FunctionType_SPS)(void *, uint, uint);
-typedef int (*FunctionType_Malloc)(void *, int32, int);
-typedef void(*FunctionType_Free)(void *, int);
-
-
-// PVAVCDecoderInterface pure virtual interface class
-class PVAVCDecoderInterface
-{
- public:
- virtual ~PVAVCDecoderInterface() {};
- virtual bool InitAVCDecoder(FunctionType_SPS, FunctionType_Alloc, FunctionType_Unbind,
- FunctionType_Malloc, FunctionType_Free, void *) = 0;
- virtual void CleanUpAVCDecoder(void) = 0;
- virtual void ResetAVCDecoder(void) = 0;
- virtual int32 DecodeSPS(uint8 *bitstream, int32 buffer_size) = 0;
- virtual int32 DecodePPS(uint8 *bitstream, int32 buffer_size) = 0;
- virtual int32 DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size) = 0;
- virtual bool GetDecOutput(int *indx, int *release) = 0;
- virtual void GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right) = 0;
-// virtual int AVC_Malloc(int32 size, int attribute);
-// virtual void AVC_Free(int mem);
-};
-
-#endif // PVAVCDECODERINTERFACE_H_INCLUDED
-
-
diff --git a/media/libstagefright/codecs/avc/dec/src/avc_bitstream.cpp b/media/libstagefright/codecs/avc/dec/src/avc_bitstream.cpp
deleted file mode 100644
index 270b664..0000000
--- a/media/libstagefright/codecs/avc/dec/src/avc_bitstream.cpp
+++ /dev/null
@@ -1,276 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcdec_bitstream.h"
-
-/* Swapping may not be needed anymore since we read one byte at a time and perform
-EBSP to RBSP conversion in bitstream. */
-#ifdef LITTLE_ENDIAN
-#if (WORD_SIZE==32) /* this can be replaced with assembly instructions */
-#define SWAP_BYTES(x) ((((x)&0xFF)<<24) | (((x)&0xFF00)<<8) | (((x)&0xFF0000)>>8) | (((x)&0xFF000000)>>24))
-#else /* for 16-bit */
-#define SWAP_BYTES(x) ((((x)&0xFF)<<8) | (((x)&0xFF00)>>8))
-#endif
-#else
-#define SWAP_BYTES(x) (x)
-#endif
-
-
-/* array for trailing bit pattern as function of number of bits */
-/* the first one is unused. */
-const static uint8 trailing_bits[9] = {0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
-
-/* ======================================================================== */
-/* Function : BitstreamInit() */
-/* Date : 11/4/2003 */
-/* Purpose : Populate bitstream structure with bitstream buffer and size */
-/* it also initializes internal data */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if failed. */
-/* Modified : */
-/* ======================================================================== */
-/* |--------|--------|----~~~~~-----|---------|---------|---------|
- ^ ^read_pos ^data_end_pos
- bitstreamBuffer <--------->
- current_word
-
- |xxxxxxxxxxxxx----| = current_word 32 or 16 bits
- <------------>
- bit_left
- ======================================================================== */
-
-
-/* ======================================================================== */
-/* Function : BitstreamNextWord() */
-/* Date : 12/4/2003 */
-/* Purpose : Read up to machine word. */
-/* In/out : */
-/* Return : Next word with emulation prevention code removed. Everything
- in the bitstream structure got modified except current_word */
-/* Modified : */
-/* ======================================================================== */
-
-AVCDec_Status BitstreamInit(AVCDecBitstream *stream, uint8 *buffer, int size)
-{
- EBSPtoRBSP(buffer, &size);
-
- stream->incnt = 0;
- stream->incnt_next = 0;
- stream->bitcnt = 0;
- stream->curr_word = stream->next_word = 0;
- stream->read_pos = 0;
-
- stream->bitstreamBuffer = buffer;
-
- stream->data_end_pos = size;
-
- stream->nal_size = size;
-
- return AVCDEC_SUCCESS;
-}
-/* ======================================================================== */
-/* Function : AVC_BitstreamFillCache() */
-/* Date : 1/1/2005 */
-/* Purpose : Read up to machine word. */
-/* In/out : */
-/* Return : Read in 4 bytes of input data */
-/* Modified : */
-/* ======================================================================== */
-
-AVCDec_Status AVC_BitstreamFillCache(AVCDecBitstream *stream)
-{
- uint8 *bitstreamBuffer = stream->bitstreamBuffer;
- uint8 *v;
- int num_bits, i;
-
- stream->curr_word |= (stream->next_word >> stream->incnt); // stream->incnt cannot be 32
- stream->next_word <<= (31 - stream->incnt);
- stream->next_word <<= 1;
- num_bits = stream->incnt_next + stream->incnt;
- if (num_bits >= 32)
- {
- stream->incnt_next -= (32 - stream->incnt);
- stream->incnt = 32;
- return AVCDEC_SUCCESS;
- }
- /* this check can be removed if there is additional extra 4 bytes at the end of the bitstream */
- v = bitstreamBuffer + stream->read_pos;
-
- if (stream->read_pos > stream->data_end_pos - 4)
- {
- if (stream->data_end_pos <= stream->read_pos)
- {
- stream->incnt = num_bits;
- stream->incnt_next = 0;
- return AVCDEC_SUCCESS;
- }
-
- stream->next_word = 0;
-
- for (i = 0; i < stream->data_end_pos - stream->read_pos; i++)
- {
- stream->next_word |= (v[i] << ((3 - i) << 3));
- }
-
- stream->read_pos = stream->data_end_pos;
- stream->curr_word |= (stream->next_word >> num_bits); // this is safe
-
- stream->next_word <<= (31 - num_bits);
- stream->next_word <<= 1;
- num_bits = i << 3;
- stream->incnt += stream->incnt_next;
- stream->incnt_next = num_bits - (32 - stream->incnt);
- if (stream->incnt_next < 0)
- {
- stream->incnt += num_bits;
- stream->incnt_next = 0;
- }
- else
- {
- stream->incnt = 32;
- }
- return AVCDEC_SUCCESS;
- }
-
- stream->next_word = ((uint32)v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3];
- stream->read_pos += 4;
-
- stream->curr_word |= (stream->next_word >> num_bits); // this is safe
- stream->next_word <<= (31 - num_bits);
- stream->next_word <<= 1;
- stream->incnt_next += stream->incnt;
- stream->incnt = 32;
- return AVCDEC_SUCCESS;
-
-}
-/* ======================================================================== */
-/* Function : BitstreamReadBits() */
-/* Date : 11/4/2003 */
-/* Purpose : Read up to machine word. */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits */
-/* is greater than the word-size, AVCDEC_PACKET_LOSS or */
-/* AVCDEC_NO_DATA if callback to get data fails. */
-/* Modified : */
-/* ======================================================================== */
-AVCDec_Status BitstreamReadBits(AVCDecBitstream *stream, int nBits, uint *code)
-{
- if (stream->incnt < nBits)
- {
- /* frame-based decoding */
- AVC_BitstreamFillCache(stream);
- }
- *code = stream->curr_word >> (32 - nBits);
- BitstreamFlushBits(stream, nBits);
- return AVCDEC_SUCCESS;
-}
-
-
-
-/* ======================================================================== */
-/* Function : BitstreamShowBits() */
-/* Date : 11/4/2003 */
-/* Purpose : Show up to machine word without advancing the pointer. */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits */
-/* is greater than the word-size, AVCDEC_NO_DATA if it needs */
-/* to callback to get data. */
-/* Modified : */
-/* ======================================================================== */
-AVCDec_Status BitstreamShowBits(AVCDecBitstream *stream, int nBits, uint *code)
-{
- if (stream->incnt < nBits)
- {
- /* frame-based decoding */
- AVC_BitstreamFillCache(stream);
- }
-
- *code = stream->curr_word >> (32 - nBits);
-
- return AVCDEC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : BitstreamRead1Bit() */
-/* Date : 11/4/2003 */
-/* Purpose : Read 1 bit from the bitstream. */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits */
-/* is greater than the word-size, AVCDEC_PACKET_LOSS or */
-/* AVCDEC_NO_DATA if callback to get data fails. */
-/* Modified : */
-/* ======================================================================== */
-
-AVCDec_Status BitstreamRead1Bit(AVCDecBitstream *stream, uint *code)
-{
- if (stream->incnt < 1)
- {
- /* frame-based decoding */
- AVC_BitstreamFillCache(stream);
- }
- *code = stream->curr_word >> 31;
- BitstreamFlushBits(stream, 1);
- return AVCDEC_SUCCESS;
-}
-
-
-
-AVCDec_Status BitstreamByteAlign(AVCDecBitstream *stream)
-{
- uint n_stuffed;
-
- n_stuffed = (8 - (stream->bitcnt & 0x7)) & 0x7; /* 07/05/01 */
-
- stream->bitcnt += n_stuffed;
- stream->incnt -= n_stuffed;
-
- if (stream->incnt < 0)
- {
- stream->bitcnt += stream->incnt;
- stream->incnt = 0;
- }
- stream->curr_word <<= n_stuffed;
- return AVCDEC_SUCCESS;
-}
-
-/* check whether there are more RBSP data. */
-/* ignore the emulation prevention code, assume it has been taken out. */
-bool more_rbsp_data(AVCDecBitstream *stream)
-{
- int total_bit_left;
- uint code;
-
- if (stream->read_pos >= stream->nal_size)
- {
- total_bit_left = stream->incnt_next + stream->incnt;
- if (total_bit_left <= 0)
- {
- return FALSE;
- }
- else if (total_bit_left <= 8)
- {
- BitstreamShowBits(stream, total_bit_left, &code);
- if (code == trailing_bits[total_bit_left])
- {
- return FALSE;
- }
- }
- }
-
- return TRUE;
-}
-
diff --git a/media/libstagefright/codecs/avc/dec/src/avcdec_api.cpp b/media/libstagefright/codecs/avc/dec/src/avcdec_api.cpp
deleted file mode 100644
index 0a75f17..0000000
--- a/media/libstagefright/codecs/avc/dec/src/avcdec_api.cpp
+++ /dev/null
@@ -1,1036 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains application function interfaces to the AVC decoder library.
-@publishedAll
-*/
-
-#include <string.h>
-
-#include "avcdec_api.h"
-#include "avcdec_lib.h"
-#include "avcdec_bitstream.h"
-
-/* ======================================================================== */
-/* Function : EBSPtoRBSP() */
-/* Date : 11/4/2003 */
-/* Purpose : Convert EBSP to RBSP and overwrite it. */
-/* Assuming that forbidden_zero, nal_ref_idc and nal_unit_type */
-/* (first byte), has been taken out of the nal_unit. */
-/* In/out : */
-/* Return : */
-/* Modified : */
-/* ======================================================================== */
-/**
-@pseudocode "
- NumBytesInRBSP = 0;
- for(i=0:i< *size; i++){
- if(i+2 < *size && next_bits(24)==0x000003){
- rbsp_byte[NumBytesInRBSP++];
- rbsp_byte[NumBytesInRBSP++];
- i+=2;
- emulation_prevention_three_byte (0x03)
- }
- else
- rbsp_byte[NumBytesInRBSP++];
- }"
-*/
-AVCDec_Status EBSPtoRBSP(uint8 *nal_unit, int *size)
-{
- int i, j;
- int count = 0;
-
- /* This code is based on EBSPtoRBSP of JM */
- j = 0;
-
- for (i = 0; i < *size; i++)
- {
- if (count == 2 && nal_unit[i] == 0x03)
- {
- i++;
- count = 0;
- }
- nal_unit[j] = nal_unit[i];
- if (nal_unit[i] == 0x00)
- count++;
- else
- count = 0;
- j++;
- }
-
- *size = j;
-
- return AVCDEC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCAnnexBGetNALUnit() */
-/* Date : 11/3/2003 */
-/* Purpose : Parse a NAL from byte stream format. */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */
-/* Modified : */
-/* ======================================================================== */
-/**
-@pseudocode "
- byte_stream_nal_unit(NumBytesInNalunit){
- while(next_bits(24) != 0x000001)
- zero_byte
- if(more_data_in_byte_stream()){
- start_code_prefix_one_3bytes // equal 0x000001
- nal_unit(NumBytesInNALunit)
- }
- }"
-*/
-OSCL_EXPORT_REF AVCDec_Status PVAVCAnnexBGetNALUnit(uint8 *bitstream, uint8 **nal_unit,
- int *size)
-{
- int i, j, FoundStartCode = 0;
- int end;
-
- i = 0;
- while (bitstream[i] == 0 && i < *size)
- {
- i++;
- }
- if (i >= *size)
- {
- *nal_unit = bitstream;
- return AVCDEC_FAIL; /* cannot find any start_code_prefix. */
- }
- else if (bitstream[i] != 0x1)
- {
- i = -1; /* start_code_prefix is not at the beginning, continue */
- }
-
- i++;
- *nal_unit = bitstream + i; /* point to the beginning of the NAL unit */
-
- j = end = i;
- while (!FoundStartCode)
- {
- while ((j + 1 < *size) && (bitstream[j] != 0 || bitstream[j+1] != 0)) /* see 2 consecutive zero bytes */
- {
- j++;
- }
- end = j; /* stop and check for start code */
- while (j + 2 < *size && bitstream[j+2] == 0) /* keep reading for zero byte */
- {
- j++;
- }
- if (j + 2 >= *size)
- {
- *size -= i;
- return AVCDEC_NO_NEXT_SC; /* cannot find the second start_code_prefix */
- }
- if (bitstream[j+2] == 0x1)
- {
- FoundStartCode = 1;
- }
- else
- {
- /* could be emulation code 0x3 */
- j += 2; /* continue the search */
- }
- }
-
- *size = end - i;
-
- return AVCDEC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCGetNALType() */
-/* Date : 11/4/2003 */
-/* Purpose : Sniff NAL type from the bitstream */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetNALType(uint8 *bitstream, int size,
- int *nal_type, int *nal_ref_idc)
-{
- int forbidden_zero_bit;
- if (size > 0)
- {
- forbidden_zero_bit = bitstream[0] >> 7;
- if (forbidden_zero_bit != 0)
- return AVCDEC_FAIL;
- *nal_ref_idc = (bitstream[0] & 0x60) >> 5;
- *nal_type = bitstream[0] & 0x1F;
- return AVCDEC_SUCCESS;
- }
-
- return AVCDEC_FAIL;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCDecSeqParamSet() */
-/* Date : 11/4/2003 */
-/* Purpose : Initialize sequence, memory allocation if necessary. */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */
-/* Modified : */
-/* ======================================================================== */
-
-OSCL_EXPORT_REF AVCDec_Status PVAVCDecSeqParamSet(AVCHandle *avcHandle, uint8 *nal_unit,
- int nal_size)
-{
- AVCDec_Status status;
- AVCDecObject *decvid;
- AVCCommonObj *video;
- AVCDecBitstream *bitstream;
- void *userData = avcHandle->userData;
- bool first_seq = FALSE;
- int i;
-
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "PVAVCDecSeqParamSet", -1, -1);
-
- if (avcHandle->AVCObject == NULL)
- {
- first_seq = TRUE;
-
- //avcHandle->memory_usage = 0;
- /* allocate AVCDecObject */
- avcHandle->AVCObject = (void*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecObject), 0/*DEFAULT_ATTR*/);
- if (avcHandle->AVCObject == NULL)
- {
- return AVCDEC_MEMORY_FAIL;
- }
-
- decvid = (AVCDecObject*) avcHandle->AVCObject;
-
- memset(decvid, 0, sizeof(AVCDecObject));
-
- decvid->common = (AVCCommonObj*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCCommonObj), 0);
- if (decvid->common == NULL)
- {
- return AVCDEC_MEMORY_FAIL;
- }
-
- video = decvid->common;
- memset(video, 0, sizeof(AVCCommonObj));
-
- video->seq_parameter_set_id = 9999; /* set it to some illegal value */
-
- decvid->bitstream = (AVCDecBitstream *) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecBitstream), 1/*DEFAULT_ATTR*/);
- if (decvid->bitstream == NULL)
- {
- return AVCDEC_MEMORY_FAIL;
- }
-
- decvid->bitstream->userData = avcHandle->userData; /* callback for more data */
- decvid->avcHandle = avcHandle;
- decvid->debugEnable = avcHandle->debugEnable;
- }
-
- decvid = (AVCDecObject*) avcHandle->AVCObject;
- video = decvid->common;
- bitstream = decvid->bitstream;
-
- /* check if we can reuse the memory without re-allocating it. */
- /* always check if(first_seq==TRUE) */
-
- /* Conversion from EBSP to RBSP */
- video->forbidden_bit = nal_unit[0] >> 7;
- if (video->forbidden_bit) return AVCDEC_FAIL;
- video->nal_ref_idc = (nal_unit[0] & 0x60) >> 5;
- video->nal_unit_type = (AVCNalUnitType)(nal_unit[0] & 0x1F);
-
- if (video->nal_unit_type != AVC_NALTYPE_SPS) /* not a SPS NAL */
- {
- return AVCDEC_FAIL;
- }
-
- /* Initialize bitstream structure*/
- BitstreamInit(bitstream, nal_unit + 1, nal_size - 1);
-
- /* if first_seq == TRUE, allocate the following memory */
- if (first_seq == TRUE)
- {
- video->currSeqParams = NULL; /* initialize it to NULL */
- video->currPicParams = NULL;
-
- /* There are 32 pointers to sequence param set, seqParams.
- There are 255 pointers to picture param set, picParams.*/
- for (i = 0; i < 32; i++)
- decvid->seqParams[i] = NULL;
-
- for (i = 0; i < 256; i++)
- decvid->picParams[i] = NULL;
-
- video->MbToSliceGroupMap = NULL;
-
- video->mem_mgr_ctrl_eq_5 = FALSE;
- video->newPic = TRUE;
- video->newSlice = TRUE;
- video->currPic = NULL;
- video->currFS = NULL;
- video->prevRefPic = NULL;
-
- video->mbNum = 0; // MC_Conceal
- /* Allocate sliceHdr. */
-
- video->sliceHdr = (AVCSliceHeader*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSliceHeader), 5/*DEFAULT_ATTR*/);
- if (video->sliceHdr == NULL)
- {
- return AVCDEC_MEMORY_FAIL;
- }
-
- video->decPicBuf = (AVCDecPicBuffer*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecPicBuffer), 3/*DEFAULT_ATTR*/);
- if (video->decPicBuf == NULL)
- {
- return AVCDEC_MEMORY_FAIL;
- }
- memset(video->decPicBuf, 0, sizeof(AVCDecPicBuffer));
- }
-
- /* Decode SPS, allocate video->seqParams[i] and assign video->currSeqParams */
- status = DecodeSPS(decvid, bitstream);
-
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
- return AVCDEC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCDecGetSeqInfo() */
-/* Date : 11/4/2003 */
-/* Purpose : Get sequence parameter info. after SPS NAL is decoded. */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */
-/* Modified : */
-/* 12/20/03: change input argument, use structure instead. */
-/* ======================================================================== */
-
-OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetSeqInfo(AVCHandle *avcHandle, AVCDecSPSInfo *seqInfo)
-{
- AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
- AVCCommonObj *video;
- int PicWidthInMbs, PicHeightInMapUnits, FrameHeightInMbs;
-
- if (decvid == NULL || decvid->seqParams[0] == NULL)
- {
- return AVCDEC_FAIL;
- }
-
- video = decvid->common;
-
- PicWidthInMbs = decvid->seqParams[0]->pic_width_in_mbs_minus1 + 1;
- PicHeightInMapUnits = decvid->seqParams[0]->pic_height_in_map_units_minus1 + 1 ;
- FrameHeightInMbs = (2 - decvid->seqParams[0]->frame_mbs_only_flag) * PicHeightInMapUnits ;
-
- seqInfo->FrameWidth = PicWidthInMbs << 4;
- seqInfo->FrameHeight = FrameHeightInMbs << 4;
-
- seqInfo->frame_only_flag = decvid->seqParams[0]->frame_mbs_only_flag;
-
- if (decvid->seqParams[0]->frame_cropping_flag)
- {
- seqInfo->frame_crop_left = 2 * decvid->seqParams[0]->frame_crop_left_offset;
- seqInfo->frame_crop_right = seqInfo->FrameWidth - (2 * decvid->seqParams[0]->frame_crop_right_offset + 1);
-
- if (seqInfo->frame_only_flag)
- {
- seqInfo->frame_crop_top = 2 * decvid->seqParams[0]->frame_crop_top_offset;
- seqInfo->frame_crop_bottom = seqInfo->FrameHeight - (2 * decvid->seqParams[0]->frame_crop_bottom_offset + 1);
- /* Note in 7.4.2.1, there is a contraint on the value of frame_crop_left and frame_crop_top
- such that they have to be less than or equal to frame_crop_right/2 and frame_crop_bottom/2, respectively. */
- }
- else
- {
- seqInfo->frame_crop_top = 4 * decvid->seqParams[0]->frame_crop_top_offset;
- seqInfo->frame_crop_bottom = seqInfo->FrameHeight - (4 * decvid->seqParams[0]->frame_crop_bottom_offset + 1);
- /* Note in 7.4.2.1, there is a contraint on the value of frame_crop_left and frame_crop_top
- such that they have to be less than or equal to frame_crop_right/2 and frame_crop_bottom/4, respectively. */
- }
- }
- else /* no cropping flag, just give the first and last pixel */
- {
- seqInfo->frame_crop_bottom = seqInfo->FrameHeight - 1;
- seqInfo->frame_crop_right = seqInfo->FrameWidth - 1;
- seqInfo->frame_crop_top = seqInfo->frame_crop_left = 0;
- }
-
- return AVCDEC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCDecPicParamSet() */
-/* Date : 11/4/2003 */
-/* Purpose : Initialize picture */
-/* create reference picture list. */
-/* In/out : */
-/* Return : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail. */
-/* Modified : */
-/* ======================================================================== */
-/**
-Since PPS doesn't contain much data, most of the picture initialization will
-be done after decoding the slice header in PVAVCDecodeSlice. */
-OSCL_EXPORT_REF AVCDec_Status PVAVCDecPicParamSet(AVCHandle *avcHandle, uint8 *nal_unit,
- int nal_size)
-{
- AVCDec_Status status;
- AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
- AVCCommonObj *video;
- AVCDecBitstream *bitstream;
-
- if (decvid == NULL)
- {
- return AVCDEC_FAIL;
- }
-
- video = decvid->common;
- bitstream = decvid->bitstream;
- /* 1. Convert EBSP to RBSP. Create bitstream structure */
- video->forbidden_bit = nal_unit[0] >> 7;
- video->nal_ref_idc = (nal_unit[0] & 0x60) >> 5;
- video->nal_unit_type = (AVCNalUnitType)(nal_unit[0] & 0x1F);
-
- if (video->nal_unit_type != AVC_NALTYPE_PPS) /* not a PPS NAL */
- {
- return AVCDEC_FAIL;
- }
-
-
- /* 2. Initialize bitstream structure*/
- BitstreamInit(bitstream, nal_unit + 1, nal_size - 1);
-
- /* 2. Decode pic_parameter_set_rbsp syntax. Allocate video->picParams[i] and assign to currPicParams */
- status = DecodePPS(decvid, video, bitstream);
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
-
- video->SliceGroupChangeRate = video->currPicParams->slice_group_change_rate_minus1 + 1 ;
-
- return AVCDEC_SUCCESS;
-}
-
-OSCL_EXPORT_REF AVCDec_Status PVAVCDecSEI(AVCHandle *avcHandle, uint8 *nal_unit,
- int nal_size)
-{
- OSCL_UNUSED_ARG(avcHandle);
- OSCL_UNUSED_ARG(nal_unit);
- OSCL_UNUSED_ARG(nal_size);
-
- return AVCDEC_SUCCESS;
-}
-/* ======================================================================== */
-/* Function : PVAVCDecodeSlice() */
-/* Date : 11/4/2003 */
-/* Purpose : Decode one NAL unit. */
-/* In/out : */
-/* Return : See enum AVCDec_Status for return values. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF AVCDec_Status PVAVCDecodeSlice(AVCHandle *avcHandle, uint8 *buffer,
- int buf_size)
-{
- AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
- AVCCommonObj *video;
- AVCDecBitstream *bitstream;
- AVCDec_Status status;
-
- if (decvid == NULL)
- {
- return AVCDEC_FAIL;
- }
-
- video = decvid->common;
- bitstream = decvid->bitstream;
-
- if (video->mem_mgr_ctrl_eq_5)
- {
- return AVCDEC_PICTURE_OUTPUT_READY; // to flushout frame buffers
- }
-
- if (video->newSlice)
- {
- /* 2. Check NAL type */
- if (buffer == NULL)
- {
- return AVCDEC_FAIL;
- }
- video->prev_nal_unit_type = video->nal_unit_type;
- video->forbidden_bit = buffer[0] >> 7;
- video->nal_ref_idc = (buffer[0] & 0x60) >> 5;
- video->nal_unit_type = (AVCNalUnitType)(buffer[0] & 0x1F);
-
-
- if (video->nal_unit_type == AVC_NALTYPE_AUD)
- {
- return AVCDEC_SUCCESS;
- }
-
- if (video->nal_unit_type != AVC_NALTYPE_SLICE &&
- video->nal_unit_type != AVC_NALTYPE_IDR)
- {
- return AVCDEC_FAIL; /* not supported */
- }
-
-
-
- if (video->nal_unit_type >= 2 && video->nal_unit_type <= 4)
- {
- return AVCDEC_FAIL; /* not supported */
- }
- else
- {
- video->slice_data_partitioning = FALSE;
- }
-
- video->newSlice = FALSE;
- /* Initialize bitstream structure*/
- BitstreamInit(bitstream, buffer + 1, buf_size - 1);
-
-
- /* 2.1 Decode Slice Header (separate function)*/
- status = DecodeSliceHeader(decvid, video, bitstream);
- if (status != AVCDEC_SUCCESS)
- {
- video->newSlice = TRUE;
- return status;
- }
-
- if (video->sliceHdr->frame_num != video->prevFrameNum || (video->sliceHdr->first_mb_in_slice < (uint)video->mbNum && video->currSeqParams->constrained_set1_flag == 1))
- {
- video->newPic = TRUE;
- if (video->numMBs > 0)
- {
- // Conceal missing MBs of previously decoded frame
- ConcealSlice(decvid, video->PicSizeInMbs - video->numMBs, video->PicSizeInMbs); // Conceal
- video->numMBs = 0;
-
- // DeblockPicture(video); // No need to deblock
-
- /* 3.2 Decoded frame reference marking. */
- /* 3.3 Put the decoded picture in output buffers */
- /* set video->mem_mge_ctrl_eq_5 */
- AVCNalUnitType temp = video->nal_unit_type;
- video->nal_unit_type = video->prev_nal_unit_type;
- StorePictureInDPB(avcHandle, video);
- video->nal_unit_type = temp;
- video->mbNum = 0; // MC_Conceal
- return AVCDEC_PICTURE_OUTPUT_READY;
- }
- }
-
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->prevFrameNum = 0;
- video->PrevRefFrameNum = 0;
- }
-
- if (!video->currSeqParams->gaps_in_frame_num_value_allowed_flag)
- { /* no gaps allowed, frame_num has to increase by one only */
- /* if(sliceHdr->frame_num != (video->PrevRefFrameNum + 1)%video->MaxFrameNum) */
- if (video->sliceHdr->frame_num != video->PrevRefFrameNum && video->sliceHdr->frame_num != (video->PrevRefFrameNum + 1) % video->MaxFrameNum)
- {
- // Conceal missing MBs of previously decoded frame
- video->numMBs = 0;
- video->newPic = TRUE;
- video->prevFrameNum++; // FIX
- video->PrevRefFrameNum++;
- AVCNalUnitType temp = video->nal_unit_type;
- video->nal_unit_type = AVC_NALTYPE_SLICE; //video->prev_nal_unit_type;
- status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
- video->currFS->IsOutputted = 0x01;
- video->currFS->IsReference = 3;
- video->currFS->IsLongTerm = 0;
-
- DecodePOC(video);
- /* find an empty memory from DPB and assigned to currPic */
- DPBInitPic(video, video->PrevRefFrameNum % video->MaxFrameNum);
- RefListInit(video);
- ConcealSlice(decvid, 0, video->PicSizeInMbs); // Conceal
- video->currFS->IsOutputted |= 0x02;
- //conceal frame
- /* 3.2 Decoded frame reference marking. */
- /* 3.3 Put the decoded picture in output buffers */
- /* set video->mem_mge_ctrl_eq_5 */
- video->mbNum = 0; // Conceal
- StorePictureInDPB(avcHandle, video);
- video->nal_unit_type = temp;
-
- return AVCDEC_PICTURE_OUTPUT_READY;
- }
- }
- }
-
- if (video->newPic == TRUE)
- {
- status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
- }
-
- video->newSlice = TRUE;
-
- /* function pointer setting at slice-level */
- // OPTIMIZE
- decvid->residual_block = &residual_block_cavlc;
-
- /* derive picture order count */
- if (video->newPic == TRUE)
- {
- video->numMBs = video->PicSizeInMbs;
-
- if (video->nal_unit_type != AVC_NALTYPE_IDR && video->currSeqParams->gaps_in_frame_num_value_allowed_flag)
- {
- if (video->sliceHdr->frame_num != (video->PrevRefFrameNum + 1) % video->MaxFrameNum)
- {
- status = fill_frame_num_gap(avcHandle, video);
- if (status != AVCDEC_SUCCESS)
- {
- video->numMBs = 0;
- return status;
- }
-
- status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
- if (status != AVCDEC_SUCCESS)
- {
- video->numMBs = 0;
- return status;
- }
-
-
- }
- }
- /* if there's gap in the frame_num, we have to fill in the gap with
- imaginary frames that won't get used for short-term ref. */
- /* see fill_frame_num_gap() in JM */
-
-
- DecodePOC(video);
- /* find an empty memory from DPB and assigned to currPic */
- DPBInitPic(video, video->CurrPicNum);
-
- video->currPic->isReference = TRUE; // FIX
-
- if (video->nal_ref_idc == 0)
- {
- video->currPic->isReference = FALSE;
- video->currFS->IsOutputted |= 0x02; /* The MASK 0x02 means not needed for reference, or returned */
- /* node need to check for freeing of this buffer */
- }
-
- FMOInit(video);
-
- if (video->currPic->isReference)
- {
- video->PrevRefFrameNum = video->sliceHdr->frame_num;
- }
-
-
- video->prevFrameNum = video->sliceHdr->frame_num;
- }
-
- video->newPic = FALSE;
-
-
- /* Initialize refListIdx for this picture */
- RefListInit(video);
-
- /* Re-order the reference list according to the ref_pic_list_reordering() */
- status = (AVCDec_Status)ReOrderList(video);
- if (status != AVCDEC_SUCCESS)
- {
- return AVCDEC_FAIL;
- }
-
- /* 2.2 Decode Slice. */
- status = (AVCDec_Status)DecodeSlice(decvid);
-
- video->slice_id++; // slice
-
- if (status == AVCDEC_PICTURE_READY)
- {
- /* 3. Check complete picture */
-#ifndef MB_BASED_DEBLOCK
- /* 3.1 Deblock */
- DeblockPicture(video);
-#endif
- /* 3.2 Decoded frame reference marking. */
- /* 3.3 Put the decoded picture in output buffers */
- /* set video->mem_mge_ctrl_eq_5 */
- status = (AVCDec_Status)StorePictureInDPB(avcHandle, video); // CHECK check the retunr status
- if (status != AVCDEC_SUCCESS)
- {
- return AVCDEC_FAIL;
- }
-
- if (video->mem_mgr_ctrl_eq_5)
- {
- video->PrevRefFrameNum = 0;
- video->prevFrameNum = 0;
- video->prevPicOrderCntMsb = 0;
- video->prevPicOrderCntLsb = video->TopFieldOrderCnt;
- video->prevFrameNumOffset = 0;
- }
- else
- {
- video->prevPicOrderCntMsb = video->PicOrderCntMsb;
- video->prevPicOrderCntLsb = video->sliceHdr->pic_order_cnt_lsb;
- video->prevFrameNumOffset = video->FrameNumOffset;
- }
-
- return AVCDEC_PICTURE_READY;
- }
- else if (status != AVCDEC_SUCCESS)
- {
- return AVCDEC_FAIL;
- }
-
- return AVCDEC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCDecGetOutput() */
-/* Date : 11/3/2003 */
-/* Purpose : Get the next picture according to PicOrderCnt. */
-/* In/out : */
-/* Return : AVCFrameIO structure */
-/* Modified : */
-/* ======================================================================== */
-
-OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetOutput(AVCHandle *avcHandle, int *indx, int *release, AVCFrameIO *output)
-{
- AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
- AVCCommonObj *video;
- AVCDecPicBuffer *dpb;
- AVCFrameStore *oldestFrame = NULL;
- int i, first = 1;
- int count_frame = 0;
- int index = 0;
- int min_poc = 0;
-
- if (decvid == NULL)
- {
- return AVCDEC_FAIL;
- }
-
- video = decvid->common;
- dpb = video->decPicBuf;
-
- if (dpb->num_fs == 0)
- {
- return AVCDEC_FAIL;
- }
-
- /* search for the oldest frame_num in dpb */
- /* extension to field decoding, we have to search for every top_field/bottom_field within
- each frame in the dpb. This code only works for frame based.*/
-
- if (video->mem_mgr_ctrl_eq_5 == FALSE)
- {
- for (i = 0; i < dpb->num_fs; i++)
- {
- if ((dpb->fs[i]->IsOutputted & 0x01) == 0)
- {
- count_frame++;
- if (first)
- {
- min_poc = dpb->fs[i]->PicOrderCnt;
- first = 0;
- oldestFrame = dpb->fs[i];
- index = i;
- }
- if (dpb->fs[i]->PicOrderCnt < min_poc)
- {
- min_poc = dpb->fs[i]->PicOrderCnt;
- oldestFrame = dpb->fs[i];
- index = i;
- }
- }
- }
- }
- else
- {
- for (i = 0; i < dpb->num_fs; i++)
- {
- if ((dpb->fs[i]->IsOutputted & 0x01) == 0 && dpb->fs[i] != video->currFS)
- {
- count_frame++;
- if (first)
- {
- min_poc = dpb->fs[i]->PicOrderCnt;
- first = 0;
- oldestFrame = dpb->fs[i];
- index = i;
- }
- if (dpb->fs[i]->PicOrderCnt < min_poc)
- {
- min_poc = dpb->fs[i]->PicOrderCnt;
- oldestFrame = dpb->fs[i];
- index = i;
- }
- }
- }
-
- if (count_frame < 2 && video->nal_unit_type != AVC_NALTYPE_IDR)
- {
- video->mem_mgr_ctrl_eq_5 = FALSE; // FIX
- }
- else if (count_frame < 1 && video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- for (i = 0; i < dpb->num_fs; i++)
- {
- if (dpb->fs[i] == video->currFS && (dpb->fs[i]->IsOutputted & 0x01) == 0)
- {
- oldestFrame = dpb->fs[i];
- index = i;
- break;
- }
- }
- video->mem_mgr_ctrl_eq_5 = FALSE;
- }
- }
-
- if (oldestFrame == NULL)
- {
-
- /* Check for Mem_mgmt_operation_5 based forced output */
- for (i = 0; i < dpb->num_fs; i++)
- {
- /* looking for the one not used or not reference and has been outputted */
- if (dpb->fs[i]->IsReference == 0 && dpb->fs[i]->IsOutputted == 3)
- {
- break;
- }
- }
- if (i < dpb->num_fs)
- {
- /* there are frames available for decoding */
- return AVCDEC_FAIL; /* no frame to be outputted */
- }
-
-
- /* no free frame available, we have to release one to continue decoding */
- int MinIdx = 0;
- int32 MinFrameNumWrap = 0x7FFFFFFF;
-
- for (i = 0; i < dpb->num_fs; i++)
- {
- if (dpb->fs[i]->IsReference && !dpb->fs[i]->IsLongTerm)
- {
- if (dpb->fs[i]->FrameNumWrap < MinFrameNumWrap)
- {
- MinFrameNumWrap = dpb->fs[i]->FrameNumWrap;
- MinIdx = i;
- }
- }
- }
- /* mark the frame with smallest PicOrderCnt to be unused for reference */
- dpb->fs[MinIdx]->IsReference = 0;
- dpb->fs[MinIdx]->IsLongTerm = 0;
- dpb->fs[MinIdx]->frame.isReference = FALSE;
- dpb->fs[MinIdx]->frame.isLongTerm = FALSE;
- dpb->fs[MinIdx]->IsOutputted |= 0x02;
-#ifdef PV_MEMORY_POOL
- if (dpb->fs[MinIdx]->IsOutputted == 3)
- {
- avcHandle->CBAVC_FrameUnbind(avcHandle->userData, MinIdx);
- }
-#endif
- return AVCDEC_FAIL;
- }
- /* MASK 0x01 means the frame is outputted (for display). A frame gets freed when it is
- outputted (0x01) and not needed for reference (0x02) */
- oldestFrame->IsOutputted |= 0x01;
-
- if (oldestFrame->IsOutputted == 3)
- {
- *release = 1; /* flag to release the buffer */
- }
- else
- {
- *release = 0;
- }
- /* do not release buffer here, release it after it is sent to the sink node */
-
- output->YCbCr[0] = oldestFrame->frame.Sl;
- output->YCbCr[1] = oldestFrame->frame.Scb;
- output->YCbCr[2] = oldestFrame->frame.Scr;
- output->height = oldestFrame->frame.height;
- output->pitch = oldestFrame->frame.width;
- output->disp_order = oldestFrame->PicOrderCnt;
- output->coding_order = oldestFrame->FrameNum;
- output->id = (uint32) oldestFrame->base_dpb; /* use the pointer as the id */
- *indx = index;
-
-
-
- return AVCDEC_SUCCESS;
-}
-
-
-/* ======================================================================== */
-/* Function : PVAVCDecReset() */
-/* Date : 03/04/2004 */
-/* Purpose : Reset decoder, prepare it for a new IDR frame. */
-/* In/out : */
-/* Return : void */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF void PVAVCDecReset(AVCHandle *avcHandle)
-{
- AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
- AVCCommonObj *video;
- AVCDecPicBuffer *dpb;
- int i;
-
- if (decvid == NULL)
- {
- return;
- }
-
- video = decvid->common;
- dpb = video->decPicBuf;
-
- /* reset the DPB */
-
-
- for (i = 0; i < dpb->num_fs; i++)
- {
- dpb->fs[i]->IsLongTerm = 0;
- dpb->fs[i]->IsReference = 0;
- dpb->fs[i]->IsOutputted = 3;
- dpb->fs[i]->frame.isReference = 0;
- dpb->fs[i]->frame.isLongTerm = 0;
- }
-
- video->mem_mgr_ctrl_eq_5 = FALSE;
- video->newPic = TRUE;
- video->newSlice = TRUE;
- video->currPic = NULL;
- video->currFS = NULL;
- video->prevRefPic = NULL;
- video->prevFrameNum = 0;
- video->PrevRefFrameNum = 0;
- video->prevFrameNumOffset = 0;
- video->FrameNumOffset = 0;
- video->mbNum = 0;
- video->numMBs = 0;
-
- return ;
-}
-
-
-/* ======================================================================== */
-/* Function : PVAVCCleanUpDecoder() */
-/* Date : 11/4/2003 */
-/* Purpose : Clean up the decoder, free all memories allocated. */
-/* In/out : */
-/* Return : void */
-/* Modified : */
-/* ======================================================================== */
-
-OSCL_EXPORT_REF void PVAVCCleanUpDecoder(AVCHandle *avcHandle)
-{
- AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
- AVCCommonObj *video;
- void *userData = avcHandle->userData;
- int i;
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "PVAVCCleanUpDecoder", -1, -1);
-
- if (decvid != NULL)
- {
- video = decvid->common;
- if (video != NULL)
- {
- if (video->MbToSliceGroupMap != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)video->MbToSliceGroupMap);
- }
-
-#ifdef MB_BASED_DEBLOCK
- if (video->intra_pred_top != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top);
- }
- if (video->intra_pred_top_cb != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top_cb);
- }
- if (video->intra_pred_top_cr != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top_cr);
- }
-#endif
- if (video->mblock != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)video->mblock);
- }
-
- if (video->decPicBuf != NULL)
- {
- CleanUpDPB(avcHandle, video);
- avcHandle->CBAVC_Free(userData, (int)video->decPicBuf);
- }
-
- if (video->sliceHdr != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)video->sliceHdr);
- }
-
- avcHandle->CBAVC_Free(userData, (int)video); /* last thing to do */
-
- }
-
- for (i = 0; i < 256; i++)
- {
- if (decvid->picParams[i] != NULL)
- {
- if (decvid->picParams[i]->slice_group_id != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)decvid->picParams[i]->slice_group_id);
- }
- avcHandle->CBAVC_Free(userData, (int)decvid->picParams[i]);
- }
- }
- for (i = 0; i < 32; i++)
- {
- if (decvid->seqParams[i] != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)decvid->seqParams[i]);
- }
- }
- if (decvid->bitstream != NULL)
- {
- avcHandle->CBAVC_Free(userData, (int)decvid->bitstream);
- }
-
-
- avcHandle->CBAVC_Free(userData, (int)decvid);
- }
-
-
- return ;
-}
diff --git a/media/libstagefright/codecs/avc/dec/src/avcdec_bitstream.h b/media/libstagefright/codecs/avc/dec/src/avcdec_bitstream.h
deleted file mode 100644
index bd1bc59..0000000
--- a/media/libstagefright/codecs/avc/dec/src/avcdec_bitstream.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains bitstream related functions.
-@publishedAll
-*/
-
-#ifndef _AVCDEC_BITSTREAM_H_
-#define _AVCDEC_BITSTREAM_H_
-
-#include "avcdec_lib.h"
-
-#define WORD_SIZE 32 /* this can vary, default to 32 bit for now */
-
-#ifndef __cplusplus
-
-#define AVC_GETDATA(x,y) userData->AVC_GetData(x,y)
-
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-#define BitstreamFlushBits(A,B) {(A)->bitcnt += (B); (A)->incnt -= (B); (A)->curr_word <<= (B);}
-
- AVCDec_Status AVC_BitstreamFillCache(AVCDecBitstream *stream);
- /**
- This function populates bitstream structure.
- \param "stream" "Pointer to bitstream structure."
- \param "buffer" "Pointer to the bitstream buffer."
- \param "size" "Size of the buffer."
- \param "nal_size" "Size of the NAL unit."
- \param "resetall" "Flag for reset everything."
- \return "AVCDEC_SUCCESS for success and AVCDEC_FAIL for fail."
- */
- AVCDec_Status BitstreamInit(AVCDecBitstream *stream, uint8 *buffer, int size);
-
- /**
- This function reads next aligned word and remove the emulation prevention code
- if necessary.
- \param "stream" "Pointer to bitstream structure."
- \return "Next word."
- */
- uint BitstreamNextWord(AVCDecBitstream *stream);
-
- /**
- This function reads nBits bits from the current position and advance the pointer.
- \param "stream" "Pointer to bitstream structure."
- \param "nBits" "Number of bits to be read."
- \param "code" "Point to the read value."
- \return "AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits
- is greater than the word-size, AVCDEC_PACKET_LOSS or
- AVCDEC_NO_DATA if callback to get data fails."
- */
- AVCDec_Status BitstreamReadBits(AVCDecBitstream *stream, int nBits, uint *code);
-
- /**
- This function shows nBits bits from the current position without advancing the pointer.
- \param "stream" "Pointer to bitstream structure."
- \param "nBits" "Number of bits to be read."
- \param "code" "Point to the read value."
- \return "AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits
- is greater than the word-size, AVCDEC_NO_DATA if it needs
- to callback to get data."
- */
- AVCDec_Status BitstreamShowBits(AVCDecBitstream *stream, int nBits, uint *code);
-
-
- /**
- This function flushes nBits bits from the current position.
- \param "stream" "Pointer to bitstream structure."
- \param "nBits" "Number of bits to be read."
- \return "AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits
- is greater than the word-size It will not call back to get
- more data. Users should call BitstreamShowBits to determine
- how much they want to flush."
- */
-
- /**
- This function read 1 bit from the current position and advance the pointer.
- \param "stream" "Pointer to bitstream structure."
- \param "nBits" "Number of bits to be read."
- \param "code" "Point to the read value."
- \return "AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits
- is greater than the word-size, AVCDEC_PACKET_LOSS or
- AVCDEC_NO_DATA if callback to get data fails."
- */
- AVCDec_Status BitstreamRead1Bit(AVCDecBitstream *stream, uint *code);
-
- /**
- This function checks whether the current bit position is byte-aligned or not.
- \param "stream" "Pointer to the bitstream structure."
- \return "TRUE if byte-aligned, FALSE otherwise."
- */
- bool byte_aligned(AVCDecBitstream *stream);
- AVCDec_Status BitstreamByteAlign(AVCDecBitstream *stream);
- /**
- This function checks whether there are more RBSP data before the trailing bits.
- \param "stream" "Pointer to the bitstream structure."
- \return "TRUE if yes, FALSE otherwise."
- */
- bool more_rbsp_data(AVCDecBitstream *stream);
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* _AVCDEC_BITSTREAM_H_ */
diff --git a/media/libstagefright/codecs/avc/dec/src/avcdec_int.h b/media/libstagefright/codecs/avc/dec/src/avcdec_int.h
deleted file mode 100644
index 878f9b3..0000000
--- a/media/libstagefright/codecs/avc/dec/src/avcdec_int.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains application function interfaces to the AVC decoder library
-and necessary type defitionitions and enumerations.
-Naming convention for variables:
-lower_case_with_under_line is syntax element in subclause 7.2 and 7.3
-noUnderLine or NoUnderLine is derived variables defined somewhere else in the draft
- or introduced by this decoder library.
-@publishedAll
-*/
-
-#ifndef _AVCDEC_INT_H_
-#define _AVCDEC_INT_H_
-
-#include "avcint_common.h"
-#include "avcdec_api.h"
-
-
-/**
-Bitstream structure contains bitstream related parameters such as the pointer
-to the buffer, the current byte position and bit position.
-@publishedAll
-*/
-typedef struct tagDecBitstream
-{
- uint8 *bitstreamBuffer; /* pointer to buffer memory */
- int nal_size; /* size of the current NAL unit */
- int data_end_pos; /* bitstreamBuffer size in bytes */
- int read_pos; /* next position to read from bitstreamBuffer */
- uint curr_word; /* byte-swapped (MSB left) current word read from buffer */
- int bit_left; /* number of bit left in current_word */
- uint next_word; /* in case for old data in previous buffer hasn't been flushed. */
- int incnt; /* bit left in the prev_word */
- int incnt_next;
- int bitcnt;
- void *userData;
-} AVCDecBitstream;
-
-/**
-This structure is the main object for AVC decoder library providing access to all
-global variables. It is allocated at PVAVCInitDecoder and freed at PVAVCCleanUpDecoder.
-@publishedAll
-*/
-typedef struct tagDecObject
-{
-
- AVCCommonObj *common;
-
- AVCDecBitstream *bitstream; /* for current NAL */
-
- /* sequence parameter set */
- AVCSeqParamSet *seqParams[32]; /* Array of pointers, get allocated at arrival of new seq_id */
-
- /* picture parameter set */
- AVCPicParamSet *picParams[256]; /* Array of pointers to picture param set structures */
-
- /* For internal operation, scratch memory for MV, prediction, transform, etc.*/
- uint ref_idx_l0[4]; /* [mbPartIdx], te(v) */
- uint ref_idx_l1[4];
-
- /* function pointers */
- AVCDec_Status(*residual_block)(struct tagDecObject*, int, int,
- int *, int *, int *);
- /* Application control data */
- AVCHandle *avcHandle;
- void (*AVC_DebugLog)(AVCLogType type, char *string1, char *string2);
- /*bool*/
- uint debugEnable;
-
-} AVCDecObject;
-
-#endif /* _AVCDEC_INT_H_ */
diff --git a/media/libstagefright/codecs/avc/dec/src/avcdec_lib.h b/media/libstagefright/codecs/avc/dec/src/avcdec_lib.h
deleted file mode 100644
index fdead05..0000000
--- a/media/libstagefright/codecs/avc/dec/src/avcdec_lib.h
+++ /dev/null
@@ -1,555 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains declarations of internal functions for AVC decoder library.
-@publishedAll
-*/
-#ifndef _AVCDEC_LIB_H_
-#define _AVCDEC_LIB_H_
-
-#include "avclib_common.h"
-#include "avcdec_int.h"
-
-/*----------- avcdec_api.c -------------*/
-/**
-This function takes out the emulation prevention bytes from the input to creat RBSP.
-The result is written over the input bitstream.
-\param "nal_unit" "(I/O) Pointer to the input buffer."
-\param "size" "(I/O) Pointer to the size of the input/output buffer."
-\return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status EBSPtoRBSP(uint8 *nal_unit, int *size);
-
-/*------------- pred_intra.c ---------------*/
-/**
-This function is the main entry point to intra prediction operation on a
-macroblock.
-\param "video" "Pointer to AVCCommonObj."
-*/
-AVCStatus IntraMBPrediction(AVCCommonObj *video);
-
-void SaveNeighborForIntraPred(AVCCommonObj *video, int offset);
-
-AVCStatus Intra_4x4(AVCCommonObj *video, int component, int SubBlock_indx, uint8 *comp);
-void Intra_4x4_Vertical(AVCCommonObj *video, int block_offset);
-void Intra_4x4_Horizontal(AVCCommonObj *video, int pitch, int block_offset);
-void Intra_4x4_DC(AVCCommonObj *video, int pitch, int block_offset, AVCNeighborAvailability *availability);
-void Intra_4x4_Down_Left(AVCCommonObj *video, int block_offset, AVCNeighborAvailability *availability);
-void Intra_4x4_Diagonal_Down_Right(AVCCommonObj *video, int pitch, int block_offset);
-void Intra_4x4_Diagonal_Vertical_Right(AVCCommonObj *video, int pitch, int block_offset);
-void Intra_4x4_Diagonal_Horizontal_Down(AVCCommonObj *video, int pitch, int block_offset);
-void Intra_4x4_Vertical_Left(AVCCommonObj *video, int block_offset, AVCNeighborAvailability *availability);
-void Intra_4x4_Horizontal_Up(AVCCommonObj *video, int pitch, int block_offset);
-void Intra_16x16_Vertical(AVCCommonObj *video);
-void Intra_16x16_Horizontal(AVCCommonObj *video, int pitch);
-void Intra_16x16_DC(AVCCommonObj *video, int pitch);
-void Intra_16x16_Plane(AVCCommonObj *video, int pitch);
-void Intra_Chroma_DC(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr);
-void Intra_Chroma_Horizontal(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr);
-void Intra_Chroma_Vertical(AVCCommonObj *video, uint8 *predCb, uint8 *predCr);
-void Intra_Chroma_Plane(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr);
-
-/*------------ pred_inter.c ---------------*/
-/**
-This function is the main entrance to inter prediction operation for
-a macroblock. For decoding, this function also calls inverse transform and
-compensation.
-\param "video" "Pointer to AVCCommonObj."
-\return "void"
-*/
-void InterMBPrediction(AVCCommonObj *video);
-
-/**
-This function is called for luma motion compensation.
-\param "ref" "Pointer to the origin of a reference luma."
-\param "picwidth" "Width of the picture."
-\param "picheight" "Height of the picture."
-\param "x_pos" "X-coordinate of the predicted block in quarter pel resolution."
-\param "y_pos" "Y-coordinate of the predicted block in quarter pel resolution."
-\param "pred" "Pointer to the output predicted block."
-\param "pred_pitch" "Width of pred."
-\param "blkwidth" "Width of the current partition."
-\param "blkheight" "Height of the current partition."
-\return "void"
-*/
-void LumaMotionComp(uint8 *ref, int picwidth, int picheight,
- int x_pos, int y_pos,
- uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight);
-
-/**
-Functions below are special cases for luma motion compensation.
-LumaFullPelMC is for full pixel motion compensation.
-LumaBorderMC is for interpolation in only one dimension.
-LumaCrossMC is for interpolation in one dimension and half point in the other dimension.
-LumaDiagonalMC is for interpolation in diagonal direction.
-
-\param "ref" "Pointer to the origin of a reference luma."
-\param "picwidth" "Width of the picture."
-\param "picheight" "Height of the picture."
-\param "x_pos" "X-coordinate of the predicted block in full pel resolution."
-\param "y_pos" "Y-coordinate of the predicted block in full pel resolution."
-\param "dx" "Fraction of x_pos in quarter pel."
-\param "dy" "Fraction of y_pos in quarter pel."
-\param "curr" "Pointer to the current partition in the current picture."
-\param "residue" "Pointer to the current partition for the residue block."
-\param "blkwidth" "Width of the current partition."
-\param "blkheight" "Height of the current partition."
-\return "void"
-*/
-void CreatePad(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos,
- uint8 *out, int blkwidth, int blkheight);
-
-void FullPelMC(uint8 *in, int inwidth, uint8 *out, int outpitch,
- int blkwidth, int blkheight);
-
-void HorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx);
-
-void HorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx);
-
-void HorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight);
-
-void VertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy);
-
-void VertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight);
-
-void VertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy);
-
-void DiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,
- uint8 *out, int outpitch,
- int blkwidth, int blkheight);
-
-
-void ChromaMotionComp(uint8 *ref, int picwidth, int picheight,
- int x_pos, int y_pos, uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight);
-
-void ChromaFullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight) ;
-void ChromaBorderMC(uint8 *ref, int picwidth, int dx, int dy,
- uint8 *pred, int pred_pitch, int blkwidth, int blkheight);
-void ChromaDiagonalMC(uint8 *ref, int picwidth, int dx, int dy,
- uint8 *pred, int pred_pitch, int blkwidth, int blkheight);
-
-void ChromaFullPelMCOutside(uint8 *ref, uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight, int x_inc,
- int y_inc0, int y_inc1, int x_mid, int y_mid);
-void ChromaBorderMCOutside(uint8 *ref, int picwidth, int dx, int dy,
- uint8 *pred, int pred_pitch, int blkwidth, int blkheight,
- int x_inc, int z_inc, int y_inc0, int y_inc1, int x_mid, int y_mid);
-void ChromaDiagonalMCOutside(uint8 *ref, int picwidth,
- int dx, int dy, uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight, int x_inc, int z_inc,
- int y_inc0, int y_inc1, int x_mid, int y_mid);
-
-void ChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-void ChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-void ChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-void ChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-void ChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-void ChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-void ChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-
-/*----------- slice.c ---------------*/
-/**
-This function performs the main decoding loop for slice data including
-INTRA/INTER prediction, transform and quantization and compensation.
-See decode_frame_slice() in JM.
-\param "video" "Pointer to AVCDecObject."
-\return "AVCDEC_SUCCESS for success, AVCDEC_PICTURE_READY for end-of-picture and AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status DecodeSlice(AVCDecObject *video);
-AVCDec_Status ConcealSlice(AVCDecObject *decvid, int mbnum_start, int mbnum_end);
-/**
-This function performs the decoding of one macroblock.
-\param "video" "Pointer to AVCDecObject."
-\param "prevMbSkipped" "A value derived in 7.3.4."
-\return "AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status DecodeMB(AVCDecObject *video);
-
-/**
-This function performs macroblock prediction type decoding as in subclause 7.3.5.1.
-\param "video" "Pointer to AVCCommonObj."
-\param "currMB" "Pointer to the current macroblock."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream);
-
-/**
-This function performs sub-macroblock prediction type decoding as in subclause 7.3.5.2.
-\param "video" "Pointer to AVCCommonObj."
-\param "currMB" "Pointer to the current macroblock."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream);
-
-/**
-This function interprets the mb_type and sets necessary information
-when the slice type is AVC_I_SLICE.
-in the macroblock structure.
-\param "mblock" "Pointer to current AVCMacroblock."
-\param "mb_type" "From the syntax bitstream."
-\return "void"
-*/
-void InterpretMBModeI(AVCMacroblock *mblock, uint mb_type);
-
-/**
-This function interprets the mb_type and sets necessary information
-when the slice type is AVC_P_SLICE.
-in the macroblock structure.
-\param "mblock" "Pointer to current AVCMacroblock."
-\param "mb_type" "From the syntax bitstream."
-\return "void"
-*/
-void InterpretMBModeP(AVCMacroblock *mblock, uint mb_type);
-
-/**
-This function interprets the mb_type and sets necessary information
-when the slice type is AVC_B_SLICE.
-in the macroblock structure.
-\param "mblock" "Pointer to current AVCMacroblock."
-\param "mb_type" "From the syntax bitstream."
-\return "void"
-*/
-void InterpretMBModeB(AVCMacroblock *mblock, uint mb_type);
-
-/**
-This function interprets the mb_type and sets necessary information
-when the slice type is AVC_SI_SLICE.
-in the macroblock structure.
-\param "mblock" "Pointer to current AVCMacroblock."
-\param "mb_type" "From the syntax bitstream."
-\return "void"
-*/
-void InterpretMBModeSI(AVCMacroblock *mblock, uint mb_type);
-
-/**
-This function interprets the sub_mb_type and sets necessary information
-when the slice type is AVC_P_SLICE.
-in the macroblock structure.
-\param "mblock" "Pointer to current AVCMacroblock."
-\param "sub_mb_type" "From the syntax bitstream."
-\return "void"
-*/
-void InterpretSubMBModeP(AVCMacroblock *mblock, uint *sub_mb_type);
-
-/**
-This function interprets the sub_mb_type and sets necessary information
-when the slice type is AVC_B_SLICE.
-in the macroblock structure.
-\param "mblock" "Pointer to current AVCMacroblock."
-\param "sub_mb_type" "From the syntax bitstream."
-\return "void"
-*/
-void InterpretSubMBModeB(AVCMacroblock *mblock, uint *sub_mb_type);
-
-/**
-This function decodes the Intra4x4 prediction mode from neighboring information
-and from the decoded syntax.
-\param "video" "Pointer to AVCCommonObj."
-\param "currMB" "Pointer to current macroblock."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status DecodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream);
-
-/*----------- vlc.c -------------------*/
-/**
-This function reads and decodes Exp-Golomb codes.
-\param "bitstream" "Pointer to AVCDecBitstream."
-\param "codeNum" "Pointer to the value of the codeNum."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status ue_v(AVCDecBitstream *bitstream, uint *codeNum);
-
-/**
-This function reads and decodes signed Exp-Golomb codes.
-\param "bitstream" "Pointer to AVCDecBitstream."
-\param "value" "Pointer to syntax element value."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status se_v(AVCDecBitstream *bitstream, int *value);
-
-/**
-This function reads and decodes signed Exp-Golomb codes for
-32 bit codeword.
-\param "bitstream" "Pointer to AVCDecBitstream."
-\param "value" "Pointer to syntax element value."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status se_v32bit(AVCDecBitstream *bitstream, int32 *value);
-
-/**
-This function reads and decodes truncated Exp-Golomb codes.
-\param "bitstream" "Pointer to AVCDecBitstream."
-\param "value" "Pointer to syntax element value."
-\param "range" "Range of the value as input to determine the algorithm."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status te_v(AVCDecBitstream *bitstream, uint *value, uint range);
-
-/**
-This function parse Exp-Golomb code from the bitstream.
-\param "bitstream" "Pointer to AVCDecBitstream."
-\param "leadingZeros" "Pointer to the number of leading zeros."
-\param "infobits" "Pointer to the value after leading zeros and the first one.
- The total number of bits read is 2*leadingZeros + 1."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status GetEGBitstring(AVCDecBitstream *bitstream, int *leadingZeros, int *infobits);
-
-/**
-This function parse Exp-Golomb code from the bitstream for 32 bit codewords.
-\param "bitstream" "Pointer to AVCDecBitstream."
-\param "leadingZeros" "Pointer to the number of leading zeros."
-\param "infobits" "Pointer to the value after leading zeros and the first one.
- The total number of bits read is 2*leadingZeros + 1."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status GetEGBitstring32bit(AVCDecBitstream *bitstream, int *leadingZeros, uint32 *infobits);
-
-/**
-This function performs CAVLC decoding of the CBP (coded block pattern) of a macroblock
-by calling ue_v() and then mapping the codeNum to the corresponding CBP value.
-\param "currMB" "Pointer to the current AVCMacroblock structure."
-\param "stream" "Pointer to the AVCDecBitstream."
-\return "void"
-*/
-AVCDec_Status DecodeCBP(AVCMacroblock *currMB, AVCDecBitstream *stream);
-
-/**
-This function decodes the syntax for trailing ones and total coefficient.
-Subject to optimization.
-\param "stream" "Pointer to the AVCDecBitstream."
-\param "TrailingOnes" "Pointer to the trailing one variable output."
-\param "TotalCoeff" "Pointer to the total coefficient variable output."
-\param "nC" "Context for number of nonzero coefficient (prediction context)."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status ce_TotalCoeffTrailingOnes(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff, int nC);
-
-/**
-This function decodes the syntax for trailing ones and total coefficient for
-chroma DC block. Subject to optimization.
-\param "stream" "Pointer to the AVCDecBitstream."
-\param "TrailingOnes" "Pointer to the trailing one variable output."
-\param "TotalCoeff" "Pointer to the total coefficient variable output."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status ce_TotalCoeffTrailingOnesChromaDC(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff);
-
-/**
-This function decode a VLC table with 2 output.
-\param "stream" "Pointer to the AVCDecBitstream."
-\param "lentab" "Table for code length."
-\param "codtab" "Table for code value."
-\param "tabwidth" "Width of the table or alphabet size of the first output."
-\param "tabheight" "Height of the table or alphabet size of the second output."
-\param "code1" "Pointer to the first output."
-\param "code2" "Pointer to the second output."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status code_from_bitstream_2d(AVCDecBitstream *stream, int *lentab, int *codtab, int tabwidth,
- int tabheight, int *code1, int *code2);
-
-/**
-This function decodes the level_prefix VLC value as in Table 9-6.
-\param "stream" "Pointer to the AVCDecBitstream."
-\param "code" "Pointer to the output."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status ce_LevelPrefix(AVCDecBitstream *stream, uint *code);
-
-/**
-This function decodes total_zeros VLC syntax as in Table 9-7 and 9-8.
-\param "stream" "Pointer to the AVCDecBitstream."
-\param "code" "Pointer to the output."
-\param "TotalCoeff" "Context parameter."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status ce_TotalZeros(AVCDecBitstream *stream, int *code, int TotalCoeff);
-
-/**
-This function decodes total_zeros VLC syntax for chroma DC as in Table 9-9.
-\param "stream" "Pointer to the AVCDecBitstream."
-\param "code" "Pointer to the output."
-\param "TotalCoeff" "Context parameter."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status ce_TotalZerosChromaDC(AVCDecBitstream *stream, int *code, int TotalCoeff);
-
-/**
-This function decodes run_before VLC syntax as in Table 9-10.
-\param "stream" "Pointer to the AVCDecBitstream."
-\param "code" "Pointer to the output."
-\param "zeroLeft" "Context parameter."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status ce_RunBefore(AVCDecBitstream *stream, int *code, int zeroLeft);
-
-/*----------- header.c -------------------*/
-/**
-This function parses vui_parameters.
-\param "decvid" "Pointer to AVCDecObject."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status vui_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCSeqParamSet *currSPS);
-AVCDec_Status sei_payload(AVCDecObject *decvid, AVCDecBitstream *stream, uint payloadType, uint payloadSize);
-
-AVCDec_Status buffering_period(AVCDecObject *decvid, AVCDecBitstream *stream);
-AVCDec_Status pic_timing(AVCDecObject *decvid, AVCDecBitstream *stream);
-AVCDec_Status recovery_point(AVCDecObject *decvid, AVCDecBitstream *stream);
-AVCDec_Status dec_ref_pic_marking_repetition(AVCDecObject *decvid, AVCDecBitstream *stream);
-AVCDec_Status motion_constrained_slice_group_set(AVCDecObject *decvid, AVCDecBitstream *stream);
-
-
-/**
-This function parses hrd_parameters.
-\param "decvid" "Pointer to AVCDecObject."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status hrd_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCHRDParams *HRDParam);
-
-/**
-This function decodes the syntax in sequence parameter set slice and fill up the AVCSeqParamSet
-structure.
-\param "decvid" "Pointer to AVCDecObject."
-\param "video" "Pointer to AVCCommonObj."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status DecodeSPS(AVCDecObject *decvid, AVCDecBitstream *stream);
-
-/**
-This function decodes the syntax in picture parameter set and fill up the AVCPicParamSet
-structure.
-\param "decvid" "Pointer to AVCDecObject."
-\param "video" "Pointer to AVCCommonObj."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS or AVCDEC_FAIL."
-*/
-AVCDec_Status DecodePPS(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream);
-AVCDec_Status DecodeSEI(AVCDecObject *decvid, AVCDecBitstream *stream);
-
-/**
-This function decodes slice header, calls related functions such as
-reference picture list reordering, prediction weight table, decode ref marking.
-See FirstPartOfSliceHeader() and RestOfSliceHeader() in JM.
-\param "decvid" "Pointer to AVCDecObject."
-\param "video" "Pointer to AVCCommonObj."
-\param "stream" "Pointer to AVCDecBitstream."
-\return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status DecodeSliceHeader(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream);
-
-/**
-This function performes necessary operations to create dummy frames when
-there is a gap in frame_num.
-\param "video" "Pointer to AVCCommonObj."
-\return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status fill_frame_num_gap(AVCHandle *avcHandle, AVCCommonObj *video);
-
-/**
-This function decodes ref_pic_list_reordering related syntax and fill up the AVCSliceHeader
-structure.
-\param "video" "Pointer to AVCCommonObj."
-\param "stream" "Pointer to AVCDecBitstream."
-\param "sliceHdr" "Pointer to AVCSliceHdr."
-\param "slice_type" "Value of slice_type - 5 if greater than 5."
-\return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status ref_pic_list_reordering(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type);
-
-/**
-This function decodes dec_ref_pic_marking related syntax and fill up the AVCSliceHeader
-structure.
-\param "video" "Pointer to AVCCommonObj."
-\param "stream" "Pointer to AVCDecBitstream."
-\param "sliceHdr" "Pointer to AVCSliceHdr."
-\return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise."
-*/
-AVCDec_Status dec_ref_pic_marking(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr);
-
-/**
-This function performs POC related operation prior to decoding a picture
-\param "video" "Pointer to AVCCommonObj."
-\return "AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise."
-See also PostPOC() for initialization of some variables.
-*/
-AVCDec_Status DecodePOC(AVCCommonObj *video);
-
-
-
-/*------------ residual.c ------------------*/
-/**
-This function decodes the intra pcm data and fill it in the corresponding location
-on the current picture.
-\param "video" "Pointer to AVCCommonObj."
-\param "stream" "Pointer to AVCDecBitstream."
-*/
-AVCDec_Status DecodeIntraPCM(AVCCommonObj *video, AVCDecBitstream *stream);
-
-/**
-This function performs residual syntax decoding as well as quantization and transformation of
-the decoded coefficients. See subclause 7.3.5.3.
-\param "video" "Pointer to AVCDecObject."
-\param "currMB" "Pointer to current macroblock."
-*/
-AVCDec_Status residual(AVCDecObject *video, AVCMacroblock *currMB);
-
-/**
-This function performs CAVLC syntax decoding to get the run and level information of the coefficients.
-\param "video" "Pointer to AVCDecObject."
-\param "type" "One of AVCResidualType for a particular 4x4 block."
-\param "bx" "Horizontal block index."
-\param "by" "Vertical block index."
-\param "level" "Pointer to array of level for output."
-\param "run" "Pointer to array of run for output."
-\param "numcoeff" "Pointer to the total number of nonzero coefficients."
-\return "AVCDEC_SUCCESS for success."
-*/
-AVCDec_Status residual_block_cavlc(AVCDecObject *video, int nC, int maxNumCoeff,
- int *level, int *run, int *numcoeff);
-
-#endif /* _AVCDEC_LIB_H_ */
diff --git a/media/libstagefright/codecs/avc/dec/src/header.cpp b/media/libstagefright/codecs/avc/dec/src/header.cpp
deleted file mode 100644
index 8681e2b..0000000
--- a/media/libstagefright/codecs/avc/dec/src/header.cpp
+++ /dev/null
@@ -1,1391 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcdec_lib.h"
-#include "avcdec_bitstream.h"
-#include "avcdec_api.h"
-
-/** see subclause 7.4.2.1 */
-AVCDec_Status DecodeSPS(AVCDecObject *decvid, AVCDecBitstream *stream)
-{
- AVCDec_Status status = AVCDEC_SUCCESS;
- AVCSeqParamSet *seqParam;
- uint temp;
- int i;
- uint profile_idc, constrained_set0_flag, constrained_set1_flag, constrained_set2_flag;
- uint level_idc, seq_parameter_set_id;
- void *userData = decvid->avcHandle->userData;
- AVCHandle *avcHandle = decvid->avcHandle;
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "DecodeSPS", -1, -1);
-
- BitstreamReadBits(stream, 8, &profile_idc);
- BitstreamRead1Bit(stream, &constrained_set0_flag);
-// if (profile_idc != 66 && constrained_set0_flag != 1)
-// {
-// return AVCDEC_FAIL;
-// }
- BitstreamRead1Bit(stream, &constrained_set1_flag);
- BitstreamRead1Bit(stream, &constrained_set2_flag);
- BitstreamReadBits(stream, 5, &temp);
- BitstreamReadBits(stream, 8, &level_idc);
- if (level_idc > 51)
- {
- return AVCDEC_FAIL;
- }
- if (mapLev2Idx[level_idc] == 255)
- {
- return AVCDEC_FAIL;
- }
- ue_v(stream, &seq_parameter_set_id);
-
- if (seq_parameter_set_id > 31)
- {
- return AVCDEC_FAIL;
- }
-
- /* Allocate sequence param set for seqParams[seq_parameter_set_id]. */
- if (decvid->seqParams[seq_parameter_set_id] == NULL) /* allocate seqParams[id] */
- {
- decvid->seqParams[seq_parameter_set_id] =
- (AVCSeqParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSeqParamSet), DEFAULT_ATTR);
-
- if (decvid->seqParams[seq_parameter_set_id] == NULL)
- {
- return AVCDEC_MEMORY_FAIL;
- }
- }
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "done alloc seqParams", -1, -1);
-
- seqParam = decvid->seqParams[seq_parameter_set_id];
-
- seqParam->profile_idc = profile_idc;
- seqParam->constrained_set0_flag = constrained_set0_flag;
- seqParam->constrained_set1_flag = constrained_set1_flag;
- seqParam->constrained_set2_flag = constrained_set2_flag;
- seqParam->level_idc = level_idc;
- seqParam->seq_parameter_set_id = seq_parameter_set_id;
-
- /* continue decoding SPS */
- ue_v(stream, &(seqParam->log2_max_frame_num_minus4));
-
- if (seqParam->log2_max_frame_num_minus4 > 12)
- {
- return AVCDEC_FAIL;
- }
-
- ue_v(stream, &(seqParam->pic_order_cnt_type));
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 1", seqParam->log2_max_frame_num_minus4, seqParam->pic_order_cnt_type);
-
- if (seqParam->pic_order_cnt_type == 0)
- {
- ue_v(stream, &(seqParam->log2_max_pic_order_cnt_lsb_minus4));
- }
- else if (seqParam->pic_order_cnt_type == 1)
- { // MC_CHECK
- BitstreamRead1Bit(stream, (uint*)&(seqParam->delta_pic_order_always_zero_flag));
- se_v32bit(stream, &(seqParam->offset_for_non_ref_pic));
- se_v32bit(stream, &(seqParam->offset_for_top_to_bottom_field));
- ue_v(stream, &(seqParam->num_ref_frames_in_pic_order_cnt_cycle));
-
- for (i = 0; i < (int)(seqParam->num_ref_frames_in_pic_order_cnt_cycle); i++)
- {
- se_v32bit(stream, &(seqParam->offset_for_ref_frame[i]));
- }
- }
-
- ue_v(stream, &(seqParam->num_ref_frames));
-
- if (seqParam->num_ref_frames > 16)
- {
- return AVCDEC_FAIL;
- }
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 2", seqParam->num_ref_frames, -1);
-
- BitstreamRead1Bit(stream, (uint*)&(seqParam->gaps_in_frame_num_value_allowed_flag));
- ue_v(stream, &(seqParam->pic_width_in_mbs_minus1));
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "picwidth", seqParam->pic_width_in_mbs_minus1, -1);
-
- ue_v(stream, &(seqParam->pic_height_in_map_units_minus1));
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "picwidth", seqParam->pic_height_in_map_units_minus1, -1);
-
- BitstreamRead1Bit(stream, (uint*)&(seqParam->frame_mbs_only_flag));
-
- seqParam->mb_adaptive_frame_field_flag = 0; /* default value */
- if (!seqParam->frame_mbs_only_flag)
- {
- BitstreamRead1Bit(stream, (uint*)&(seqParam->mb_adaptive_frame_field_flag));
- }
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 3", seqParam->frame_mbs_only_flag, -1);
-
- BitstreamRead1Bit(stream, (uint*)&(seqParam->direct_8x8_inference_flag));
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 4", seqParam->direct_8x8_inference_flag, -1);
-
- BitstreamRead1Bit(stream, (uint*)&(seqParam->frame_cropping_flag));
- seqParam->frame_crop_left_offset = 0; /* default value */
- seqParam->frame_crop_right_offset = 0;/* default value */
- seqParam->frame_crop_top_offset = 0;/* default value */
- seqParam->frame_crop_bottom_offset = 0;/* default value */
- if (seqParam->frame_cropping_flag)
- {
- ue_v(stream, &(seqParam->frame_crop_left_offset));
- ue_v(stream, &(seqParam->frame_crop_right_offset));
- ue_v(stream, &(seqParam->frame_crop_top_offset));
- ue_v(stream, &(seqParam->frame_crop_bottom_offset));
- }
-
- DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "check point 5", seqParam->frame_cropping_flag, -1);
-
- BitstreamRead1Bit(stream, (uint*)&(seqParam->vui_parameters_present_flag));
- if (seqParam->vui_parameters_present_flag)
- {
- status = vui_parameters(decvid, stream, seqParam);
- if (status != AVCDEC_SUCCESS)
- {
- return AVCDEC_FAIL;
- }
- }
-
- return status;
-}
-
-
-AVCDec_Status vui_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCSeqParamSet *currSPS)
-{
- uint temp;
- uint temp32;
- uint aspect_ratio_idc, overscan_appopriate_flag, video_format, video_full_range_flag;
- /* aspect_ratio_info_present_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- BitstreamReadBits(stream, 8, &aspect_ratio_idc);
- if (aspect_ratio_idc == 255)
- {
- /* sar_width */
- BitstreamReadBits(stream, 16, &temp);
- /* sar_height */
- BitstreamReadBits(stream, 16, &temp);
- }
- }
- /* overscan_info_present */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- BitstreamRead1Bit(stream, &overscan_appopriate_flag);
- }
- /* video_signal_type_present_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- BitstreamReadBits(stream, 3, &video_format);
- BitstreamRead1Bit(stream, &video_full_range_flag);
- /* colour_description_present_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* colour_primaries */
- BitstreamReadBits(stream, 8, &temp);
- /* transfer_characteristics */
- BitstreamReadBits(stream, 8, &temp);
- /* matrix coefficients */
- BitstreamReadBits(stream, 8, &temp);
- }
- }
- /* chroma_loc_info_present_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* chroma_sample_loc_type_top_field */
- ue_v(stream, &temp);
- /* chroma_sample_loc_type_bottom_field */
- ue_v(stream, &temp);
- }
-
- /* timing_info_present_flag*/
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* num_unit_in_tick*/
- BitstreamReadBits(stream, 32, &temp32);
- /* time_scale */
- BitstreamReadBits(stream, 32, &temp32);
- /* fixed_frame_rate_flag */
- BitstreamRead1Bit(stream, &temp);
- }
-
- /* nal_hrd_parameters_present_flag */
- BitstreamRead1Bit(stream, &temp);
- currSPS->vui_parameters.nal_hrd_parameters_present_flag = temp;
- if (temp)
- {
- hrd_parameters(decvid, stream, &(currSPS->vui_parameters.nal_hrd_parameters));
- }
- /* vcl_hrd_parameters_present_flag*/
- BitstreamRead1Bit(stream, &temp);
- currSPS->vui_parameters.vcl_hrd_parameters_present_flag = temp;
- if (temp)
- {
- hrd_parameters(decvid, stream, &(currSPS->vui_parameters.vcl_hrd_parameters));
- }
- if (currSPS->vui_parameters.nal_hrd_parameters_present_flag || currSPS->vui_parameters.vcl_hrd_parameters_present_flag)
- {
- /* low_delay_hrd_flag */
- BitstreamRead1Bit(stream, &temp);
- }
- /* pic_struct_present_flag */
- BitstreamRead1Bit(stream, &temp);
- currSPS->vui_parameters.pic_struct_present_flag = temp;
- /* bitstream_restriction_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* motion_vectors_over_pic_boundaries_flag */
- BitstreamRead1Bit(stream, &temp);
- /* max_bytes_per_pic_denom */
- ue_v(stream, &temp);
- /* max_bits_per_mb_denom */
- ue_v(stream, &temp);
- /* log2_max_mv_length_horizontal */
- ue_v(stream, &temp);
- /* log2_max_mv_length_vertical */
- ue_v(stream, &temp);
- /* num_reorder_frames */
- ue_v(stream, &temp);
- /* max_dec_frame_buffering */
- ue_v(stream, &temp);
- }
- return AVCDEC_SUCCESS;
-}
-AVCDec_Status hrd_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCHRDParams *HRDParam)
-{
- OSCL_UNUSED_ARG(decvid);
- uint temp;
- uint cpb_cnt_minus1;
- uint i;
- ue_v(stream, &cpb_cnt_minus1);
- HRDParam->cpb_cnt_minus1 = cpb_cnt_minus1;
- /* bit_rate_scale */
- BitstreamReadBits(stream, 4, &temp);
- /* cpb_size_scale */
- BitstreamReadBits(stream, 4, &temp);
- for (i = 0; i <= cpb_cnt_minus1; i++)
- {
- /* bit_rate_value_minus1[i] */
- ue_v(stream, &temp);
- /* cpb_size_value_minus1[i] */
- ue_v(stream, &temp);
- /* cbr_flag[i] */
- ue_v(stream, &temp);
- }
- /* initial_cpb_removal_delay_length_minus1 */
- BitstreamReadBits(stream, 5, &temp);
- /* cpb_removal_delay_length_minus1 */
- BitstreamReadBits(stream, 5, &temp);
- HRDParam->cpb_removal_delay_length_minus1 = temp;
- /* dpb_output_delay_length_minus1 */
- BitstreamReadBits(stream, 5, &temp);
- HRDParam->dpb_output_delay_length_minus1 = temp;
- /* time_offset_length */
- BitstreamReadBits(stream, 5, &temp);
- HRDParam->time_offset_length = temp;
- return AVCDEC_SUCCESS;
-}
-
-
-/** see subclause 7.4.2.2 */
-AVCDec_Status DecodePPS(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream)
-{
- AVCPicParamSet *picParam;
- AVCDec_Status status;
- int i, iGroup, numBits;
- int PicWidthInMbs, PicHeightInMapUnits, PicSizeInMapUnits;
- uint pic_parameter_set_id, seq_parameter_set_id;
- void *userData = decvid->avcHandle->userData;
- AVCHandle *avcHandle = decvid->avcHandle;
-
- ue_v(stream, &pic_parameter_set_id);
- if (pic_parameter_set_id > 255)
- {
- return AVCDEC_FAIL;
- }
-
- ue_v(stream, &seq_parameter_set_id);
-
- if (seq_parameter_set_id > 31)
- {
- return AVCDEC_FAIL;
- }
-
- /* 2.1 if picParams[pic_param_set_id] is NULL, allocate it. */
- if (decvid->picParams[pic_parameter_set_id] == NULL)
- {
- decvid->picParams[pic_parameter_set_id] =
- (AVCPicParamSet*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCPicParamSet), DEFAULT_ATTR);
- if (decvid->picParams[pic_parameter_set_id] == NULL)
- {
- return AVCDEC_MEMORY_FAIL;
- }
-
- decvid->picParams[pic_parameter_set_id]->slice_group_id = NULL;
- }
-
- video->currPicParams = picParam = decvid->picParams[pic_parameter_set_id];
- picParam->seq_parameter_set_id = seq_parameter_set_id;
- picParam->pic_parameter_set_id = pic_parameter_set_id;
-
- BitstreamRead1Bit(stream, (uint*)&(picParam->entropy_coding_mode_flag));
- if (picParam->entropy_coding_mode_flag)
- {
- status = AVCDEC_FAIL;
- goto clean_up;
- }
- BitstreamRead1Bit(stream, (uint*)&(picParam->pic_order_present_flag));
- ue_v(stream, &(picParam->num_slice_groups_minus1));
-
- if (picParam->num_slice_groups_minus1 > MAX_NUM_SLICE_GROUP - 1)
- {
- status = AVCDEC_FAIL;
- goto clean_up;
- }
-
- picParam->slice_group_change_rate_minus1 = 0; /* default value */
- if (picParam->num_slice_groups_minus1 > 0)
- {
- ue_v(stream, &(picParam->slice_group_map_type));
- if (picParam->slice_group_map_type == 0)
- {
- for (iGroup = 0; iGroup <= (int)picParam->num_slice_groups_minus1; iGroup++)
- {
- ue_v(stream, &(picParam->run_length_minus1[iGroup]));
- }
- }
- else if (picParam->slice_group_map_type == 2)
- { // MC_CHECK <= or <
- for (iGroup = 0; iGroup < (int)picParam->num_slice_groups_minus1; iGroup++)
- {
- ue_v(stream, &(picParam->top_left[iGroup]));
- ue_v(stream, &(picParam->bottom_right[iGroup]));
- }
- }
- else if (picParam->slice_group_map_type == 3 ||
- picParam->slice_group_map_type == 4 ||
- picParam->slice_group_map_type == 5)
- {
- BitstreamRead1Bit(stream, (uint*)&(picParam->slice_group_change_direction_flag));
- ue_v(stream, &(picParam->slice_group_change_rate_minus1));
- }
- else if (picParam->slice_group_map_type == 6)
- {
- ue_v(stream, &(picParam->pic_size_in_map_units_minus1));
-
- numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */
- i = picParam->num_slice_groups_minus1;
- while (i > 0)
- {
- numBits++;
- i >>= 1;
- }
-
- i = picParam->seq_parameter_set_id;
- if (decvid->seqParams[i] == NULL)
- {
- status = AVCDEC_FAIL;
- goto clean_up;
- }
-
-
- PicWidthInMbs = decvid->seqParams[i]->pic_width_in_mbs_minus1 + 1;
- PicHeightInMapUnits = decvid->seqParams[i]->pic_height_in_map_units_minus1 + 1 ;
- PicSizeInMapUnits = PicWidthInMbs * PicHeightInMapUnits ;
-
- /* information has to be consistent with the seq_param */
- if ((int)picParam->pic_size_in_map_units_minus1 != PicSizeInMapUnits - 1)
- {
- status = AVCDEC_FAIL;
- goto clean_up;
- }
-
- if (picParam->slice_group_id)
- {
- avcHandle->CBAVC_Free(userData, (int)picParam->slice_group_id);
- }
- picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * PicSizeInMapUnits, DEFAULT_ATTR);
- if (picParam->slice_group_id == NULL)
- {
- status = AVCDEC_MEMORY_FAIL;
- goto clean_up;
- }
-
- for (i = 0; i < PicSizeInMapUnits; i++)
- {
- BitstreamReadBits(stream, numBits, &(picParam->slice_group_id[i]));
- }
- }
-
- }
-
- ue_v(stream, &(picParam->num_ref_idx_l0_active_minus1));
- if (picParam->num_ref_idx_l0_active_minus1 > 31)
- {
- status = AVCDEC_FAIL; /* out of range */
- goto clean_up;
- }
-
- ue_v(stream, &(picParam->num_ref_idx_l1_active_minus1));
- if (picParam->num_ref_idx_l1_active_minus1 > 31)
- {
- status = AVCDEC_FAIL; /* out of range */
- goto clean_up;
- }
-
- BitstreamRead1Bit(stream, (uint*)&(picParam->weighted_pred_flag));
- BitstreamReadBits(stream, 2, &(picParam->weighted_bipred_idc));
- if (picParam->weighted_bipred_idc > 2)
- {
- status = AVCDEC_FAIL; /* out of range */
- goto clean_up;
- }
-
- se_v(stream, &(picParam->pic_init_qp_minus26));
- if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)
- {
- status = AVCDEC_FAIL; /* out of range */
- goto clean_up;
- }
-
- se_v(stream, &(picParam->pic_init_qs_minus26));
- if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)
- {
- status = AVCDEC_FAIL; /* out of range */
- goto clean_up;
- }
-
- se_v(stream, &(picParam->chroma_qp_index_offset));
- if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)
- {
- status = AVCDEC_FAIL; /* out of range */
- status = AVCDEC_FAIL; /* out of range */
- goto clean_up;
- }
-
- BitstreamReadBits(stream, 3, &pic_parameter_set_id);
- picParam->deblocking_filter_control_present_flag = pic_parameter_set_id >> 2;
- picParam->constrained_intra_pred_flag = (pic_parameter_set_id >> 1) & 1;
- picParam->redundant_pic_cnt_present_flag = pic_parameter_set_id & 1;
-
- return AVCDEC_SUCCESS;
-clean_up:
- if (decvid->picParams[pic_parameter_set_id])
- {
- if (picParam->slice_group_id)
- {
- avcHandle->CBAVC_Free(userData, (int)picParam->slice_group_id);
- }
- decvid->picParams[pic_parameter_set_id]->slice_group_id = NULL;
- avcHandle->CBAVC_Free(userData, (int)decvid->picParams[pic_parameter_set_id]);
- decvid->picParams[pic_parameter_set_id] = NULL;
- return status;
- }
- return AVCDEC_SUCCESS;
-}
-
-
-/* FirstPartOfSliceHeader();
- RestOfSliceHeader() */
-/** see subclause 7.4.3 */
-AVCDec_Status DecodeSliceHeader(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream)
-{
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCPicParamSet *currPPS;
- AVCSeqParamSet *currSPS;
- AVCDec_Status status;
- uint idr_pic_id;
- int slice_type, temp, i;
-
- ue_v(stream, &(sliceHdr->first_mb_in_slice));
- ue_v(stream, (uint*)&slice_type);
-
- if (sliceHdr->first_mb_in_slice != 0)
- {
- if ((int)sliceHdr->slice_type >= 5 && slice_type != (int)sliceHdr->slice_type - 5)
- {
- return AVCDEC_FAIL; /* slice type doesn't follow the first slice in the picture */
- }
- }
- sliceHdr->slice_type = (AVCSliceType) slice_type;
- if (slice_type > 4)
- {
- slice_type -= 5;
- }
-
- if (slice_type == 1 || slice_type > 2)
- {
- return AVCDEC_FAIL;
- }
-
- video->slice_type = (AVCSliceType) slice_type;
-
- ue_v(stream, &(sliceHdr->pic_parameter_set_id));
- /* end FirstPartSliceHeader() */
- /* begin RestOfSliceHeader() */
- /* after getting pic_parameter_set_id, we have to load corresponding SPS and PPS */
- if (sliceHdr->pic_parameter_set_id > 255)
- {
- return AVCDEC_FAIL;
- }
-
- if (decvid->picParams[sliceHdr->pic_parameter_set_id] == NULL)
- return AVCDEC_FAIL; /* PPS doesn't exist */
-
- currPPS = video->currPicParams = decvid->picParams[sliceHdr->pic_parameter_set_id];
-
- if (decvid->seqParams[currPPS->seq_parameter_set_id] == NULL)
- return AVCDEC_FAIL; /* SPS doesn't exist */
-
- currSPS = video->currSeqParams = decvid->seqParams[currPPS->seq_parameter_set_id];
-
- if (currPPS->seq_parameter_set_id != video->seq_parameter_set_id)
- {
- video->seq_parameter_set_id = currPPS->seq_parameter_set_id;
- status = (AVCDec_Status)AVCConfigureSequence(decvid->avcHandle, video, false);
- if (status != AVCDEC_SUCCESS)
- return status;
- video->level_idc = currSPS->level_idc;
- }
-
- /* derived variables from SPS */
- video->MaxFrameNum = 1 << (currSPS->log2_max_frame_num_minus4 + 4);
- // MC_OPTIMIZE
- video->PicWidthInMbs = currSPS->pic_width_in_mbs_minus1 + 1;
- video->PicWidthInSamplesL = video->PicWidthInMbs * 16 ;
- video->PicWidthInSamplesC = video->PicWidthInMbs * 8 ;
- video->PicHeightInMapUnits = currSPS->pic_height_in_map_units_minus1 + 1 ;
- video->PicSizeInMapUnits = video->PicWidthInMbs * video->PicHeightInMapUnits ;
- video->FrameHeightInMbs = (2 - currSPS->frame_mbs_only_flag) * video->PicHeightInMapUnits ;
-
- /* derived from PPS */
- video->SliceGroupChangeRate = currPPS->slice_group_change_rate_minus1 + 1;
-
- /* then we can continue decoding slice header */
-
- BitstreamReadBits(stream, currSPS->log2_max_frame_num_minus4 + 4, &(sliceHdr->frame_num));
-
- if (video->currFS == NULL && sliceHdr->frame_num != 0)
- {
- video->prevFrameNum = video->PrevRefFrameNum = sliceHdr->frame_num - 1;
- }
-
- if (!currSPS->frame_mbs_only_flag)
- {
- BitstreamRead1Bit(stream, &(sliceHdr->field_pic_flag));
- if (sliceHdr->field_pic_flag)
- {
- return AVCDEC_FAIL;
- }
- }
-
- /* derived variables from slice header*/
- video->PicHeightInMbs = video->FrameHeightInMbs;
- video->PicHeightInSamplesL = video->PicHeightInMbs * 16;
- video->PicHeightInSamplesC = video->PicHeightInMbs * 8;
- video->PicSizeInMbs = video->PicWidthInMbs * video->PicHeightInMbs;
-
- if (sliceHdr->first_mb_in_slice >= video->PicSizeInMbs)
- {
- return AVCDEC_FAIL;
- }
- video->MaxPicNum = video->MaxFrameNum;
- video->CurrPicNum = sliceHdr->frame_num;
-
-
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- if (sliceHdr->frame_num != 0)
- {
- return AVCDEC_FAIL;
- }
- ue_v(stream, &idr_pic_id);
- }
-
- sliceHdr->delta_pic_order_cnt_bottom = 0; /* default value */
- sliceHdr->delta_pic_order_cnt[0] = 0; /* default value */
- sliceHdr->delta_pic_order_cnt[1] = 0; /* default value */
- if (currSPS->pic_order_cnt_type == 0)
- {
- BitstreamReadBits(stream, currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4,
- &(sliceHdr->pic_order_cnt_lsb));
- video->MaxPicOrderCntLsb = 1 << (currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4);
- if (sliceHdr->pic_order_cnt_lsb > video->MaxPicOrderCntLsb - 1)
- return AVCDEC_FAIL; /* out of range */
-
- if (currPPS->pic_order_present_flag)
- {
- se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt_bottom));
- }
- }
- if (currSPS->pic_order_cnt_type == 1 && !currSPS->delta_pic_order_always_zero_flag)
- {
- se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt[0]));
- if (currPPS->pic_order_present_flag)
- {
- se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt[1]));
- }
- }
-
- sliceHdr->redundant_pic_cnt = 0; /* default value */
- if (currPPS->redundant_pic_cnt_present_flag)
- {
- // MC_CHECK
- ue_v(stream, &(sliceHdr->redundant_pic_cnt));
- if (sliceHdr->redundant_pic_cnt > 127) /* out of range */
- return AVCDEC_FAIL;
-
- if (sliceHdr->redundant_pic_cnt > 0) /* redundant picture */
- return AVCDEC_FAIL; /* not supported */
- }
- sliceHdr->num_ref_idx_l0_active_minus1 = currPPS->num_ref_idx_l0_active_minus1;
- sliceHdr->num_ref_idx_l1_active_minus1 = currPPS->num_ref_idx_l1_active_minus1;
-
- if (slice_type == AVC_P_SLICE)
- {
- BitstreamRead1Bit(stream, &(sliceHdr->num_ref_idx_active_override_flag));
- if (sliceHdr->num_ref_idx_active_override_flag)
- {
- ue_v(stream, &(sliceHdr->num_ref_idx_l0_active_minus1));
- }
- else /* the following condition is not allowed if the flag is zero */
- {
- if ((slice_type == AVC_P_SLICE) && currPPS->num_ref_idx_l0_active_minus1 > 15)
- {
- return AVCDEC_FAIL; /* not allowed */
- }
- }
- }
-
-
- if (sliceHdr->num_ref_idx_l0_active_minus1 > 15 ||
- sliceHdr->num_ref_idx_l1_active_minus1 > 15)
- {
- return AVCDEC_FAIL; /* not allowed */
- }
- /* if MbaffFrameFlag =1,
- max value of index is num_ref_idx_l0_active_minus1 for frame MBs and
- 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1 for field MBs */
-
- /* ref_pic_list_reordering() */
- status = ref_pic_list_reordering(video, stream, sliceHdr, slice_type);
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
-
-
- if (video->nal_ref_idc != 0)
- {
- dec_ref_pic_marking(video, stream, sliceHdr);
- }
- se_v(stream, &(sliceHdr->slice_qp_delta));
-
- video->QPy = 26 + currPPS->pic_init_qp_minus26 + sliceHdr->slice_qp_delta;
- if (video->QPy > 51 || video->QPy < 0)
- {
- video->QPy = AVC_CLIP3(0, 51, video->QPy);
-// return AVCDEC_FAIL;
- }
- video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->QPy + video->currPicParams->chroma_qp_index_offset)];
-
- video->QPy_div_6 = (video->QPy * 43) >> 8;
- video->QPy_mod_6 = video->QPy - 6 * video->QPy_div_6;
-
- video->QPc_div_6 = (video->QPc * 43) >> 8;
- video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;
-
- sliceHdr->slice_alpha_c0_offset_div2 = 0;
- sliceHdr->slice_beta_offset_div_2 = 0;
- sliceHdr->disable_deblocking_filter_idc = 0;
- video->FilterOffsetA = video->FilterOffsetB = 0;
-
- if (currPPS->deblocking_filter_control_present_flag)
- {
- ue_v(stream, &(sliceHdr->disable_deblocking_filter_idc));
- if (sliceHdr->disable_deblocking_filter_idc > 2)
- {
- return AVCDEC_FAIL; /* out of range */
- }
- if (sliceHdr->disable_deblocking_filter_idc != 1)
- {
- se_v(stream, &(sliceHdr->slice_alpha_c0_offset_div2));
- if (sliceHdr->slice_alpha_c0_offset_div2 < -6 ||
- sliceHdr->slice_alpha_c0_offset_div2 > 6)
- {
- return AVCDEC_FAIL;
- }
- video->FilterOffsetA = sliceHdr->slice_alpha_c0_offset_div2 << 1;
-
- se_v(stream, &(sliceHdr->slice_beta_offset_div_2));
- if (sliceHdr->slice_beta_offset_div_2 < -6 ||
- sliceHdr->slice_beta_offset_div_2 > 6)
- {
- return AVCDEC_FAIL;
- }
- video->FilterOffsetB = sliceHdr->slice_beta_offset_div_2 << 1;
- }
- }
-
- if (currPPS->num_slice_groups_minus1 > 0 && currPPS->slice_group_map_type >= 3
- && currPPS->slice_group_map_type <= 5)
- {
- /* Ceil(Log2(PicSizeInMapUnits/(float)SliceGroupChangeRate + 1)) */
- temp = video->PicSizeInMapUnits / video->SliceGroupChangeRate;
- if (video->PicSizeInMapUnits % video->SliceGroupChangeRate)
- {
- temp++;
- }
- i = 0;
- temp++;
- while (temp)
- {
- temp >>= 1;
- i++;
- }
-
- BitstreamReadBits(stream, i, &(sliceHdr->slice_group_change_cycle));
- video->MapUnitsInSliceGroup0 =
- AVC_MIN(sliceHdr->slice_group_change_cycle * video->SliceGroupChangeRate, video->PicSizeInMapUnits);
- }
-
- return AVCDEC_SUCCESS;
-}
-
-
-AVCDec_Status fill_frame_num_gap(AVCHandle *avcHandle, AVCCommonObj *video)
-{
- AVCDec_Status status;
- int CurrFrameNum;
- int UnusedShortTermFrameNum;
- int tmp1 = video->sliceHdr->delta_pic_order_cnt[0];
- int tmp2 = video->sliceHdr->delta_pic_order_cnt[1];
- int tmp3 = video->CurrPicNum;
- int tmp4 = video->sliceHdr->adaptive_ref_pic_marking_mode_flag;
- UnusedShortTermFrameNum = (video->prevFrameNum + 1) % video->MaxFrameNum;
- CurrFrameNum = video->sliceHdr->frame_num;
-
- video->sliceHdr->delta_pic_order_cnt[0] = 0;
- video->sliceHdr->delta_pic_order_cnt[1] = 0;
- while (CurrFrameNum != UnusedShortTermFrameNum)
- {
- video->CurrPicNum = UnusedShortTermFrameNum;
- video->sliceHdr->frame_num = UnusedShortTermFrameNum;
-
- status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
- if (status != AVCDEC_SUCCESS) /* no buffer available */
- {
- return status;
- }
- DecodePOC(video);
- DPBInitPic(video, UnusedShortTermFrameNum);
-
-
- video->currFS->PicOrderCnt = video->PicOrderCnt;
- video->currFS->FrameNum = video->sliceHdr->frame_num;
-
- /* initialize everything to zero */
- video->currFS->IsOutputted = 0x01;
- video->currFS->IsReference = 3;
- video->currFS->IsLongTerm = 0;
- video->currFS->frame.isReference = TRUE;
- video->currFS->frame.isLongTerm = FALSE;
-
- video->sliceHdr->adaptive_ref_pic_marking_mode_flag = 0;
-
- status = (AVCDec_Status)StorePictureInDPB(avcHandle, video); // MC_CHECK check the return status
- if (status != AVCDEC_SUCCESS)
- {
- return AVCDEC_FAIL;
- }
- video->prevFrameNum = UnusedShortTermFrameNum;
- UnusedShortTermFrameNum = (UnusedShortTermFrameNum + 1) % video->MaxFrameNum;
- }
- video->sliceHdr->frame_num = CurrFrameNum;
- video->CurrPicNum = tmp3;
- video->sliceHdr->delta_pic_order_cnt[0] = tmp1;
- video->sliceHdr->delta_pic_order_cnt[1] = tmp2;
- video->sliceHdr->adaptive_ref_pic_marking_mode_flag = tmp4;
- return AVCDEC_SUCCESS;
-}
-
-/** see subclause 7.4.3.1 */
-AVCDec_Status ref_pic_list_reordering(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type)
-{
- int i;
-
- if (slice_type != AVC_I_SLICE)
- {
- BitstreamRead1Bit(stream, &(sliceHdr->ref_pic_list_reordering_flag_l0));
- if (sliceHdr->ref_pic_list_reordering_flag_l0)
- {
- i = 0;
- do
- {
- ue_v(stream, &(sliceHdr->reordering_of_pic_nums_idc_l0[i]));
- if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 ||
- sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1)
- {
- ue_v(stream, &(sliceHdr->abs_diff_pic_num_minus1_l0[i]));
- if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 &&
- sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum / 2 - 1)
- {
- return AVCDEC_FAIL; /* out of range */
- }
- if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1 &&
- sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum / 2 - 2)
- {
- return AVCDEC_FAIL; /* out of range */
- }
- }
- else if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 2)
- {
- ue_v(stream, &(sliceHdr->long_term_pic_num_l0[i]));
- }
- i++;
- }
- while (sliceHdr->reordering_of_pic_nums_idc_l0[i-1] != 3
- && i <= (int)sliceHdr->num_ref_idx_l0_active_minus1 + 1) ;
- }
- }
- return AVCDEC_SUCCESS;
-}
-
-/** see subclause 7.4.3.3 */
-AVCDec_Status dec_ref_pic_marking(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr)
-{
- int i;
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- BitstreamRead1Bit(stream, &(sliceHdr->no_output_of_prior_pics_flag));
- BitstreamRead1Bit(stream, &(sliceHdr->long_term_reference_flag));
- if (sliceHdr->long_term_reference_flag == 0) /* used for short-term */
- {
- video->MaxLongTermFrameIdx = -1; /* no long-term frame indx */
- }
- else /* used for long-term */
- {
- video->MaxLongTermFrameIdx = 0;
- video->LongTermFrameIdx = 0;
- }
- }
- else
- {
- BitstreamRead1Bit(stream, &(sliceHdr->adaptive_ref_pic_marking_mode_flag));
- if (sliceHdr->adaptive_ref_pic_marking_mode_flag)
- {
- i = 0;
- do
- {
- ue_v(stream, &(sliceHdr->memory_management_control_operation[i]));
- if (sliceHdr->memory_management_control_operation[i] == 1 ||
- sliceHdr->memory_management_control_operation[i] == 3)
- {
- ue_v(stream, &(sliceHdr->difference_of_pic_nums_minus1[i]));
- }
- if (sliceHdr->memory_management_control_operation[i] == 2)
- {
- ue_v(stream, &(sliceHdr->long_term_pic_num[i]));
- }
- if (sliceHdr->memory_management_control_operation[i] == 3 ||
- sliceHdr->memory_management_control_operation[i] == 6)
- {
- ue_v(stream, &(sliceHdr->long_term_frame_idx[i]));
- }
- if (sliceHdr->memory_management_control_operation[i] == 4)
- {
- ue_v(stream, &(sliceHdr->max_long_term_frame_idx_plus1[i]));
- }
- i++;
- }
- while (sliceHdr->memory_management_control_operation[i-1] != 0 && i < MAX_DEC_REF_PIC_MARKING);
- if (i >= MAX_DEC_REF_PIC_MARKING)
- {
- return AVCDEC_FAIL; /* we're screwed!!, not enough memory */
- }
- }
- }
-
- return AVCDEC_SUCCESS;
-}
-
-/* see subclause 8.2.1 Decoding process for picture order count. */
-AVCDec_Status DecodePOC(AVCCommonObj *video)
-{
- AVCSeqParamSet *currSPS = video->currSeqParams;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- int i;
-
- switch (currSPS->pic_order_cnt_type)
- {
- case 0: /* POC MODE 0 , subclause 8.2.1.1 */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->prevPicOrderCntMsb = 0;
- video->prevPicOrderCntLsb = 0;
- }
-
- /* Calculate the MSBs of current picture */
- if (sliceHdr->pic_order_cnt_lsb < video->prevPicOrderCntLsb &&
- (video->prevPicOrderCntLsb - sliceHdr->pic_order_cnt_lsb) >= (video->MaxPicOrderCntLsb / 2))
- video->PicOrderCntMsb = video->prevPicOrderCntMsb + video->MaxPicOrderCntLsb;
- else if (sliceHdr->pic_order_cnt_lsb > video->prevPicOrderCntLsb &&
- (sliceHdr->pic_order_cnt_lsb - video->prevPicOrderCntLsb) > (video->MaxPicOrderCntLsb / 2))
- video->PicOrderCntMsb = video->prevPicOrderCntMsb - video->MaxPicOrderCntLsb;
- else
- video->PicOrderCntMsb = video->prevPicOrderCntMsb;
-
- /* JVT-I010 page 81 is different from JM7.3 */
-
-
- video->PicOrderCnt = video->TopFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;
- video->BottomFieldOrderCnt = video->TopFieldOrderCnt + sliceHdr->delta_pic_order_cnt_bottom;
-
- break;
-
-
- case 1: /* POC MODE 1, subclause 8.2.1.2 */
- /* calculate FrameNumOffset */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->prevFrameNumOffset = 0;
- video->FrameNumOffset = 0;
- }
- else if (video->prevFrameNum > sliceHdr->frame_num)
- {
- video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
- }
- else
- {
- video->FrameNumOffset = video->prevFrameNumOffset;
- }
- /* calculate absFrameNum */
- if (currSPS->num_ref_frames_in_pic_order_cnt_cycle)
- {
- video->absFrameNum = video->FrameNumOffset + sliceHdr->frame_num;
- }
- else
- {
- video->absFrameNum = 0;
- }
-
- if (video->absFrameNum > 0 && video->nal_ref_idc == 0)
- {
- video->absFrameNum--;
- }
-
- /* derive picOrderCntCycleCnt and frameNumInPicOrderCntCycle */
- if (video->absFrameNum > 0)
- {
- video->picOrderCntCycleCnt = (video->absFrameNum - 1) / currSPS->num_ref_frames_in_pic_order_cnt_cycle;
- video->frameNumInPicOrderCntCycle = (video->absFrameNum - 1) % currSPS->num_ref_frames_in_pic_order_cnt_cycle;
- }
- /* derive expectedDeltaPerPicOrderCntCycle */
- video->expectedDeltaPerPicOrderCntCycle = 0;
- for (i = 0; i < (int)currSPS->num_ref_frames_in_pic_order_cnt_cycle; i++)
- {
- video->expectedDeltaPerPicOrderCntCycle += currSPS->offset_for_ref_frame[i];
- }
- /* derive expectedPicOrderCnt */
- if (video->absFrameNum)
- {
- video->expectedPicOrderCnt = video->picOrderCntCycleCnt * video->expectedDeltaPerPicOrderCntCycle;
- for (i = 0; i <= video->frameNumInPicOrderCntCycle; i++)
- {
- video->expectedPicOrderCnt += currSPS->offset_for_ref_frame[i];
- }
- }
- else
- {
- video->expectedPicOrderCnt = 0;
- }
-
- if (video->nal_ref_idc == 0)
- {
- video->expectedPicOrderCnt += currSPS->offset_for_non_ref_pic;
- }
- /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
-
- video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
- video->BottomFieldOrderCnt = video->TopFieldOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[1];
-
- video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);
-
-
- break;
-
-
- case 2: /* POC MODE 2, subclause 8.2.1.3 */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->FrameNumOffset = 0;
- }
- else if (video->prevFrameNum > sliceHdr->frame_num)
- {
- video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
- }
- else
- {
- video->FrameNumOffset = video->prevFrameNumOffset;
- }
- /* derive tempPicOrderCnt, we just use PicOrderCnt */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->PicOrderCnt = 0;
- }
- else if (video->nal_ref_idc == 0)
- {
- video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num) - 1;
- }
- else
- {
- video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num);
- }
- video->TopFieldOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCnt;
- break;
- default:
- return AVCDEC_FAIL;
- }
-
- return AVCDEC_SUCCESS;
-}
-
-
-AVCDec_Status DecodeSEI(AVCDecObject *decvid, AVCDecBitstream *stream)
-{
- OSCL_UNUSED_ARG(decvid);
- OSCL_UNUSED_ARG(stream);
- return AVCDEC_SUCCESS;
-}
-
-AVCDec_Status sei_payload(AVCDecObject *decvid, AVCDecBitstream *stream, uint payloadType, uint payloadSize)
-{
- AVCDec_Status status = AVCDEC_SUCCESS;
- uint i;
- switch (payloadType)
- {
- case 0:
- /* buffering period SEI */
- status = buffering_period(decvid, stream);
- break;
- case 1:
- /* picture timing SEI */
- status = pic_timing(decvid, stream);
- break;
- case 2:
-
- case 3:
-
- case 4:
-
- case 5:
-
- case 8:
-
- case 9:
-
- case 10:
-
- case 11:
-
- case 12:
-
- case 13:
-
- case 14:
-
- case 15:
-
- case 16:
-
- case 17:
- for (i = 0; i < payloadSize; i++)
- {
- BitstreamFlushBits(stream, 8);
- }
- break;
- case 6:
- /* recovery point SEI */
- status = recovery_point(decvid, stream);
- break;
- case 7:
- /* decoded reference picture marking repetition SEI */
- status = dec_ref_pic_marking_repetition(decvid, stream);
- break;
-
- case 18:
- /* motion-constrained slice group set SEI */
- status = motion_constrained_slice_group_set(decvid, stream);
- break;
- default:
- /* reserved_sei_message */
- for (i = 0; i < payloadSize; i++)
- {
- BitstreamFlushBits(stream, 8);
- }
- break;
- }
- BitstreamByteAlign(stream);
- return status;
-}
-
-AVCDec_Status buffering_period(AVCDecObject *decvid, AVCDecBitstream *stream)
-{
- AVCSeqParamSet *currSPS;
- uint seq_parameter_set_id;
- uint temp;
- uint i;
- ue_v(stream, &seq_parameter_set_id);
- if (seq_parameter_set_id > 31)
- {
- return AVCDEC_FAIL;
- }
-
-// decvid->common->seq_parameter_set_id = seq_parameter_set_id;
-
- currSPS = decvid->seqParams[seq_parameter_set_id];
- if (currSPS->vui_parameters.nal_hrd_parameters_present_flag)
- {
- for (i = 0; i <= currSPS->vui_parameters.nal_hrd_parameters.cpb_cnt_minus1; i++)
- {
- /* initial_cpb_removal_delay[i] */
- BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);
- /*initial _cpb_removal_delay_offset[i] */
- BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);
- }
- }
-
- if (currSPS->vui_parameters.vcl_hrd_parameters_present_flag)
- {
- for (i = 0; i <= currSPS->vui_parameters.vcl_hrd_parameters.cpb_cnt_minus1; i++)
- {
- /* initial_cpb_removal_delay[i] */
- BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);
- /*initial _cpb_removal_delay_offset[i] */
- BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);
- }
- }
-
- return AVCDEC_SUCCESS;
-}
-AVCDec_Status pic_timing(AVCDecObject *decvid, AVCDecBitstream *stream)
-{
- AVCSeqParamSet *currSPS;
- uint temp, NumClockTs = 0, time_offset_length = 24, full_timestamp_flag;
- uint i;
-
- currSPS = decvid->seqParams[decvid->common->seq_parameter_set_id];
-
- if (currSPS->vui_parameters.nal_hrd_parameters_present_flag)
- {
- BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);
- BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.dpb_output_delay_length_minus1 + 1, &temp);
- time_offset_length = currSPS->vui_parameters.nal_hrd_parameters.time_offset_length;
- }
- else if (currSPS->vui_parameters.vcl_hrd_parameters_present_flag)
- {
- BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);
- BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.dpb_output_delay_length_minus1 + 1, &temp);
- time_offset_length = currSPS->vui_parameters.vcl_hrd_parameters.time_offset_length;
- }
-
- if (currSPS->vui_parameters.pic_struct_present_flag)
- {
- /* pic_struct */
- BitstreamReadBits(stream, 4, &temp);
-
- switch (temp)
- {
- case 0:
- case 1:
- case 2:
- NumClockTs = 1;
- break;
- case 3:
- case 4:
- case 7:
- NumClockTs = 2;
- break;
- case 5:
- case 6:
- case 8:
- NumClockTs = 3;
- break;
- default:
- NumClockTs = 0;
- break;
- }
-
- for (i = 0; i < NumClockTs; i++)
- {
- /* clock_timestamp_flag[i] */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* ct_type */
- BitstreamReadBits(stream, 2, &temp);
- /* nuit_field_based_flag */
- BitstreamRead1Bit(stream, &temp);
- /* counting_type */
- BitstreamReadBits(stream, 5, &temp);
- /* full_timestamp_flag */
- BitstreamRead1Bit(stream, &temp);
- full_timestamp_flag = temp;
- /* discontinuity_flag */
- BitstreamRead1Bit(stream, &temp);
- /* cnt_dropped_flag */
- BitstreamRead1Bit(stream, &temp);
- /* n_frames */
- BitstreamReadBits(stream, 8, &temp);
-
-
- if (full_timestamp_flag)
- {
- /* seconds_value */
- BitstreamReadBits(stream, 6, &temp);
- /* minutes_value */
- BitstreamReadBits(stream, 6, &temp);
- /* hours_value */
- BitstreamReadBits(stream, 5, &temp);
- }
- else
- {
- /* seconds_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* seconds_value */
- BitstreamReadBits(stream, 6, &temp);
- /* minutes_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* minutes_value */
- BitstreamReadBits(stream, 6, &temp);
-
- /* hourss_flag */
- BitstreamRead1Bit(stream, &temp);
-
- if (temp)
- {
- /* hours_value */
- BitstreamReadBits(stream, 5, &temp);
- }
-
- }
- }
- }
-
- if (time_offset_length)
- {
- /* time_offset */
- BitstreamReadBits(stream, time_offset_length, &temp);
- }
- else
- {
- /* time_offset */
- temp = 0;
- }
- }
- }
- }
- return AVCDEC_SUCCESS;
-}
-AVCDec_Status recovery_point(AVCDecObject *decvid, AVCDecBitstream *stream)
-{
- OSCL_UNUSED_ARG(decvid);
- uint temp;
- /* recover_frame_cnt */
- ue_v(stream, &temp);
- /* exact_match_flag */
- BitstreamRead1Bit(stream, &temp);
- /* broken_link_flag */
- BitstreamRead1Bit(stream, &temp);
- /* changing slic_group_idc */
- BitstreamReadBits(stream, 2, &temp);
- return AVCDEC_SUCCESS;
-}
-AVCDec_Status dec_ref_pic_marking_repetition(AVCDecObject *decvid, AVCDecBitstream *stream)
-{
- AVCSeqParamSet *currSPS;
- uint temp;
- currSPS = decvid->seqParams[decvid->common->seq_parameter_set_id];
- /* original_idr_flag */
- BitstreamRead1Bit(stream, &temp);
- /* original_frame_num */
- ue_v(stream, &temp);
- if (currSPS->frame_mbs_only_flag == 0)
- {
- /* original_field_pic_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* original_bottom_field_flag */
- BitstreamRead1Bit(stream, &temp);
- }
- }
-
- /* dec_ref_pic_marking(video,stream,sliceHdr); */
-
-
- return AVCDEC_SUCCESS;
-}
-AVCDec_Status motion_constrained_slice_group_set(AVCDecObject *decvid, AVCDecBitstream *stream)
-{
- OSCL_UNUSED_ARG(decvid);
- uint temp, i, numBits;
- /* num_slice_groups_in_set_minus1 */
- ue_v(stream, &temp);
-
- numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */
- i = temp;
- while (i > 0)
- {
- numBits++;
- i >>= 1;
- }
- for (i = 0; i <= temp; i++)
- {
- /* slice_group_id */
- BitstreamReadBits(stream, numBits, &temp);
- }
- /* exact_sample_value_match_flag */
- BitstreamRead1Bit(stream, &temp);
- /* pan_scan_rect_flag */
- BitstreamRead1Bit(stream, &temp);
- if (temp)
- {
- /* pan_scan_rect_id */
- ue_v(stream, &temp);
- }
-
- return AVCDEC_SUCCESS;
-}
-
diff --git a/media/libstagefright/codecs/avc/dec/src/itrans.cpp b/media/libstagefright/codecs/avc/dec/src/itrans.cpp
deleted file mode 100644
index 02c550d..0000000
--- a/media/libstagefright/codecs/avc/dec/src/itrans.cpp
+++ /dev/null
@@ -1,307 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avclib_common.h"
-
-/* input are in the first 16 elements of block,
- output must be in the location specified in Figure 8-6. */
-/* subclause 8.5.6 */
-void Intra16DCTrans(int16 *block, int Qq, int Rq)
-{
- int m0, m1, m2, m3;
- int j, offset;
- int16 *inout;
- int scale = dequant_coefres[Rq][0];
-
- inout = block;
- for (j = 0; j < 4; j++)
- {
- m0 = inout[0] + inout[4];
- m1 = inout[0] - inout[4];
- m2 = inout[8] + inout[12];
- m3 = inout[8] - inout[12];
-
-
- inout[0] = m0 + m2;
- inout[4] = m0 - m2;
- inout[8] = m1 - m3;
- inout[12] = m1 + m3;
- inout += 64;
- }
-
- inout = block;
-
- if (Qq >= 2) /* this way should be faster than JM */
- { /* they use (((m4*scale)<<(QPy/6))+2)>>2 for both cases. */
- Qq -= 2;
- for (j = 0; j < 4; j++)
- {
- m0 = inout[0] + inout[64];
- m1 = inout[0] - inout[64];
- m2 = inout[128] + inout[192];
- m3 = inout[128] - inout[192];
-
- inout[0] = ((m0 + m2) * scale) << Qq;
- inout[64] = ((m0 - m2) * scale) << Qq;
- inout[128] = ((m1 - m3) * scale) << Qq;
- inout[192] = ((m1 + m3) * scale) << Qq;
- inout += 4;
- }
- }
- else
- {
- Qq = 2 - Qq;
- offset = 1 << (Qq - 1);
-
- for (j = 0; j < 4; j++)
- {
- m0 = inout[0] + inout[64];
- m1 = inout[0] - inout[64];
- m2 = inout[128] + inout[192];
- m3 = inout[128] - inout[192];
-
- inout[0] = (((m0 + m2) * scale + offset) >> Qq);
- inout[64] = (((m0 - m2) * scale + offset) >> Qq);
- inout[128] = (((m1 - m3) * scale + offset) >> Qq);
- inout[192] = (((m1 + m3) * scale + offset) >> Qq);
- inout += 4;
- }
- }
-
- return ;
-}
-
-/* see subclase 8.5.8 */
-void itrans(int16 *block, uint8 *pred, uint8 *cur, int width)
-{
- int e0, e1, e2, e3; /* note, at every step of the calculation, these values */
- /* shall never exceed 16bit sign value, but we don't check */
- int i; /* to save the cycles. */
- int16 *inout;
-
- inout = block;
-
- for (i = 4; i > 0; i--)
- {
- e0 = inout[0] + inout[2];
- e1 = inout[0] - inout[2];
- e2 = (inout[1] >> 1) - inout[3];
- e3 = inout[1] + (inout[3] >> 1);
-
- inout[0] = e0 + e3;
- inout[1] = e1 + e2;
- inout[2] = e1 - e2;
- inout[3] = e0 - e3;
-
- inout += 16;
- }
-
- for (i = 4; i > 0; i--)
- {
- e0 = block[0] + block[32];
- e1 = block[0] - block[32];
- e2 = (block[16] >> 1) - block[48];
- e3 = block[16] + (block[48] >> 1);
-
- e0 += e3;
- e3 = (e0 - (e3 << 1)); /* e0-e3 */
- e1 += e2;
- e2 = (e1 - (e2 << 1)); /* e1-e2 */
- e0 += 32;
- e1 += 32;
- e2 += 32;
- e3 += 32;
-#ifdef USE_PRED_BLOCK
- e0 = pred[0] + (e0 >> 6);
- if ((uint)e0 > 0xFF) e0 = 0xFF & (~(e0 >> 31)); /* clip */
- e1 = pred[20] + (e1 >> 6);
- if ((uint)e1 > 0xFF) e1 = 0xFF & (~(e1 >> 31)); /* clip */
- e2 = pred[40] + (e2 >> 6);
- if ((uint)e2 > 0xFF) e2 = 0xFF & (~(e2 >> 31)); /* clip */
- e3 = pred[60] + (e3 >> 6);
- if ((uint)e3 > 0xFF) e3 = 0xFF & (~(e3 >> 31)); /* clip */
- *cur = e0;
- *(cur += width) = e1;
- *(cur += width) = e2;
- cur[width] = e3;
- cur -= (width << 1);
- cur++;
- pred++;
-#else
- OSCL_UNUSED_ARG(pred);
-
- e0 = *cur + (e0 >> 6);
- if ((uint)e0 > 0xFF) e0 = 0xFF & (~(e0 >> 31)); /* clip */
- *cur = e0;
- e1 = *(cur += width) + (e1 >> 6);
- if ((uint)e1 > 0xFF) e1 = 0xFF & (~(e1 >> 31)); /* clip */
- *cur = e1;
- e2 = *(cur += width) + (e2 >> 6);
- if ((uint)e2 > 0xFF) e2 = 0xFF & (~(e2 >> 31)); /* clip */
- *cur = e2;
- e3 = cur[width] + (e3 >> 6);
- if ((uint)e3 > 0xFF) e3 = 0xFF & (~(e3 >> 31)); /* clip */
- cur[width] = e3;
- cur -= (width << 1);
- cur++;
-#endif
- block++;
- }
-
- return ;
-}
-
-/* see subclase 8.5.8 */
-void ictrans(int16 *block, uint8 *pred, uint8 *cur, int width)
-{
- int e0, e1, e2, e3; /* note, at every step of the calculation, these values */
- /* shall never exceed 16bit sign value, but we don't check */
- int i; /* to save the cycles. */
- int16 *inout;
-
- inout = block;
-
- for (i = 4; i > 0; i--)
- {
- e0 = inout[0] + inout[2];
- e1 = inout[0] - inout[2];
- e2 = (inout[1] >> 1) - inout[3];
- e3 = inout[1] + (inout[3] >> 1);
-
- inout[0] = e0 + e3;
- inout[1] = e1 + e2;
- inout[2] = e1 - e2;
- inout[3] = e0 - e3;
-
- inout += 16;
- }
-
- for (i = 4; i > 0; i--)
- {
- e0 = block[0] + block[32];
- e1 = block[0] - block[32];
- e2 = (block[16] >> 1) - block[48];
- e3 = block[16] + (block[48] >> 1);
-
- e0 += e3;
- e3 = (e0 - (e3 << 1)); /* e0-e3 */
- e1 += e2;
- e2 = (e1 - (e2 << 1)); /* e1-e2 */
- e0 += 32;
- e1 += 32;
- e2 += 32;
- e3 += 32;
-#ifdef USE_PRED_BLOCK
- e0 = pred[0] + (e0 >> 6);
- if ((uint)e0 > 0xFF) e0 = 0xFF & (~(e0 >> 31)); /* clip */
- e1 = pred[12] + (e1 >> 6);
- if ((uint)e1 > 0xFF) e1 = 0xFF & (~(e1 >> 31)); /* clip */
- e2 = pred[24] + (e2 >> 6);
- if ((uint)e2 > 0xFF) e2 = 0xFF & (~(e2 >> 31)); /* clip */
- e3 = pred[36] + (e3 >> 6);
- if ((uint)e3 > 0xFF) e3 = 0xFF & (~(e3 >> 31)); /* clip */
- *cur = e0;
- *(cur += width) = e1;
- *(cur += width) = e2;
- cur[width] = e3;
- cur -= (width << 1);
- cur++;
- pred++;
-#else
- OSCL_UNUSED_ARG(pred);
-
- e0 = *cur + (e0 >> 6);
- if ((uint)e0 > 0xFF) e0 = 0xFF & (~(e0 >> 31)); /* clip */
- *cur = e0;
- e1 = *(cur += width) + (e1 >> 6);
- if ((uint)e1 > 0xFF) e1 = 0xFF & (~(e1 >> 31)); /* clip */
- *cur = e1;
- e2 = *(cur += width) + (e2 >> 6);
- if ((uint)e2 > 0xFF) e2 = 0xFF & (~(e2 >> 31)); /* clip */
- *cur = e2;
- e3 = cur[width] + (e3 >> 6);
- if ((uint)e3 > 0xFF) e3 = 0xFF & (~(e3 >> 31)); /* clip */
- cur[width] = e3;
- cur -= (width << 1);
- cur++;
-#endif
- block++;
- }
-
- return ;
-}
-
-/* see subclause 8.5.7 */
-void ChromaDCTrans(int16 *block, int Qq, int Rq)
-{
- int c00, c01, c10, c11;
- int f0, f1, f2, f3;
- int scale = dequant_coefres[Rq][0];
-
- c00 = block[0] + block[4];
- c01 = block[0] - block[4];
- c10 = block[64] + block[68];
- c11 = block[64] - block[68];
-
- f0 = c00 + c10;
- f1 = c01 + c11;
- f2 = c00 - c10;
- f3 = c01 - c11;
-
- if (Qq >= 1)
- {
- Qq -= 1;
- block[0] = (f0 * scale) << Qq;
- block[4] = (f1 * scale) << Qq;
- block[64] = (f2 * scale) << Qq;
- block[68] = (f3 * scale) << Qq;
- }
- else
- {
- block[0] = (f0 * scale) >> 1;
- block[4] = (f1 * scale) >> 1;
- block[64] = (f2 * scale) >> 1;
- block[68] = (f3 * scale) >> 1;
- }
-
- return ;
-}
-
-
-void copy_block(uint8 *pred, uint8 *cur, int width, int pred_pitch)
-{
- uint32 temp;
-
- temp = *((uint32*)pred);
- pred += pred_pitch;
- *((uint32*)cur) = temp;
- cur += width;
- temp = *((uint32*)pred);
- pred += pred_pitch;
- *((uint32*)cur) = temp;
- cur += width;
- temp = *((uint32*)pred);
- pred += pred_pitch;
- *((uint32*)cur) = temp;
- cur += width;
- temp = *((uint32*)pred);
- *((uint32*)cur) = temp;
-
- return ;
-}
-
-
diff --git a/media/libstagefright/codecs/avc/dec/src/pred_inter.cpp b/media/libstagefright/codecs/avc/dec/src/pred_inter.cpp
deleted file mode 100644
index ba36c37..0000000
--- a/media/libstagefright/codecs/avc/dec/src/pred_inter.cpp
+++ /dev/null
@@ -1,2329 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcdec_lib.h"
-
-
-#define CLIP_RESULT(x) if((uint)x > 0xFF){ \
- x = 0xFF & (~(x>>31));}
-
-/* (blkwidth << 2) + (dy << 1) + dx */
-static void (*const ChromaMC_SIMD[8])(uint8 *, int , int , int , uint8 *, int, int , int) =
-{
- &ChromaFullMC_SIMD,
- &ChromaHorizontalMC_SIMD,
- &ChromaVerticalMC_SIMD,
- &ChromaDiagonalMC_SIMD,
- &ChromaFullMC_SIMD,
- &ChromaHorizontalMC2_SIMD,
- &ChromaVerticalMC2_SIMD,
- &ChromaDiagonalMC2_SIMD
-};
-/* Perform motion prediction and compensation with residue if exist. */
-void InterMBPrediction(AVCCommonObj *video)
-{
- AVCMacroblock *currMB = video->currMB;
- AVCPictureData *currPic = video->currPic;
- int mbPartIdx, subMbPartIdx;
- int ref_idx;
- int offset_MbPart_indx = 0;
- int16 *mv;
- uint32 x_pos, y_pos;
- uint8 *curL, *curCb, *curCr;
- uint8 *ref_l, *ref_Cb, *ref_Cr;
- uint8 *predBlock, *predCb, *predCr;
- int block_x, block_y, offset_x, offset_y, offsetP, offset;
- int x_position = (video->mb_x << 4);
- int y_position = (video->mb_y << 4);
- int MbHeight, MbWidth, mbPartIdx_X, mbPartIdx_Y, offset_indx;
- int picWidth = currPic->pitch;
- int picHeight = currPic->height;
- int16 *dataBlock;
- uint32 cbp4x4;
- uint32 tmp_word;
-
- tmp_word = y_position * picWidth;
- curL = currPic->Sl + tmp_word + x_position;
- offset = (tmp_word >> 2) + (x_position >> 1);
- curCb = currPic->Scb + offset;
- curCr = currPic->Scr + offset;
-
-#ifdef USE_PRED_BLOCK
- predBlock = video->pred + 84;
- predCb = video->pred + 452;
- predCr = video->pred + 596;
-#else
- predBlock = curL;
- predCb = curCb;
- predCr = curCr;
-#endif
-
- GetMotionVectorPredictor(video, false);
-
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- MbHeight = currMB->SubMbPartHeight[mbPartIdx];
- MbWidth = currMB->SubMbPartWidth[mbPartIdx];
- mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1);
- mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) >> 1;
- ref_idx = currMB->ref_idx_L0[(mbPartIdx_Y << 1) + mbPartIdx_X];
- offset_indx = 0;
-
- ref_l = video->RefPicList0[ref_idx]->Sl;
- ref_Cb = video->RefPicList0[ref_idx]->Scb;
- ref_Cr = video->RefPicList0[ref_idx]->Scr;
-
- for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
- {
- block_x = (mbPartIdx_X << 1) + ((subMbPartIdx + offset_indx) & 1); // check this
- block_y = (mbPartIdx_Y << 1) + (((subMbPartIdx + offset_indx) >> 1) & 1);
- mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));
- offset_x = x_position + (block_x << 2);
- offset_y = y_position + (block_y << 2);
- x_pos = (offset_x << 2) + *mv++; /*quarter pel */
- y_pos = (offset_y << 2) + *mv; /*quarter pel */
-
- //offset = offset_y * currPic->width;
- //offsetC = (offset >> 2) + (offset_x >> 1);
-#ifdef USE_PRED_BLOCK
- offsetP = (block_y * 80) + (block_x << 2);
- LumaMotionComp(ref_l, picWidth, picHeight, x_pos, y_pos,
- /*comp_Sl + offset + offset_x,*/
- predBlock + offsetP, 20, MbWidth, MbHeight);
-#else
- offsetP = (block_y << 2) * picWidth + (block_x << 2);
- LumaMotionComp(ref_l, picWidth, picHeight, x_pos, y_pos,
- /*comp_Sl + offset + offset_x,*/
- predBlock + offsetP, picWidth, MbWidth, MbHeight);
-#endif
-
-#ifdef USE_PRED_BLOCK
- offsetP = (block_y * 24) + (block_x << 1);
- ChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
- /*comp_Scb + offsetC,*/
- predCb + offsetP, 12, MbWidth >> 1, MbHeight >> 1);
- ChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
- /*comp_Scr + offsetC,*/
- predCr + offsetP, 12, MbWidth >> 1, MbHeight >> 1);
-#else
- offsetP = (block_y * picWidth) + (block_x << 1);
- ChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
- /*comp_Scb + offsetC,*/
- predCb + offsetP, picWidth >> 1, MbWidth >> 1, MbHeight >> 1);
- ChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
- /*comp_Scr + offsetC,*/
- predCr + offsetP, picWidth >> 1, MbWidth >> 1, MbHeight >> 1);
-#endif
-
- offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;
- }
- offset_MbPart_indx = currMB->MbPartWidth >> 4;
- }
-
- /* used in decoder, used to be if(!encFlag) */
-
- /* transform in raster scan order */
- dataBlock = video->block;
- cbp4x4 = video->cbp4x4;
- /* luma */
- for (block_y = 4; block_y > 0; block_y--)
- {
- for (block_x = 4; block_x > 0; block_x--)
- {
-#ifdef USE_PRED_BLOCK
- if (cbp4x4&1)
- {
- itrans(dataBlock, predBlock, predBlock, 20);
- }
-#else
- if (cbp4x4&1)
- {
- itrans(dataBlock, curL, curL, picWidth);
- }
-#endif
- cbp4x4 >>= 1;
- dataBlock += 4;
-#ifdef USE_PRED_BLOCK
- predBlock += 4;
-#else
- curL += 4;
-#endif
- }
- dataBlock += 48;
-#ifdef USE_PRED_BLOCK
- predBlock += 64;
-#else
- curL += ((picWidth << 2) - 16);
-#endif
- }
-
- /* chroma */
- picWidth = (picWidth >> 1);
- for (block_y = 2; block_y > 0; block_y--)
- {
- for (block_x = 2; block_x > 0; block_x--)
- {
-#ifdef USE_PRED_BLOCK
- if (cbp4x4&1)
- {
- ictrans(dataBlock, predCb, predCb, 12);
- }
-#else
- if (cbp4x4&1)
- {
- ictrans(dataBlock, curCb, curCb, picWidth);
- }
-#endif
- cbp4x4 >>= 1;
- dataBlock += 4;
-#ifdef USE_PRED_BLOCK
- predCb += 4;
-#else
- curCb += 4;
-#endif
- }
- for (block_x = 2; block_x > 0; block_x--)
- {
-#ifdef USE_PRED_BLOCK
- if (cbp4x4&1)
- {
- ictrans(dataBlock, predCr, predCr, 12);
- }
-#else
- if (cbp4x4&1)
- {
- ictrans(dataBlock, curCr, curCr, picWidth);
- }
-#endif
- cbp4x4 >>= 1;
- dataBlock += 4;
-#ifdef USE_PRED_BLOCK
- predCr += 4;
-#else
- curCr += 4;
-#endif
- }
- dataBlock += 48;
-#ifdef USE_PRED_BLOCK
- predCb += 40;
- predCr += 40;
-#else
- curCb += ((picWidth << 2) - 8);
- curCr += ((picWidth << 2) - 8);
-#endif
- }
-
-#ifdef MB_BASED_DEBLOCK
- SaveNeighborForIntraPred(video, offset);
-#endif
-
- return ;
-}
-
-
-/* preform the actual motion comp here */
-void LumaMotionComp(uint8 *ref, int picwidth, int picheight,
- int x_pos, int y_pos,
- uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight)
-{
- int dx, dy;
- uint8 temp[24][24]; /* for padding, make the size multiple of 4 for packing */
- int temp2[21][21]; /* for intermediate results */
- uint8 *ref2;
-
- dx = x_pos & 3;
- dy = y_pos & 3;
- x_pos = x_pos >> 2; /* round it to full-pel resolution */
- y_pos = y_pos >> 2;
-
- /* perform actual motion compensation */
- if (dx == 0 && dy == 0)
- { /* fullpel position *//* G */
- if (x_pos >= 0 && x_pos + blkwidth <= picwidth && y_pos >= 0 && y_pos + blkheight <= picheight)
- {
- ref += y_pos * picwidth + x_pos;
- FullPelMC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight);
- }
- else
- {
- CreatePad(ref, picwidth, picheight, x_pos, y_pos, &temp[0][0], blkwidth, blkheight);
- FullPelMC(&temp[0][0], 24, pred, pred_pitch, blkwidth, blkheight);
- }
-
- } /* other positions */
- else if (dy == 0)
- { /* no vertical interpolation *//* a,b,c*/
-
- if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos >= 0 && y_pos + blkheight <= picheight)
- {
- ref += y_pos * picwidth + x_pos;
-
- HorzInterp1MC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight, dx);
- }
- else /* need padding */
- {
- CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos, &temp[0][0], blkwidth + 5, blkheight);
-
- HorzInterp1MC(&temp[0][2], 24, pred, pred_pitch, blkwidth, blkheight, dx);
- }
- }
- else if (dx == 0)
- { /*no horizontal interpolation *//* d,h,n */
-
- if (x_pos >= 0 && x_pos + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)
- {
- ref += y_pos * picwidth + x_pos;
-
- VertInterp1MC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight, dy);
- }
- else /* need padding */
- {
- CreatePad(ref, picwidth, picheight, x_pos, y_pos - 2, &temp[0][0], blkwidth, blkheight + 5);
-
- VertInterp1MC(&temp[2][0], 24, pred, pred_pitch, blkwidth, blkheight, dy);
- }
- }
- else if (dy == 2)
- { /* horizontal cross *//* i, j, k */
-
- if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)
- {
- ref += y_pos * picwidth + x_pos - 2; /* move to the left 2 pixels */
-
- VertInterp2MC(ref, picwidth, &temp2[0][0], 21, blkwidth + 5, blkheight);
-
- HorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);
- }
- else /* need padding */
- {
- CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5, blkheight + 5);
-
- VertInterp2MC(&temp[2][0], 24, &temp2[0][0], 21, blkwidth + 5, blkheight);
-
- HorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);
- }
- }
- else if (dx == 2)
- { /* vertical cross */ /* f,q */
-
- if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)
- {
- ref += (y_pos - 2) * picwidth + x_pos; /* move to up 2 lines */
-
- HorzInterp3MC(ref, picwidth, &temp2[0][0], 21, blkwidth, blkheight + 5);
- VertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);
- }
- else /* need padding */
- {
- CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5, blkheight + 5);
- HorzInterp3MC(&temp[0][2], 24, &temp2[0][0], 21, blkwidth, blkheight + 5);
- VertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);
- }
- }
- else
- { /* diagonal *//* e,g,p,r */
-
- if (x_pos - 2 >= 0 && x_pos + 3 + (dx / 2) + blkwidth <= picwidth &&
- y_pos - 2 >= 0 && y_pos + 3 + blkheight + (dy / 2) <= picheight)
- {
- ref2 = ref + (y_pos + (dy / 2)) * picwidth + x_pos;
-
- ref += (y_pos * picwidth) + x_pos + (dx / 2);
-
- DiagonalInterpMC(ref2, ref, picwidth, pred, pred_pitch, blkwidth, blkheight);
- }
- else /* need padding */
- {
- CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5 + (dx / 2), blkheight + 5 + (dy / 2));
-
- ref2 = &temp[2 + (dy/2)][2];
-
- ref = &temp[2][2 + (dx/2)];
-
- DiagonalInterpMC(ref2, ref, 24, pred, pred_pitch, blkwidth, blkheight);
- }
- }
-
- return ;
-}
-
-void CreateAlign(uint8 *ref, int picwidth, int y_pos,
- uint8 *out, int blkwidth, int blkheight)
-{
- int i, j;
- int offset, out_offset;
- uint32 prev_pix, result, pix1, pix2, pix4;
-
- out_offset = 24 - blkwidth;
-
- //switch(x_pos&0x3){
- switch (((uint32)ref)&0x3)
- {
- case 1:
- ref += y_pos * picwidth;
- offset = picwidth - blkwidth - 3;
- for (j = 0; j < blkheight; j++)
- {
- pix1 = *ref++;
- pix2 = *((uint16*)ref);
- ref += 2;
- result = (pix2 << 8) | pix1;
-
- for (i = 3; i < blkwidth; i += 4)
- {
- pix4 = *((uint32*)ref);
- ref += 4;
- prev_pix = (pix4 << 24) & 0xFF000000; /* mask out byte belong to previous word */
- result |= prev_pix;
- *((uint32*)out) = result; /* write 4 bytes */
- out += 4;
- result = pix4 >> 8; /* for the next loop */
- }
- ref += offset;
- out += out_offset;
- }
- break;
- case 2:
- ref += y_pos * picwidth;
- offset = picwidth - blkwidth - 2;
- for (j = 0; j < blkheight; j++)
- {
- result = *((uint16*)ref);
- ref += 2;
- for (i = 2; i < blkwidth; i += 4)
- {
- pix4 = *((uint32*)ref);
- ref += 4;
- prev_pix = (pix4 << 16) & 0xFFFF0000; /* mask out byte belong to previous word */
- result |= prev_pix;
- *((uint32*)out) = result; /* write 4 bytes */
- out += 4;
- result = pix4 >> 16; /* for the next loop */
- }
- ref += offset;
- out += out_offset;
- }
- break;
- case 3:
- ref += y_pos * picwidth;
- offset = picwidth - blkwidth - 1;
- for (j = 0; j < blkheight; j++)
- {
- result = *ref++;
- for (i = 1; i < blkwidth; i += 4)
- {
- pix4 = *((uint32*)ref);
- ref += 4;
- prev_pix = (pix4 << 8) & 0xFFFFFF00; /* mask out byte belong to previous word */
- result |= prev_pix;
- *((uint32*)out) = result; /* write 4 bytes */
- out += 4;
- result = pix4 >> 24; /* for the next loop */
- }
- ref += offset;
- out += out_offset;
- }
- break;
- }
-}
-
-void CreatePad(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos,
- uint8 *out, int blkwidth, int blkheight)
-{
- int x_inc0, x_mid;
- int y_inc, y_inc0, y_inc1, y_mid;
- int i, j;
- int offset;
-
- if (x_pos < 0)
- {
- x_inc0 = 0; /* increment for the first part */
- x_mid = ((blkwidth + x_pos > 0) ? -x_pos : blkwidth); /* stopping point */
- x_pos = 0;
- }
- else if (x_pos + blkwidth > picwidth)
- {
- x_inc0 = 1; /* increasing */
- x_mid = ((picwidth > x_pos) ? picwidth - x_pos - 1 : 0); /* clip negative to zero, encode fool proof! */
- }
- else /* normal case */
- {
- x_inc0 = 1;
- x_mid = blkwidth; /* just one run */
- }
-
-
- /* boundary for y_pos, taking the result from x_pos into account */
- if (y_pos < 0)
- {
- y_inc0 = (x_inc0 ? - x_mid : -blkwidth + x_mid); /* offset depending on x_inc1 and x_inc0 */
- y_inc1 = picwidth + y_inc0;
- y_mid = ((blkheight + y_pos > 0) ? -y_pos : blkheight); /* clip to prevent memory corruption */
- y_pos = 0;
- }
- else if (y_pos + blkheight > picheight)
- {
- y_inc1 = (x_inc0 ? - x_mid : -blkwidth + x_mid); /* saturate */
- y_inc0 = picwidth + y_inc1; /* increasing */
- y_mid = ((picheight > y_pos) ? picheight - 1 - y_pos : 0);
- }
- else /* normal case */
- {
- y_inc1 = (x_inc0 ? - x_mid : -blkwidth + x_mid);
- y_inc0 = picwidth + y_inc1;
- y_mid = blkheight;
- }
-
- /* clip y_pos and x_pos */
- if (y_pos > picheight - 1) y_pos = picheight - 1;
- if (x_pos > picwidth - 1) x_pos = picwidth - 1;
-
- ref += y_pos * picwidth + x_pos;
-
- y_inc = y_inc0; /* start with top half */
-
- offset = 24 - blkwidth; /* to use in offset out */
- blkwidth -= x_mid; /* to use in the loop limit */
-
- if (x_inc0 == 0)
- {
- for (j = 0; j < blkheight; j++)
- {
- if (j == y_mid) /* put a check here to reduce the code size (for unrolling the loop) */
- {
- y_inc = y_inc1; /* switch to lower half */
- }
- for (i = x_mid; i > 0; i--) /* first or third quarter */
- {
- *out++ = *ref;
- }
- for (i = blkwidth; i > 0; i--) /* second or fourth quarter */
- {
- *out++ = *ref++;
- }
- out += offset;
- ref += y_inc;
- }
- }
- else
- {
- for (j = 0; j < blkheight; j++)
- {
- if (j == y_mid) /* put a check here to reduce the code size (for unrolling the loop) */
- {
- y_inc = y_inc1; /* switch to lower half */
- }
- for (i = x_mid; i > 0; i--) /* first or third quarter */
- {
- *out++ = *ref++;
- }
- for (i = blkwidth; i > 0; i--) /* second or fourth quarter */
- {
- *out++ = *ref;
- }
- out += offset;
- ref += y_inc;
- }
- }
-
- return ;
-}
-
-void HorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx)
-{
- uint8 *p_ref;
- uint32 *p_cur;
- uint32 tmp, pkres;
- int result, curr_offset, ref_offset;
- int j;
- int32 r0, r1, r2, r3, r4, r5;
- int32 r13, r6;
-
- p_cur = (uint32*)out; /* assume it's word aligned */
- curr_offset = (outpitch - blkwidth) >> 2;
- p_ref = in;
- ref_offset = inpitch - blkwidth;
-
- if (dx&1)
- {
- dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */
- p_ref -= 2;
- r13 = 0;
- for (j = blkheight; j > 0; j--)
- {
- tmp = (uint32)(p_ref + blkwidth);
- r0 = p_ref[0];
- r1 = p_ref[2];
- r0 |= (r1 << 16); /* 0,c,0,a */
- r1 = p_ref[1];
- r2 = p_ref[3];
- r1 |= (r2 << 16); /* 0,d,0,b */
- while ((uint32)p_ref < tmp)
- {
- r2 = *(p_ref += 4); /* move pointer to e */
- r3 = p_ref[2];
- r2 |= (r3 << 16); /* 0,g,0,e */
- r3 = p_ref[1];
- r4 = p_ref[3];
- r3 |= (r4 << 16); /* 0,h,0,f */
-
- r4 = r0 + r3; /* c+h, a+f */
- r5 = r0 + r1; /* c+d, a+b */
- r6 = r2 + r3; /* g+h, e+f */
- r5 >>= 16;
- r5 |= (r6 << 16); /* e+f, c+d */
- r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
- r4 += 0x100010; /* +16, +16 */
- r5 = r1 + r2; /* d+g, b+e */
- r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
- r4 >>= 5;
- r13 |= r4; /* check clipping */
-
- r5 = p_ref[dx+2];
- r6 = p_ref[dx+4];
- r5 |= (r6 << 16);
- r4 += r5;
- r4 += 0x10001;
- r4 = (r4 >> 1) & 0xFF00FF;
-
- r5 = p_ref[4]; /* i */
- r6 = (r5 << 16);
- r5 = r6 | (r2 >> 16);/* 0,i,0,g */
- r5 += r1; /* d+i, b+g */ /* r5 not free */
- r1 >>= 16;
- r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
- r1 += r2; /* f+g, d+e */
- r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
- r0 >>= 16;
- r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
- r0 += r3; /* e+h, c+f */
- r5 += 0x100010; /* 16,16 */
- r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
- r5 >>= 5;
- r13 |= r5; /* check clipping */
-
- r0 = p_ref[dx+3];
- r1 = p_ref[dx+5];
- r0 |= (r1 << 16);
- r5 += r0;
- r5 += 0x10001;
- r5 = (r5 >> 1) & 0xFF00FF;
-
- r4 |= (r5 << 8); /* pack them together */
- *p_cur++ = r4;
- r1 = r3;
- r0 = r2;
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
-
- if (r13&0xFF000700) /* need clipping */
- {
- /* move back to the beginning of the line */
- p_ref -= (ref_offset + blkwidth); /* input */
- p_cur -= (outpitch >> 2);
-
- tmp = (uint32)(p_ref + blkwidth);
- for (; (uint32)p_ref < tmp;)
- {
-
- r0 = *p_ref++;
- r1 = *p_ref++;
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- pkres = (result >> 1) ;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- result = (result >> 1);
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- result = (result >> 1);
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- result = (result >> 1);
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 5; /* offset back to the middle of filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* move to the next line */
- }
- }
- }
- else
- {
- p_ref -= 2;
- r13 = 0;
- for (j = blkheight; j > 0; j--)
- {
- tmp = (uint32)(p_ref + blkwidth);
- r0 = p_ref[0];
- r1 = p_ref[2];
- r0 |= (r1 << 16); /* 0,c,0,a */
- r1 = p_ref[1];
- r2 = p_ref[3];
- r1 |= (r2 << 16); /* 0,d,0,b */
- while ((uint32)p_ref < tmp)
- {
- r2 = *(p_ref += 4); /* move pointer to e */
- r3 = p_ref[2];
- r2 |= (r3 << 16); /* 0,g,0,e */
- r3 = p_ref[1];
- r4 = p_ref[3];
- r3 |= (r4 << 16); /* 0,h,0,f */
-
- r4 = r0 + r3; /* c+h, a+f */
- r5 = r0 + r1; /* c+d, a+b */
- r6 = r2 + r3; /* g+h, e+f */
- r5 >>= 16;
- r5 |= (r6 << 16); /* e+f, c+d */
- r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
- r4 += 0x100010; /* +16, +16 */
- r5 = r1 + r2; /* d+g, b+e */
- r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
- r4 >>= 5;
- r13 |= r4; /* check clipping */
- r4 &= 0xFF00FF; /* mask */
-
- r5 = p_ref[4]; /* i */
- r6 = (r5 << 16);
- r5 = r6 | (r2 >> 16);/* 0,i,0,g */
- r5 += r1; /* d+i, b+g */ /* r5 not free */
- r1 >>= 16;
- r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
- r1 += r2; /* f+g, d+e */
- r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
- r0 >>= 16;
- r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
- r0 += r3; /* e+h, c+f */
- r5 += 0x100010; /* 16,16 */
- r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
- r5 >>= 5;
- r13 |= r5; /* check clipping */
- r5 &= 0xFF00FF; /* mask */
-
- r4 |= (r5 << 8); /* pack them together */
- *p_cur++ = r4;
- r1 = r3;
- r0 = r2;
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
-
- if (r13&0xFF000700) /* need clipping */
- {
- /* move back to the beginning of the line */
- p_ref -= (ref_offset + blkwidth); /* input */
- p_cur -= (outpitch >> 2);
-
- tmp = (uint32)(p_ref + blkwidth);
- for (; (uint32)p_ref < tmp;)
- {
-
- r0 = *p_ref++;
- r1 = *p_ref++;
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 5;
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset;
- }
- }
- }
-
- return ;
-}
-
-void HorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx)
-{
- int *p_ref;
- uint32 *p_cur;
- uint32 tmp, pkres;
- int result, result2, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = (uint32*)out; /* assume it's word aligned */
- curr_offset = (outpitch - blkwidth) >> 2;
- p_ref = in;
- ref_offset = inpitch - blkwidth;
-
- if (dx&1)
- {
- dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */
-
- for (j = blkheight; j > 0 ; j--)
- {
- tmp = (uint32)(p_ref + blkwidth);
- for (; (uint32)p_ref < tmp;)
- {
-
- r0 = p_ref[-2];
- r1 = p_ref[-1];
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- pkres = (result >> 1);
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 3; /* offset back to the middle of filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* move to the next line */
- }
- }
- else
- {
- for (j = blkheight; j > 0 ; j--)
- {
- tmp = (uint32)(p_ref + blkwidth);
- for (; (uint32)p_ref < tmp;)
- {
-
- r0 = p_ref[-2];
- r1 = p_ref[-1];
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 3; /* offset back to the middle of filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* move to the next line */
- }
- }
-
- return ;
-}
-
-void HorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight)
-{
- uint8 *p_ref;
- int *p_cur;
- uint32 tmp;
- int result, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = out;
- curr_offset = (outpitch - blkwidth);
- p_ref = in;
- ref_offset = inpitch - blkwidth;
-
- for (j = blkheight; j > 0 ; j--)
- {
- tmp = (uint32)(p_ref + blkwidth);
- for (; (uint32)p_ref < tmp;)
- {
-
- r0 = p_ref[-2];
- r1 = p_ref[-1];
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- *p_cur++ = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- *p_cur++ = result;
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- *p_cur++ = result;
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- *p_cur++ = result;
- p_ref -= 3; /* move back to the middle of the filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset;
- }
-
- return ;
-}
-void VertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy)
-{
- uint8 *p_cur, *p_ref;
- uint32 tmp;
- int result, curr_offset, ref_offset;
- int j, i;
- int32 r0, r1, r2, r3, r4, r5, r6, r7, r8, r13;
- uint8 tmp_in[24][24];
-
- /* not word-aligned */
- if (((uint32)in)&0x3)
- {
- CreateAlign(in, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);
- in = &tmp_in[2][0];
- inpitch = 24;
- }
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
- ref_offset = blkheight * inpitch; /* for limit */
-
- curr_offset += 3;
-
- if (dy&1)
- {
- dy = (dy >> 1) ? 0 : -inpitch;
-
- for (j = 0; j < blkwidth; j += 4, in += 4)
- {
- r13 = 0;
- p_ref = in;
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = (uint32)(p_ref + ref_offset); /* limit */
- while ((uint32)p_ref < tmp) /* the loop un-rolled */
- {
- r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
- p_ref += inpitch;
- r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
- r0 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
-
- r0 += r1;
- r6 += r7;
-
- r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 += 20 * r1;
- r6 += 20 * r7;
- r0 += 0x100010;
- r6 += 0x100010;
-
- r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 -= 5 * r1;
- r6 -= 5 * r7;
-
- r0 >>= 5;
- r6 >>= 5;
- /* clip */
- r13 |= r6;
- r13 |= r0;
- //CLIPPACK(r6,result)
-
- r1 = *((uint32*)(p_ref + dy));
- r2 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r0 += r1;
- r6 += r2;
- r0 += 0x10001;
- r6 += 0x10001;
- r0 = (r0 >> 1) & 0xFF00FF;
- r6 = (r6 >> 1) & 0xFF00FF;
-
- r0 |= (r6 << 8); /* pack it back */
- *((uint32*)(p_cur += outpitch)) = r0;
- }
- p_cur += curr_offset; /* offset to the next pixel */
- if (r13 & 0xFF000700) /* this column need clipping */
- {
- p_cur -= 4;
- for (i = 0; i < 4; i++)
- {
- p_ref = in + i;
- p_cur -= outpitch; /* compensate for the first offset */
-
- tmp = (uint32)(p_ref + ref_offset); /* limit */
- while ((uint32)p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += (curr_offset - 3);
- }
- }
- }
- }
- else
- {
- for (j = 0; j < blkwidth; j += 4, in += 4)
- {
- r13 = 0;
- p_ref = in;
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = (uint32)(p_ref + ref_offset); /* limit */
- while ((uint32)p_ref < tmp) /* the loop un-rolled */
- {
- r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
- p_ref += inpitch;
- r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
- r0 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
-
- r0 += r1;
- r6 += r7;
-
- r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 += 20 * r1;
- r6 += 20 * r7;
- r0 += 0x100010;
- r6 += 0x100010;
-
- r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 -= 5 * r1;
- r6 -= 5 * r7;
-
- r0 >>= 5;
- r6 >>= 5;
- /* clip */
- r13 |= r6;
- r13 |= r0;
- //CLIPPACK(r6,result)
- r0 &= 0xFF00FF;
- r6 &= 0xFF00FF;
- r0 |= (r6 << 8); /* pack it back */
- *((uint32*)(p_cur += outpitch)) = r0;
- }
- p_cur += curr_offset; /* offset to the next pixel */
- if (r13 & 0xFF000700) /* this column need clipping */
- {
- p_cur -= 4;
- for (i = 0; i < 4; i++)
- {
- p_ref = in + i;
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = (uint32)(p_ref + ref_offset); /* limit */
- while ((uint32)p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += (curr_offset - 3);
- }
- }
- }
- }
-
- return ;
-}
-
-void VertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight)
-{
- int *p_cur;
- uint8 *p_ref;
- uint32 tmp;
- int result, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
- ref_offset = blkheight * inpitch; /* for limit */
-
- for (j = 0; j < blkwidth; j++)
- {
- p_cur -= outpitch; /* compensate for the first offset */
- p_ref = in++;
-
- tmp = (uint32)(p_ref + ref_offset); /* limit */
- while ((uint32)p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += curr_offset;
- }
-
- return ;
-}
-
-void VertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy)
-{
- uint8 *p_cur;
- int *p_ref;
- uint32 tmp;
- int result, result2, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
- ref_offset = blkheight * inpitch; /* for limit */
-
- if (dy&1)
- {
- dy = (dy >> 1) ? -(inpitch << 1) : -(inpitch << 1) - inpitch;
-
- for (j = 0; j < blkwidth; j++)
- {
- p_cur -= outpitch; /* compensate for the first offset */
- p_ref = in++;
-
- tmp = (uint32)(p_ref + ref_offset); /* limit */
- while ((uint32)p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += curr_offset;
- }
- }
- else
- {
- for (j = 0; j < blkwidth; j++)
- {
- p_cur -= outpitch; /* compensate for the first offset */
- p_ref = in++;
-
- tmp = (uint32)(p_ref + ref_offset); /* limit */
- while ((uint32)p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += curr_offset;
- }
- }
-
- return ;
-}
-
-void DiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,
- uint8 *out, int outpitch,
- int blkwidth, int blkheight)
-{
- int j, i;
- int result;
- uint8 *p_cur, *p_ref, *p_tmp8;
- int curr_offset, ref_offset;
- uint8 tmp_res[24][24], tmp_in[24][24];
- uint32 *p_tmp;
- uint32 tmp, pkres, tmp_result;
- int32 r0, r1, r2, r3, r4, r5;
- int32 r6, r7, r8, r9, r10, r13;
-
- ref_offset = inpitch - blkwidth;
- p_ref = in1 - 2;
- /* perform horizontal interpolation */
- /* not word-aligned */
- /* It is faster to read 1 byte at time to avoid calling CreateAlign */
- /* if(((uint32)p_ref)&0x3)
- {
- CreateAlign(p_ref,inpitch,0,&tmp_in[0][0],blkwidth+8,blkheight);
- p_ref = &tmp_in[0][0];
- ref_offset = 24-blkwidth;
- }*/
-
- p_tmp = (uint32*) & (tmp_res[0][0]);
- for (j = blkheight; j > 0; j--)
- {
- r13 = 0;
- tmp = (uint32)(p_ref + blkwidth);
-
- //r0 = *((uint32*)p_ref); /* d,c,b,a */
- //r1 = (r0>>8)&0xFF00FF; /* 0,d,0,b */
- //r0 &= 0xFF00FF; /* 0,c,0,a */
- /* It is faster to read 1 byte at a time, */
- r0 = p_ref[0];
- r1 = p_ref[2];
- r0 |= (r1 << 16); /* 0,c,0,a */
- r1 = p_ref[1];
- r2 = p_ref[3];
- r1 |= (r2 << 16); /* 0,d,0,b */
-
- while ((uint32)p_ref < tmp)
- {
- //r2 = *((uint32*)(p_ref+=4));/* h,g,f,e */
- //r3 = (r2>>8)&0xFF00FF; /* 0,h,0,f */
- //r2 &= 0xFF00FF; /* 0,g,0,e */
- /* It is faster to read 1 byte at a time, */
- r2 = *(p_ref += 4);
- r3 = p_ref[2];
- r2 |= (r3 << 16); /* 0,g,0,e */
- r3 = p_ref[1];
- r4 = p_ref[3];
- r3 |= (r4 << 16); /* 0,h,0,f */
-
- r4 = r0 + r3; /* c+h, a+f */
- r5 = r0 + r1; /* c+d, a+b */
- r6 = r2 + r3; /* g+h, e+f */
- r5 >>= 16;
- r5 |= (r6 << 16); /* e+f, c+d */
- r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
- r4 += 0x100010; /* +16, +16 */
- r5 = r1 + r2; /* d+g, b+e */
- r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
- r4 >>= 5;
- r13 |= r4; /* check clipping */
- r4 &= 0xFF00FF; /* mask */
-
- r5 = p_ref[4]; /* i */
- r6 = (r5 << 16);
- r5 = r6 | (r2 >> 16);/* 0,i,0,g */
- r5 += r1; /* d+i, b+g */ /* r5 not free */
- r1 >>= 16;
- r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
- r1 += r2; /* f+g, d+e */
- r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
- r0 >>= 16;
- r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
- r0 += r3; /* e+h, c+f */
- r5 += 0x100010; /* 16,16 */
- r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
- r5 >>= 5;
- r13 |= r5; /* check clipping */
- r5 &= 0xFF00FF; /* mask */
-
- r4 |= (r5 << 8); /* pack them together */
- *p_tmp++ = r4;
- r1 = r3;
- r0 = r2;
- }
- p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
-
- if (r13&0xFF000700) /* need clipping */
- {
- /* move back to the beginning of the line */
- p_ref -= (ref_offset + blkwidth); /* input */
- p_tmp -= 6; /* intermediate output */
- tmp = (uint32)(p_ref + blkwidth);
- while ((uint32)p_ref < tmp)
- {
- r0 = *p_ref++;
- r1 = *p_ref++;
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 24);
-
- *p_tmp++ = pkres; /* write 4 pixel */
- p_ref -= 5;
- }
- p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
- }
- }
-
- /* perform vertical interpolation */
- /* not word-aligned */
- if (((uint32)in2)&0x3)
- {
- CreateAlign(in2, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);
- in2 = &tmp_in[2][0];
- inpitch = 24;
- }
-
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically up and one pixel right */
- pkres = blkheight * inpitch; /* reuse it for limit */
-
- curr_offset += 3;
-
- for (j = 0; j < blkwidth; j += 4, in2 += 4)
- {
- r13 = 0;
- p_ref = in2;
- p_tmp8 = &(tmp_res[0][j]); /* intermediate result */
- p_tmp8 -= 24; /* compensate for the first offset */
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = (uint32)(p_ref + pkres); /* limit */
- while ((uint32)p_ref < tmp) /* the loop un-rolled */
- {
- /* Read 1 byte at a time is too slow, too many read and pack ops, need to call CreateAlign, */
- /*p_ref8 = p_ref-(inpitch<<1); r0 = p_ref8[0]; r1 = p_ref8[2];
- r0 |= (r1<<16); r6 = p_ref8[1]; r1 = p_ref8[3];
- r6 |= (r1<<16); p_ref+=inpitch; */
- r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
- p_ref += inpitch;
- r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
- r0 &= 0xFF00FF;
-
- /*p_ref8 = p_ref+(inpitch<<1);
- r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16);
- r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/
- r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
-
- r0 += r1;
- r6 += r7;
-
- /*r2 = p_ref[0]; r8 = p_ref[2]; r2 |= (r8<<16);
- r8 = p_ref[1]; r1 = p_ref[3]; r8 |= (r1<<16);*/
- r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- /*p_ref8 = p_ref-inpitch; r1 = p_ref8[0]; r7 = p_ref8[2];
- r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1];
- r2 = p_ref8[3]; r7 |= (r2<<16);*/
- r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 += 20 * r1;
- r6 += 20 * r7;
- r0 += 0x100010;
- r6 += 0x100010;
-
- /*p_ref8 = p_ref-(inpitch<<1); r2 = p_ref8[0]; r8 = p_ref8[2];
- r2 |= (r8<<16); r8 = p_ref8[1]; r1 = p_ref8[3]; r8 |= (r1<<16);*/
- r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- /*p_ref8 = p_ref+inpitch; r1 = p_ref8[0]; r7 = p_ref8[2];
- r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1];
- r2 = p_ref8[3]; r7 |= (r2<<16);*/
- r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 -= 5 * r1;
- r6 -= 5 * r7;
-
- r0 >>= 5;
- r6 >>= 5;
- /* clip */
- r13 |= r6;
- r13 |= r0;
- //CLIPPACK(r6,result)
- /* add with horizontal results */
- r10 = *((uint32*)(p_tmp8 += 24));
- r9 = (r10 >> 8) & 0xFF00FF;
- r10 &= 0xFF00FF;
-
- r0 += r10;
- r0 += 0x10001;
- r0 = (r0 >> 1) & 0xFF00FF; /* mask to 8 bytes */
-
- r6 += r9;
- r6 += 0x10001;
- r6 = (r6 >> 1) & 0xFF00FF; /* mask to 8 bytes */
-
- r0 |= (r6 << 8); /* pack it back */
- *((uint32*)(p_cur += outpitch)) = r0;
- }
- p_cur += curr_offset; /* offset to the next pixel */
- if (r13 & 0xFF000700) /* this column need clipping */
- {
- p_cur -= 4;
- for (i = 0; i < 4; i++)
- {
- p_ref = in2 + i;
- p_tmp8 = &(tmp_res[0][j+i]); /* intermediate result */
- p_tmp8 -= 24; /* compensate for the first offset */
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = (uint32)(p_ref + pkres); /* limit */
- while ((uint32)p_ref < tmp) /* the loop un-rolled */
- {
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* modify pointer before loading */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* intermediate result */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* intermediate result */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* intermediate result */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += (curr_offset - 3);
- }
- }
- }
-
- return ;
-}
-
-/* position G */
-void FullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight)
-{
- int i, j;
- int offset_in = inpitch - blkwidth;
- int offset_out = outpitch - blkwidth;
- uint32 temp;
- uint8 byte;
-
- if (((uint32)in)&3)
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 4)
- {
- temp = *in++;
- byte = *in++;
- temp |= (byte << 8);
- byte = *in++;
- temp |= (byte << 16);
- byte = *in++;
- temp |= (byte << 24);
-
- *((uint32*)out) = temp; /* write 4 bytes */
- out += 4;
- }
- out += offset_out;
- in += offset_in;
- }
- }
- else
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 4)
- {
- temp = *((uint32*)in);
- *((uint32*)out) = temp;
- in += 4;
- out += 4;
- }
- out += offset_out;
- in += offset_in;
- }
- }
- return ;
-}
-
-void ChromaMotionComp(uint8 *ref, int picwidth, int picheight,
- int x_pos, int y_pos,
- uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight)
-{
- int dx, dy;
- int offset_dx, offset_dy;
- int index;
- uint8 temp[24][24];
-
- dx = x_pos & 7;
- dy = y_pos & 7;
- offset_dx = (dx + 7) >> 3;
- offset_dy = (dy + 7) >> 3;
- x_pos = x_pos >> 3; /* round it to full-pel resolution */
- y_pos = y_pos >> 3;
-
- if ((x_pos >= 0 && x_pos + blkwidth + offset_dx <= picwidth) && (y_pos >= 0 && y_pos + blkheight + offset_dy <= picheight))
- {
- ref += y_pos * picwidth + x_pos;
- }
- else
- {
- CreatePad(ref, picwidth, picheight, x_pos, y_pos, &temp[0][0], blkwidth + offset_dx, blkheight + offset_dy);
- ref = &temp[0][0];
- picwidth = 24;
- }
-
- index = offset_dx + (offset_dy << 1) + ((blkwidth << 1) & 0x7);
-
- (*(ChromaMC_SIMD[index]))(ref, picwidth , dx, dy, pred, pred_pitch, blkwidth, blkheight);
- return ;
-}
-
-
-/* SIMD routines, unroll the loops in vertical direction, decreasing loops (things to be done) */
-void ChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- int32 r0, r1, r2, r3, result0, result1;
- uint8 temp[288];
- uint8 *ref, *out;
- int i, j;
- int dx_8 = 8 - dx;
- int dy_8 = 8 - dy;
-
- /* horizontal first */
- out = temp;
- for (i = 0; i < blkheight + 1; i++)
- {
- ref = pRef;
- r0 = ref[0];
- for (j = 0; j < blkwidth; j += 4)
- {
- r0 |= (ref[2] << 16);
- result0 = dx_8 * r0;
-
- r1 = ref[1] | (ref[3] << 16);
- result0 += dx * r1;
- *(int32 *)out = result0;
-
- result0 = dx_8 * r1;
-
- r2 = ref[4];
- r0 = r0 >> 16;
- r1 = r0 | (r2 << 16);
- result0 += dx * r1;
- *(int32 *)(out + 16) = result0;
-
- ref += 4;
- out += 4;
- r0 = r2;
- }
- pRef += srcPitch;
- out += (32 - blkwidth);
- }
-
-// pRef -= srcPitch*(blkheight+1);
- ref = temp;
-
- for (j = 0; j < blkwidth; j += 4)
- {
- r0 = *(int32 *)ref;
- r1 = *(int32 *)(ref + 16);
- ref += 32;
- out = pOut;
- for (i = 0; i < (blkheight >> 1); i++)
- {
- result0 = dy_8 * r0 + 0x00200020;
- r2 = *(int32 *)ref;
- result0 += dy * r2;
- result0 >>= 6;
- result0 &= 0x00FF00FF;
- r0 = r2;
-
- result1 = dy_8 * r1 + 0x00200020;
- r3 = *(int32 *)(ref + 16);
- result1 += dy * r3;
- result1 >>= 6;
- result1 &= 0x00FF00FF;
- r1 = r3;
- *(int32 *)out = result0 | (result1 << 8);
- out += predPitch;
- ref += 32;
-
- result0 = dy_8 * r0 + 0x00200020;
- r2 = *(int32 *)ref;
- result0 += dy * r2;
- result0 >>= 6;
- result0 &= 0x00FF00FF;
- r0 = r2;
-
- result1 = dy_8 * r1 + 0x00200020;
- r3 = *(int32 *)(ref + 16);
- result1 += dy * r3;
- result1 >>= 6;
- result1 &= 0x00FF00FF;
- r1 = r3;
- *(int32 *)out = result0 | (result1 << 8);
- out += predPitch;
- ref += 32;
- }
- pOut += 4;
- ref = temp + 4; /* since it can only iterate twice max */
- }
- return;
-}
-
-void ChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- OSCL_UNUSED_ARG(dy);
- int32 r0, r1, r2, result0, result1;
- uint8 *ref, *out;
- int i, j;
- int dx_8 = 8 - dx;
-
- /* horizontal first */
- for (i = 0; i < blkheight; i++)
- {
- ref = pRef;
- out = pOut;
-
- r0 = ref[0];
- for (j = 0; j < blkwidth; j += 4)
- {
- r0 |= (ref[2] << 16);
- result0 = dx_8 * r0 + 0x00040004;
-
- r1 = ref[1] | (ref[3] << 16);
- result0 += dx * r1;
- result0 >>= 3;
- result0 &= 0x00FF00FF;
-
- result1 = dx_8 * r1 + 0x00040004;
-
- r2 = ref[4];
- r0 = r0 >> 16;
- r1 = r0 | (r2 << 16);
- result1 += dx * r1;
- result1 >>= 3;
- result1 &= 0x00FF00FF;
-
- *(int32 *)out = result0 | (result1 << 8);
-
- ref += 4;
- out += 4;
- r0 = r2;
- }
-
- pRef += srcPitch;
- pOut += predPitch;
- }
- return;
-}
-
-void ChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- OSCL_UNUSED_ARG(dx);
- int32 r0, r1, r2, r3, result0, result1;
- int i, j;
- uint8 *ref, *out;
- int dy_8 = 8 - dy;
- /* vertical first */
- for (i = 0; i < blkwidth; i += 4)
- {
- ref = pRef;
- out = pOut;
-
- r0 = ref[0] | (ref[2] << 16);
- r1 = ref[1] | (ref[3] << 16);
- ref += srcPitch;
- for (j = 0; j < blkheight; j++)
- {
- result0 = dy_8 * r0 + 0x00040004;
- r2 = ref[0] | (ref[2] << 16);
- result0 += dy * r2;
- result0 >>= 3;
- result0 &= 0x00FF00FF;
- r0 = r2;
-
- result1 = dy_8 * r1 + 0x00040004;
- r3 = ref[1] | (ref[3] << 16);
- result1 += dy * r3;
- result1 >>= 3;
- result1 &= 0x00FF00FF;
- r1 = r3;
- *(int32 *)out = result0 | (result1 << 8);
- ref += srcPitch;
- out += predPitch;
- }
- pOut += 4;
- pRef += 4;
- }
- return;
-}
-
-void ChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- OSCL_UNUSED_ARG(blkwidth);
- int32 r0, r1, temp0, temp1, result;
- int32 temp[9];
- int32 *out;
- int i, r_temp;
- int dy_8 = 8 - dy;
-
- /* horizontal first */
- out = temp;
- for (i = 0; i < blkheight + 1; i++)
- {
- r_temp = pRef[1];
- temp0 = (pRef[0] << 3) + dx * (r_temp - pRef[0]);
- temp1 = (r_temp << 3) + dx * (pRef[2] - r_temp);
- r0 = temp0 | (temp1 << 16);
- *out++ = r0;
- pRef += srcPitch;
- }
-
- pRef -= srcPitch * (blkheight + 1);
-
- out = temp;
-
- r0 = *out++;
-
- for (i = 0; i < blkheight; i++)
- {
- result = dy_8 * r0 + 0x00200020;
- r1 = *out++;
- result += dy * r1;
- result >>= 6;
- result &= 0x00FF00FF;
- *(int16 *)pOut = (result >> 8) | (result & 0xFF);
- r0 = r1;
- pOut += predPitch;
- }
- return;
-}
-
-void ChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- OSCL_UNUSED_ARG(dy);
- OSCL_UNUSED_ARG(blkwidth);
- int i, temp, temp0, temp1;
-
- /* horizontal first */
- for (i = 0; i < blkheight; i++)
- {
- temp = pRef[1];
- temp0 = ((pRef[0] << 3) + dx * (temp - pRef[0]) + 4) >> 3;
- temp1 = ((temp << 3) + dx * (pRef[2] - temp) + 4) >> 3;
-
- *(int16 *)pOut = temp0 | (temp1 << 8);
- pRef += srcPitch;
- pOut += predPitch;
-
- }
- return;
-}
-void ChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- OSCL_UNUSED_ARG(dx);
- OSCL_UNUSED_ARG(blkwidth);
- int32 r0, r1, result;
- int i;
- int dy_8 = 8 - dy;
- r0 = pRef[0] | (pRef[1] << 16);
- pRef += srcPitch;
- for (i = 0; i < blkheight; i++)
- {
- result = dy_8 * r0 + 0x00040004;
- r1 = pRef[0] | (pRef[1] << 16);
- result += dy * r1;
- result >>= 3;
- result &= 0x00FF00FF;
- *(int16 *)pOut = (result >> 8) | (result & 0xFF);
- r0 = r1;
- pRef += srcPitch;
- pOut += predPitch;
- }
- return;
-}
-
-void ChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- OSCL_UNUSED_ARG(dx);
- OSCL_UNUSED_ARG(dy);
- int i, j;
- int offset_in = srcPitch - blkwidth;
- int offset_out = predPitch - blkwidth;
- uint16 temp;
- uint8 byte;
-
- if (((uint32)pRef)&1)
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 2)
- {
- temp = *pRef++;
- byte = *pRef++;
- temp |= (byte << 8);
- *((uint16*)pOut) = temp; /* write 2 bytes */
- pOut += 2;
- }
- pOut += offset_out;
- pRef += offset_in;
- }
- }
- else
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 2)
- {
- temp = *((uint16*)pRef);
- *((uint16*)pOut) = temp;
- pRef += 2;
- pOut += 2;
- }
- pOut += offset_out;
- pRef += offset_in;
- }
- }
- return ;
-}
diff --git a/media/libstagefright/codecs/avc/dec/src/pred_intra.cpp b/media/libstagefright/codecs/avc/dec/src/pred_intra.cpp
deleted file mode 100644
index 0b613a4..0000000
--- a/media/libstagefright/codecs/avc/dec/src/pred_intra.cpp
+++ /dev/null
@@ -1,1786 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcdec_lib.h"
-
-#define CLIP_COMP *comp++ = (uint8)(((uint)temp>0xFF)? 0xFF&(~(temp>>31)): temp)
-#define CLIP_RESULT(x) if((uint)x > 0xFF){ \
- x = 0xFF & (~(x>>31));}
-
-
-/* We should combine the Intra4x4 functions with residual decoding and compensation */
-AVCStatus IntraMBPrediction(AVCCommonObj *video)
-{
- int component, SubBlock_indx, temp;
- AVCStatus status;
- AVCMacroblock *currMB = video->currMB;
- AVCPictureData *currPic = video->currPic;
- uint8 *curL, *curCb, *curCr;
- uint8 *comp;
- int block_x, block_y, offset;
- int16 *dataBlock = video->block;
- uint8 *predCb, *predCr;
-#ifdef USE_PRED_BLOCK
- uint8 *pred;
-#endif
- int pitch = currPic->pitch;
- uint32 cbp4x4 = video->cbp4x4;
-
- offset = (video->mb_y << 4) * pitch + (video->mb_x << 4);
- curL = currPic->Sl + offset;
-
-#ifdef USE_PRED_BLOCK
- video->pred_block = video->pred + 84; /* point to separate prediction memory */
- pred = video->pred_block;
- video->pred_pitch = 20;
-#else
- video->pred_block = curL; /* point directly to the frame buffer */
- video->pred_pitch = pitch;
-#endif
-
- if (currMB->mbMode == AVC_I4)
- {
- /* luminance first */
- block_x = block_y = 0;
- for (component = 0; component < 4; component++)
- {
- block_x = ((component & 1) << 1);
- block_y = ((component >> 1) << 1);
- comp = curL;// + (block_x<<2) + (block_y<<2)*currPic->pitch;
-
- for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++)
- {
- status = Intra_4x4(video, block_x, block_y, comp);
- if (status != AVC_SUCCESS)
- {
- return status;
- }
- /* transform following the 4x4 prediction, can't be SIMD
- with other blocks. */
-#ifdef USE_PRED_BLOCK
- if (cbp4x4&(1 << ((block_y << 2) + block_x)))
- {
- itrans(dataBlock, pred, pred, 20);
- }
-#else
- if (cbp4x4&(1 << ((block_y << 2) + block_x)))
- {
- itrans(dataBlock, comp, comp, pitch);
- }
-#endif
- temp = SubBlock_indx & 1;
- if (temp)
- {
- block_y++;
- block_x--;
- dataBlock += 60;
-#ifdef USE_PRED_BLOCK
- pred += 76;
-#else
- comp += ((pitch << 2) - 4);
-#endif
- }
- else
- {
- block_x++;
- dataBlock += 4;
-#ifdef USE_PRED_BLOCK
- pred += 4;
-#else
- comp += 4;
-#endif
- }
- }
- if (component&1)
- {
-#ifdef USE_PRED_BLOCK
- pred -= 8;
-#else
- curL += (pitch << 3) - 8;
-#endif
- dataBlock -= 8;
- }
- else
- {
-#ifdef USE_PRED_BLOCK
- pred -= 152;
-#else
- curL += 8;
-#endif
- dataBlock -= 120;
- }
- }
- cbp4x4 >>= 16;
- }
- else /* AVC_I16 */
- {
-#ifdef MB_BASED_DEBLOCK
- video->pintra_pred_top = video->intra_pred_top + (video->mb_x << 4);
- video->pintra_pred_left = video->intra_pred_left + 1;
- video->intra_pred_topleft = video->intra_pred_left[0];
- pitch = 1;
-#else
- video->pintra_pred_top = curL - pitch;
- video->pintra_pred_left = curL - 1;
- if (video->mb_y)
- {
- video->intra_pred_topleft = *(curL - pitch - 1);
- }
-#endif
- switch (currMB->i16Mode)
- {
- case AVC_I16_Vertical: /* Intra_16x16_Vertical */
- /* check availability of top */
- if (video->intraAvailB)
- {
- Intra_16x16_Vertical(video);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
- case AVC_I16_Horizontal: /* Intra_16x16_Horizontal */
- /* check availability of left */
- if (video->intraAvailA)
- {
- Intra_16x16_Horizontal(video, pitch);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
- case AVC_I16_DC: /* Intra_16x16_DC */
- Intra_16x16_DC(video, pitch);
- break;
- case AVC_I16_Plane: /* Intra_16x16_Plane */
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- Intra_16x16_Plane(video, pitch);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
- default:
- break;
- }
-
- pitch = currPic->pitch;
-
- /* transform */
- /* can go in raster scan order now */
- /* can be done in SIMD, */
- for (block_y = 4; block_y > 0; block_y--)
- {
- for (block_x = 4; block_x > 0; block_x--)
- {
-#ifdef USE_PRED_BLOCK
- if (cbp4x4&1)
- {
- itrans(dataBlock, pred, pred, 20);
- }
-#else
- if (cbp4x4&1)
- {
- itrans(dataBlock, curL, curL, pitch);
- }
-#endif
- cbp4x4 >>= 1;
- dataBlock += 4;
-#ifdef USE_PRED_BLOCK
- pred += 4;
-#else
- curL += 4;
-#endif
- }
- dataBlock += 48;
-#ifdef USE_PRED_BLOCK
- pred += 64;
-#else
- curL += ((pitch << 2) - 16);
-#endif
- }
- }
-
- offset = (offset >> 2) + (video->mb_x << 2); //((video->mb_y << 3)* pitch + (video->mb_x << 3));
- curCb = currPic->Scb + offset;
- curCr = currPic->Scr + offset;
-
-#ifdef MB_BASED_DEBLOCK
- video->pintra_pred_top_cb = video->intra_pred_top_cb + (video->mb_x << 3);
- video->pintra_pred_left_cb = video->intra_pred_left_cb + 1;
- video->intra_pred_topleft_cb = video->intra_pred_left_cb[0];
- video->pintra_pred_top_cr = video->intra_pred_top_cr + (video->mb_x << 3);
- video->pintra_pred_left_cr = video->intra_pred_left_cr + 1;
- video->intra_pred_topleft_cr = video->intra_pred_left_cr[0];
- pitch = 1;
-#else
- pitch >>= 1;
- video->pintra_pred_top_cb = curCb - pitch;
- video->pintra_pred_left_cb = curCb - 1;
- video->pintra_pred_top_cr = curCr - pitch;
- video->pintra_pred_left_cr = curCr - 1;
-
- if (video->mb_y)
- {
- video->intra_pred_topleft_cb = *(curCb - pitch - 1);
- video->intra_pred_topleft_cr = *(curCr - pitch - 1);
- }
-#endif
-
-#ifdef USE_PRED_BLOCK
- predCb = video->pred + 452;
- predCr = predCb + 144;
- video->pred_pitch = 12;
-#else
- predCb = curCb;
- predCr = curCr;
- video->pred_pitch = currPic->pitch >> 1;
-#endif
- /* chrominance */
- switch (currMB->intra_chroma_pred_mode)
- {
- case AVC_IC_DC: /* Intra_Chroma_DC */
- Intra_Chroma_DC(video, pitch, predCb, predCr);
- break;
- case AVC_IC_Horizontal: /* Intra_Chroma_Horizontal */
- if (video->intraAvailA)
- {
- /* check availability of left */
- Intra_Chroma_Horizontal(video, pitch, predCb, predCr);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
- case AVC_IC_Vertical: /* Intra_Chroma_Vertical */
- if (video->intraAvailB)
- {
- /* check availability of top */
- Intra_Chroma_Vertical(video, predCb, predCr);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
- case AVC_IC_Plane: /* Intra_Chroma_Plane */
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- /* check availability of top and left */
- Intra_Chroma_Plane(video, pitch, predCb, predCr);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
- default:
- break;
- }
-
- /* transform, done in raster scan manner */
- pitch = currPic->pitch >> 1;
-
- for (block_y = 2; block_y > 0; block_y--)
- {
- for (block_x = 2; block_x > 0; block_x--)
- {
-#ifdef USE_PRED_BLOCK
- if (cbp4x4&1)
- {
- ictrans(dataBlock, predCb, predCb, 12);
- }
-#else
- if (cbp4x4&1)
- {
- ictrans(dataBlock, curCb, curCb, pitch);
- }
-#endif
- cbp4x4 >>= 1;
- dataBlock += 4;
-#ifdef USE_PRED_BLOCK
- predCb += 4;
-#else
- curCb += 4;
-#endif
- }
- for (block_x = 2; block_x > 0; block_x--)
- {
-#ifdef USE_PRED_BLOCK
- if (cbp4x4&1)
- {
- ictrans(dataBlock, predCr, predCr, 12);
- }
-#else
- if (cbp4x4&1)
- {
- ictrans(dataBlock, curCr, curCr, pitch);
- }
-#endif
- cbp4x4 >>= 1;
- dataBlock += 4;
-#ifdef USE_PRED_BLOCK
- predCr += 4;
-#else
- curCr += 4;
-#endif
- }
- dataBlock += 48;
-#ifdef USE_PRED_BLOCK
- predCb += 40;
- predCr += 40;
-#else
- curCb += ((pitch << 2) - 8);
- curCr += ((pitch << 2) - 8);
-#endif
- }
-
-#ifdef MB_BASED_DEBLOCK
- SaveNeighborForIntraPred(video, offset);
-#endif
- return AVC_SUCCESS;
-}
-
-#ifdef MB_BASED_DEBLOCK
-void SaveNeighborForIntraPred(AVCCommonObj *video, int offset)
-{
- AVCPictureData *currPic = video->currPic;
- int pitch;
- uint8 *pred, *predCb, *predCr;
- uint8 *tmp_ptr, tmp_byte;
- uint32 tmp_word;
- int mb_x = video->mb_x;
-
- /* save the value for intra prediction */
-#ifdef USE_PRED_BLOCK
- pitch = 20;
- pred = video->pred + 384; /* bottom line for Y */
- predCb = pred + 152; /* bottom line for Cb */
- predCr = predCb + 144; /* bottom line for Cr */
-#else
- pitch = currPic->pitch;
- tmp_word = offset + (pitch << 2) - (pitch >> 1);
- predCb = currPic->Scb + tmp_word;/* bottom line for Cb */
- predCr = currPic->Scr + tmp_word;/* bottom line for Cr */
-
- offset = (offset << 2) - (mb_x << 4);
- pred = currPic->Sl + offset + (pitch << 4) - pitch;/* bottom line for Y */
-
-#endif
-
- video->intra_pred_topleft = video->intra_pred_top[(mb_x<<4)+15];
- video->intra_pred_topleft_cb = video->intra_pred_top_cb[(mb_x<<3)+7];
- video->intra_pred_topleft_cr = video->intra_pred_top_cr[(mb_x<<3)+7];
-
- /* then copy to video->intra_pred_top, intra_pred_top_cb, intra_pred_top_cr */
- /*memcpy(video->intra_pred_top + (mb_x<<4), pred, 16);
- memcpy(video->intra_pred_top_cb + (mb_x<<3), predCb, 8);
- memcpy(video->intra_pred_top_cr + (mb_x<<3), predCr, 8);*/
- tmp_ptr = video->intra_pred_top + (mb_x << 4);
- *((uint32*)tmp_ptr) = *((uint32*)pred);
- *((uint32*)(tmp_ptr + 4)) = *((uint32*)(pred + 4));
- *((uint32*)(tmp_ptr + 8)) = *((uint32*)(pred + 8));
- *((uint32*)(tmp_ptr + 12)) = *((uint32*)(pred + 12));
- tmp_ptr = video->intra_pred_top_cb + (mb_x << 3);
- *((uint32*)tmp_ptr) = *((uint32*)predCb);
- *((uint32*)(tmp_ptr + 4)) = *((uint32*)(predCb + 4));
- tmp_ptr = video->intra_pred_top_cr + (mb_x << 3);
- *((uint32*)tmp_ptr) = *((uint32*)predCr);
- *((uint32*)(tmp_ptr + 4)) = *((uint32*)(predCr + 4));
-
-
- /* now save last column */
-#ifdef USE_PRED_BLOCK
- pred = video->pred + 99; /* last column*/
-#else
- pred -= ((pitch << 4) - pitch - 15); /* last column */
-#endif
- tmp_ptr = video->intra_pred_left;
- tmp_word = video->intra_pred_topleft;
- tmp_byte = *(pred);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)tmp_ptr) = tmp_word;
- tmp_word = *(pred += pitch);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)(tmp_ptr += 4)) = tmp_word;
- tmp_word = *(pred += pitch);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)(tmp_ptr += 4)) = tmp_word;
- tmp_word = *(pred += pitch);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(pred += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)(tmp_ptr += 4)) = tmp_word;
- *(tmp_ptr += 4) = *(pred += pitch);
-
- /* now for Cb */
-#ifdef USE_PRED_BLOCK
- predCb = video->pred + 459;
- pitch = 12;
-#else
- pitch >>= 1;
- predCb -= (7 * pitch - 7);
-#endif
- tmp_ptr = video->intra_pred_left_cb;
- tmp_word = video->intra_pred_topleft_cb;
- tmp_byte = *(predCb);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(predCb += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(predCb += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)tmp_ptr) = tmp_word;
- tmp_word = *(predCb += pitch);
- tmp_byte = *(predCb += pitch);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(predCb += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(predCb += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)(tmp_ptr += 4)) = tmp_word;
- *(tmp_ptr += 4) = *(predCb += pitch);
-
- /* now for Cr */
-#ifdef USE_PRED_BLOCK
- predCr = video->pred + 603;
-#else
- predCr -= (7 * pitch - 7);
-#endif
- tmp_ptr = video->intra_pred_left_cr;
- tmp_word = video->intra_pred_topleft_cr;
- tmp_byte = *(predCr);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(predCr += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(predCr += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)tmp_ptr) = tmp_word;
- tmp_word = *(predCr += pitch);
- tmp_byte = *(predCr += pitch);
- tmp_word |= (tmp_byte << 8);
- tmp_byte = *(predCr += pitch);
- tmp_word |= (tmp_byte << 16);
- tmp_byte = *(predCr += pitch);
- tmp_word |= (tmp_byte << 24);
- *((uint32*)(tmp_ptr += 4)) = tmp_word;
- *(tmp_ptr += 4) = *(predCr += pitch);
-
- return ;
-}
-#endif /* MB_BASED_DEBLOCK */
-
-AVCStatus Intra_4x4(AVCCommonObj *video, int block_x, int block_y, uint8 *comp)
-{
- AVCMacroblock *currMB = video->currMB;
- int block_offset;
- AVCNeighborAvailability availability;
- int pitch = video->currPic->pitch;
-
-#ifdef USE_PRED_BLOCK
- block_offset = (block_y * 80) + (block_x << 2);
-#else
- block_offset = (block_y << 2) * pitch + (block_x << 2);
-#endif
-
-#ifdef MB_BASED_DEBLOCK
- /* boundary blocks use video->pred_intra_top, pred_intra_left, pred_intra_topleft */
- if (!block_x)
- {
- video->pintra_pred_left = video->intra_pred_left + 1 + (block_y << 2);
- pitch = 1;
- }
- else
- {
- video->pintra_pred_left = video->pred_block + block_offset - 1;
- pitch = video->pred_pitch;
- }
-
- if (!block_y)
- {
- video->pintra_pred_top = video->intra_pred_top + (block_x << 2) + (video->mb_x << 4);
- }
- else
- {
- video->pintra_pred_top = video->pred_block + block_offset - video->pred_pitch;
- }
-
- if (!block_x)
- {
- video->intra_pred_topleft = video->intra_pred_left[block_y<<2];
- }
- else if (!block_y)
- {
- video->intra_pred_topleft = video->intra_pred_top[(video->mb_x<<4)+(block_x<<2)-1];
- }
- else
- {
- video->intra_pred_topleft = video->pred_block[block_offset - video->pred_pitch - 1];
- }
-
-#else
- /* normal case */
- video->pintra_pred_top = comp - pitch;
- video->pintra_pred_left = comp - 1;
- if (video->mb_y || block_y)
- {
- video->intra_pred_topleft = *(comp - pitch - 1);
- }
-#endif
-
- switch (currMB->i4Mode[(block_y << 2) + block_x])
- {
- case AVC_I4_Vertical: /* Intra_4x4_Vertical */
- if (block_y > 0 || video->intraAvailB)/* to prevent out-of-bound access*/
- {
- Intra_4x4_Vertical(video, block_offset);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
-
- case AVC_I4_Horizontal: /* Intra_4x4_Horizontal */
- if (block_x || video->intraAvailA) /* to prevent out-of-bound access */
- {
- Intra_4x4_Horizontal(video, pitch, block_offset);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
-
- case AVC_I4_DC: /* Intra_4x4_DC */
- availability.left = TRUE;
- availability.top = TRUE;
- if (!block_y)
- { /* check availability up */
- availability.top = video->intraAvailB ;
- }
- if (!block_x)
- { /* check availability left */
- availability.left = video->intraAvailA ;
- }
- Intra_4x4_DC(video, pitch, block_offset, &availability);
- break;
-
- case AVC_I4_Diagonal_Down_Left: /* Intra_4x4_Diagonal_Down_Left */
- /* lookup table will be more appropriate for this case */
- if (block_y == 0 && !video->intraAvailB)
- {
- return AVC_FAIL;
- }
-
- availability.top_right = BlkTopRight[(block_y<<2) + block_x];
-
- if (availability.top_right == 2)
- {
- availability.top_right = video->intraAvailB;
- }
- else if (availability.top_right == 3)
- {
- availability.top_right = video->intraAvailC;
- }
-
- Intra_4x4_Down_Left(video, block_offset, &availability);
- break;
-
- case AVC_I4_Diagonal_Down_Right: /* Intra_4x4_Diagonal_Down_Right */
- if ((block_y && block_x) /* to prevent out-of-bound access */
- || (block_y && video->intraAvailA)
- || (block_x && video->intraAvailB)
- || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
- {
- Intra_4x4_Diagonal_Down_Right(video, pitch, block_offset);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
-
- case AVC_I4_Vertical_Right: /* Intra_4x4_Vertical_Right */
- if ((block_y && block_x) /* to prevent out-of-bound access */
- || (block_y && video->intraAvailA)
- || (block_x && video->intraAvailB)
- || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
- {
- Intra_4x4_Diagonal_Vertical_Right(video, pitch, block_offset);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
-
- case AVC_I4_Horizontal_Down: /* Intra_4x4_Horizontal_Down */
- if ((block_y && block_x) /* to prevent out-of-bound access */
- || (block_y && video->intraAvailA)
- || (block_x && video->intraAvailB)
- || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
- {
- Intra_4x4_Diagonal_Horizontal_Down(video, pitch, block_offset);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
-
- case AVC_I4_Vertical_Left: /* Intra_4x4_Vertical_Left */
- /* lookup table may be more appropriate for this case */
- if (block_y == 0 && !video->intraAvailB)
- {
- return AVC_FAIL;
- }
-
- availability.top_right = BlkTopRight[(block_y<<2) + block_x];
-
- if (availability.top_right == 2)
- {
- availability.top_right = video->intraAvailB;
- }
- else if (availability.top_right == 3)
- {
- availability.top_right = video->intraAvailC;
- }
-
- Intra_4x4_Vertical_Left(video, block_offset, &availability);
- break;
-
- case AVC_I4_Horizontal_Up: /* Intra_4x4_Horizontal_Up */
- if (block_x || video->intraAvailA)
- {
- Intra_4x4_Horizontal_Up(video, pitch, block_offset);
- }
- else
- {
- return AVC_FAIL;
- }
- break;
-
-
- default:
-
- break;
- }
-
- return AVC_SUCCESS;
-}
-
-
-/* =============================== BEGIN 4x4
-MODES======================================*/
-void Intra_4x4_Vertical(AVCCommonObj *video, int block_offset)
-{
- uint8 *comp_ref = video->pintra_pred_top;
- uint32 temp;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- /*P = (int) *comp_ref++;
- Q = (int) *comp_ref++;
- R = (int) *comp_ref++;
- S = (int) *comp_ref++;
- temp = S|(R<<8)|(Q<<16)|(P<<24);*/
- temp = *((uint32*)comp_ref);
-
- *((uint32*)pred) = temp; /* write 4 at a time */
- pred += pred_pitch;
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- *((uint32*)pred) = temp;
-
- return ;
-}
-
-void Intra_4x4_Horizontal(AVCCommonObj *video, int pitch, int block_offset)
-{
- uint8 *comp_ref = video->pintra_pred_left;
- uint32 temp;
- int P;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- P = *comp_ref;
- temp = P | (P << 8);
- temp = temp | (temp << 16);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- comp_ref += pitch;
- P = *comp_ref;
- temp = P | (P << 8);
- temp = temp | (temp << 16);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- comp_ref += pitch;
- P = *comp_ref;
- temp = P | (P << 8);
- temp = temp | (temp << 16);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- comp_ref += pitch;
- P = *comp_ref;
- temp = P | (P << 8);
- temp = temp | (temp << 16);
- *((uint32*)pred) = temp;
-
- return ;
-}
-
-void Intra_4x4_DC(AVCCommonObj *video, int pitch, int block_offset,
- AVCNeighborAvailability *availability)
-{
- uint8 *comp_ref = video->pintra_pred_left;
- uint32 temp;
- int DC;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- if (availability->left)
- {
- DC = *comp_ref;
- comp_ref += pitch;
- DC += *comp_ref;
- comp_ref += pitch;
- DC += *comp_ref;
- comp_ref += pitch;
- DC += *comp_ref;
- comp_ref = video->pintra_pred_top;
-
- if (availability->top)
- {
- DC = (comp_ref[0] + comp_ref[1] + comp_ref[2] + comp_ref[3] + DC + 4) >> 3;
- }
- else
- {
- DC = (DC + 2) >> 2;
-
- }
- }
- else if (availability->top)
- {
- comp_ref = video->pintra_pred_top;
- DC = (comp_ref[0] + comp_ref[1] + comp_ref[2] + comp_ref[3] + 2) >> 2;
-
- }
- else
- {
- DC = 128;
- }
-
- temp = DC | (DC << 8);
- temp = temp | (temp << 16);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- *((uint32*)pred) = temp;
- pred += pred_pitch;
- *((uint32*)pred) = temp;
-
- return ;
-}
-
-void Intra_4x4_Down_Left(AVCCommonObj *video, int block_offset,
- AVCNeighborAvailability *availability)
-{
- uint8 *comp_refx = video->pintra_pred_top;
- uint32 temp;
- int r0, r1, r2, r3, r4, r5, r6, r7;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- r0 = *comp_refx++;
- r1 = *comp_refx++;
- r2 = *comp_refx++;
- r3 = *comp_refx++;
- if (availability->top_right)
- {
- r4 = *comp_refx++;
- r5 = *comp_refx++;
- r6 = *comp_refx++;
- r7 = *comp_refx++;
- }
- else
- {
- r4 = r3;
- r5 = r3;
- r6 = r3;
- r7 = r3;
- }
-
- r0 += (r1 << 1);
- r0 += r2;
- r0 += 2;
- r0 >>= 2;
- r1 += (r2 << 1);
- r1 += r3;
- r1 += 2;
- r1 >>= 2;
- r2 += (r3 << 1);
- r2 += r4;
- r2 += 2;
- r2 >>= 2;
- r3 += (r4 << 1);
- r3 += r5;
- r3 += 2;
- r3 >>= 2;
- r4 += (r5 << 1);
- r4 += r6;
- r4 += 2;
- r4 >>= 2;
- r5 += (r6 << 1);
- r5 += r7;
- r5 += 2;
- r5 >>= 2;
- r6 += (3 * r7);
- r6 += 2;
- r6 >>= 2;
-
- temp = r0 | (r1 << 8);
- temp |= (r2 << 16);
- temp |= (r3 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = (temp >> 8) | (r4 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = (temp >> 8) | (r5 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = (temp >> 8) | (r6 << 24);
- *((uint32*)pred) = temp;
-
- return ;
-}
-
-void Intra_4x4_Diagonal_Down_Right(AVCCommonObj *video, int pitch, int
- block_offset)
-{
- uint8 *comp_refx = video->pintra_pred_top;
- uint8 *comp_refy = video->pintra_pred_left;
- uint32 temp;
- int P_x, Q_x, R_x, P_y, Q_y, R_y, D;
- int x0, x1, x2;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- temp = *((uint32*)comp_refx); /* read 4 bytes */
- x0 = temp & 0xFF;
- x1 = (temp >> 8) & 0xFF;
- x2 = (temp >> 16) & 0xFF;
-
- Q_x = (x0 + 2 * x1 + x2 + 2) >> 2;
- R_x = (x1 + 2 * x2 + (temp >> 24) + 2) >> 2;
-
- x2 = video->intra_pred_topleft; /* re-use x2 instead of y0 */
- P_x = (x2 + 2 * x0 + x1 + 2) >> 2;
-
- x1 = *comp_refy;
- comp_refy += pitch; /* re-use x1 instead of y1 */
- D = (x0 + 2 * x2 + x1 + 2) >> 2;
-
- x0 = *comp_refy;
- comp_refy += pitch; /* re-use x0 instead of y2 */
- P_y = (x2 + 2 * x1 + x0 + 2) >> 2;
-
- x2 = *comp_refy;
- comp_refy += pitch; /* re-use x2 instead of y3 */
- Q_y = (x1 + 2 * x0 + x2 + 2) >> 2;
-
- x1 = *comp_refy; /* re-use x1 instead of y4 */
- R_y = (x0 + 2 * x2 + x1 + 2) >> 2;
-
- /* we can pack these */
- temp = D | (P_x << 8); //[D P_x Q_x R_x]
- //[P_y D P_x Q_x]
- temp |= (Q_x << 16); //[Q_y P_y D P_x]
- temp |= (R_x << 24); //[R_y Q_y P_y D ]
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = P_y | (D << 8);
- temp |= (P_x << 16);
- temp |= (Q_x << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = Q_y | (P_y << 8);
- temp |= (D << 16);
- temp |= (P_x << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = R_y | (Q_y << 8);
- temp |= (P_y << 16);
- temp |= (D << 24);
- *((uint32*)pred) = temp;
-
- return ;
-}
-
-void Intra_4x4_Diagonal_Vertical_Right(AVCCommonObj *video, int pitch, int block_offset)
-{
- uint8 *comp_refx = video->pintra_pred_top;
- uint8 *comp_refy = video->pintra_pred_left;
- uint32 temp;
- int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2, D;
- int x0, x1, x2;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- x0 = *comp_refx++;
- x1 = *comp_refx++;
- Q0 = x0 + x1 + 1;
-
- x2 = *comp_refx++;
- R0 = x1 + x2 + 1;
-
- x1 = *comp_refx++; /* reuse x1 instead of x3 */
- S0 = x2 + x1 + 1;
-
- x1 = video->intra_pred_topleft; /* reuse x1 instead of y0 */
- P0 = x1 + x0 + 1;
-
- x2 = *comp_refy;
- comp_refy += pitch; /* reuse x2 instead of y1 */
- D = (x2 + 2 * x1 + x0 + 2) >> 2;
-
- P1 = (P0 + Q0) >> 2;
- Q1 = (Q0 + R0) >> 2;
- R1 = (R0 + S0) >> 2;
-
- P0 >>= 1;
- Q0 >>= 1;
- R0 >>= 1;
- S0 >>= 1;
-
- x0 = *comp_refy;
- comp_refy += pitch; /* reuse x0 instead of y2 */
- P2 = (x1 + 2 * x2 + x0 + 2) >> 2;
- x1 = *comp_refy;
- comp_refy += pitch; /* reuse x1 instead of y3 */
- Q2 = (x2 + 2 * x0 + x1 + 2) >> 2;
-
- temp = P0 | (Q0 << 8); //[P0 Q0 R0 S0]
- //[D P1 Q1 R1]
- temp |= (R0 << 16); //[P2 P0 Q0 R0]
- temp |= (S0 << 24); //[Q2 D P1 Q1]
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = D | (P1 << 8);
- temp |= (Q1 << 16);
- temp |= (R1 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = P2 | (P0 << 8);
- temp |= (Q0 << 16);
- temp |= (R0 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = Q2 | (D << 8);
- temp |= (P1 << 16);
- temp |= (Q1 << 24);
- *((uint32*)pred) = temp;
-
- return ;
-}
-
-void Intra_4x4_Diagonal_Horizontal_Down(AVCCommonObj *video, int pitch,
- int block_offset)
-{
- uint8 *comp_refx = video->pintra_pred_top;
- uint8 *comp_refy = video->pintra_pred_left;
- uint32 temp;
- int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2, D;
- int x0, x1, x2;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- x0 = *comp_refx++;
- x1 = *comp_refx++;
- x2 = *comp_refx++;
- Q2 = (x0 + 2 * x1 + x2 + 2) >> 2;
-
- x2 = video->intra_pred_topleft; /* reuse x2 instead of y0 */
- P2 = (x2 + 2 * x0 + x1 + 2) >> 2;
-
- x1 = *comp_refy;
- comp_refy += pitch; /* reuse x1 instead of y1 */
- D = (x1 + 2 * x2 + x0 + 2) >> 2;
- P0 = x2 + x1 + 1;
-
- x0 = *comp_refy;
- comp_refy += pitch; /* reuse x0 instead of y2 */
- Q0 = x1 + x0 + 1;
-
- x1 = *comp_refy;
- comp_refy += pitch; /* reuse x1 instead of y3 */
- R0 = x0 + x1 + 1;
-
- x2 = *comp_refy; /* reuse x2 instead of y4 */
- S0 = x1 + x2 + 1;
-
- P1 = (P0 + Q0) >> 2;
- Q1 = (Q0 + R0) >> 2;
- R1 = (R0 + S0) >> 2;
-
- P0 >>= 1;
- Q0 >>= 1;
- R0 >>= 1;
- S0 >>= 1;
-
-
- /* we can pack these */
- temp = P0 | (D << 8); //[P0 D P2 Q2]
- //[Q0 P1 P0 D ]
- temp |= (P2 << 16); //[R0 Q1 Q0 P1]
- temp |= (Q2 << 24); //[S0 R1 R0 Q1]
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = Q0 | (P1 << 8);
- temp |= (P0 << 16);
- temp |= (D << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = R0 | (Q1 << 8);
- temp |= (Q0 << 16);
- temp |= (P1 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = S0 | (R1 << 8);
- temp |= (R0 << 16);
- temp |= (Q1 << 24);
- *((uint32*)pred) = temp;
-
- return ;
-}
-
-void Intra_4x4_Vertical_Left(AVCCommonObj *video, int block_offset, AVCNeighborAvailability *availability)
-{
- uint8 *comp_refx = video->pintra_pred_top;
- uint32 temp1, temp2;
- int x0, x1, x2, x3, x4, x5, x6;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- x0 = *comp_refx++;
- x1 = *comp_refx++;
- x2 = *comp_refx++;
- x3 = *comp_refx++;
- if (availability->top_right)
- {
- x4 = *comp_refx++;
- x5 = *comp_refx++;
- x6 = *comp_refx++;
- }
- else
- {
- x4 = x3;
- x5 = x3;
- x6 = x3;
- }
-
- x0 += x1 + 1;
- x1 += x2 + 1;
- x2 += x3 + 1;
- x3 += x4 + 1;
- x4 += x5 + 1;
- x5 += x6 + 1;
-
- temp1 = (x0 >> 1);
- temp1 |= ((x1 >> 1) << 8);
- temp1 |= ((x2 >> 1) << 16);
- temp1 |= ((x3 >> 1) << 24);
-
- *((uint32*)pred) = temp1;
- pred += pred_pitch;
-
- temp2 = ((x0 + x1) >> 2);
- temp2 |= (((x1 + x2) >> 2) << 8);
- temp2 |= (((x2 + x3) >> 2) << 16);
- temp2 |= (((x3 + x4) >> 2) << 24);
-
- *((uint32*)pred) = temp2;
- pred += pred_pitch;
-
- temp1 = (temp1 >> 8) | ((x4 >> 1) << 24); /* rotate out old value */
- *((uint32*)pred) = temp1;
- pred += pred_pitch;
-
- temp2 = (temp2 >> 8) | (((x4 + x5) >> 2) << 24); /* rotate out old value */
- *((uint32*)pred) = temp2;
- pred += pred_pitch;
-
- return ;
-}
-
-void Intra_4x4_Horizontal_Up(AVCCommonObj *video, int pitch, int block_offset)
-{
- uint8 *comp_refy = video->pintra_pred_left;
- uint32 temp;
- int Q0, R0, Q1, D0, D1, P0, P1;
- int y0, y1, y2, y3;
- uint8 *pred = video->pred_block + block_offset;
- int pred_pitch = video->pred_pitch;
-
- y0 = *comp_refy;
- comp_refy += pitch;
- y1 = *comp_refy;
- comp_refy += pitch;
- y2 = *comp_refy;
- comp_refy += pitch;
- y3 = *comp_refy;
-
- Q0 = (y1 + y2 + 1) >> 1;
- Q1 = (y1 + (y2 << 1) + y3 + 2) >> 2;
- P0 = ((y0 + y1 + 1) >> 1);
- P1 = ((y0 + (y1 << 1) + y2 + 2) >> 2);
-
- temp = P0 | (P1 << 8); // [P0 P1 Q0 Q1]
- temp |= (Q0 << 16); // [Q0 Q1 R0 DO]
- temp |= (Q1 << 24); // [R0 D0 D1 D1]
- *((uint32*)pred) = temp; // [D1 D1 D1 D1]
- pred += pred_pitch;
-
- D0 = (y2 + 3 * y3 + 2) >> 2;
- R0 = (y2 + y3 + 1) >> 1;
-
- temp = Q0 | (Q1 << 8);
- temp |= (R0 << 16);
- temp |= (D0 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- D1 = y3;
-
- temp = R0 | (D0 << 8);
- temp |= (D1 << 16);
- temp |= (D1 << 24);
- *((uint32*)pred) = temp;
- pred += pred_pitch;
-
- temp = D1 | (D1 << 8);
- temp |= (temp << 16);
- *((uint32*)pred) = temp;
-
- return ;
-}
-/* =============================== END 4x4 MODES======================================*/
-void Intra_16x16_Vertical(AVCCommonObj *video)
-{
- int i;
- uint32 temp1, temp2, temp3, temp4;
- uint8 *comp_ref = video->pintra_pred_top;
- uint8 *pred = video->pred_block;
- int pred_pitch = video->pred_pitch;
-
- temp1 = *((uint32*)comp_ref);
- comp_ref += 4;
-
- temp2 = *((uint32*)comp_ref);
- comp_ref += 4;
-
- temp3 = *((uint32*)comp_ref);
- comp_ref += 4;
-
- temp4 = *((uint32*)comp_ref);
- comp_ref += 4;
-
- i = 16;
- while (i > 0)
- {
- *((uint32*)pred) = temp1;
- *((uint32*)(pred + 4)) = temp2;
- *((uint32*)(pred + 8)) = temp3;
- *((uint32*)(pred + 12)) = temp4;
- pred += pred_pitch;
- i--;
- }
-
- return ;
-}
-
-void Intra_16x16_Horizontal(AVCCommonObj *video, int pitch)
-{
- int i;
- uint32 temp;
- uint8 *comp_ref = video->pintra_pred_left;
- uint8 *pred = video->pred_block;
- int pred_pitch = video->pred_pitch;
-
- for (i = 0; i < 16; i++)
- {
- temp = *comp_ref;
- temp |= (temp << 8);
- temp |= (temp << 16);
- *((uint32*)pred) = temp;
- *((uint32*)(pred + 4)) = temp;
- *((uint32*)(pred + 8)) = temp;
- *((uint32*)(pred + 12)) = temp;
- pred += pred_pitch;
- comp_ref += pitch;
- }
-}
-
-
-void Intra_16x16_DC(AVCCommonObj *video, int pitch)
-{
- int i;
- uint32 temp, temp2;
- uint8 *comp_ref_x = video->pintra_pred_top;
- uint8 *comp_ref_y = video->pintra_pred_left;
- int sum = 0;
- uint8 *pred = video->pred_block;
- int pred_pitch = video->pred_pitch;
-
- if (video->intraAvailB)
- {
- temp = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- sum = temp + (temp >> 16);
- temp = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- sum += temp + (temp >> 16);
- temp = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- sum += temp + (temp >> 16);
- temp = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- sum += temp + (temp >> 16);
- sum &= 0xFFFF;
-
- if (video->intraAvailA)
- {
- for (i = 0; i < 16; i++)
- {
- sum += (*comp_ref_y);
- comp_ref_y += pitch;
- }
- sum = (sum + 16) >> 5;
- }
- else
- {
- sum = (sum + 8) >> 4;
- }
- }
- else if (video->intraAvailA)
- {
- for (i = 0; i < 16; i++)
- {
- sum += *comp_ref_y;
- comp_ref_y += pitch;
- }
- sum = (sum + 8) >> 4;
- }
- else
- {
- sum = 128;
- }
-
- temp = sum | (sum << 8);
- temp |= (temp << 16);
-
- for (i = 0; i < 16; i++)
- {
- *((uint32*)pred) = temp;
- *((uint32*)(pred + 4)) = temp;
- *((uint32*)(pred + 8)) = temp;
- *((uint32*)(pred + 12)) = temp;
- pred += pred_pitch;
- }
-
-}
-
-void Intra_16x16_Plane(AVCCommonObj *video, int pitch)
-{
- int i, a_16, b, c, factor_c;
- uint8 *comp_ref_x = video->pintra_pred_top;
- uint8 *comp_ref_y = video->pintra_pred_left;
- uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
- int H = 0, V = 0 , tmp;
- uint8 *pred = video->pred_block;
- uint32 temp;
- uint8 byte1, byte2, byte3;
- int value;
- int pred_pitch = video->pred_pitch;
-
- comp_ref_x0 = comp_ref_x + 8;
- comp_ref_x1 = comp_ref_x + 6;
- comp_ref_y0 = comp_ref_y + (pitch << 3);
- comp_ref_y1 = comp_ref_y + 6 * pitch;
-
- for (i = 1; i < 8; i++)
- {
- H += i * (*comp_ref_x0++ - *comp_ref_x1--);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
- comp_ref_y0 += pitch;
- comp_ref_y1 -= pitch;
- }
-
- H += i * (*comp_ref_x0++ - video->intra_pred_topleft);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
-
-
- a_16 = ((*(comp_ref_x + 15) + *(comp_ref_y + 15 * pitch)) << 4) + 16;;
- b = (5 * H + 32) >> 6;
- c = (5 * V + 32) >> 6;
-
- tmp = 0;
-
- for (i = 0; i < 16; i++)
- {
- factor_c = a_16 + c * (tmp++ - 7);
-
- factor_c -= 7 * b;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte2 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte3 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- temp = byte1 | (byte2 << 8);
- temp |= (byte3 << 16);
- temp |= (value << 24);
- *((uint32*)pred) = temp;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte2 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte3 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- temp = byte1 | (byte2 << 8);
- temp |= (byte3 << 16);
- temp |= (value << 24);
- *((uint32*)(pred + 4)) = temp;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte2 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte3 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- temp = byte1 | (byte2 << 8);
- temp |= (byte3 << 16);
- temp |= (value << 24);
- *((uint32*)(pred + 8)) = temp;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte2 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte3 = value;
- value = factor_c >> 5;
- CLIP_RESULT(value)
- temp = byte1 | (byte2 << 8);
- temp |= (byte3 << 16);
- temp |= (value << 24);
- *((uint32*)(pred + 12)) = temp;
- pred += pred_pitch;
- }
-}
-
-/************** Chroma intra prediction *********************/
-
-void Intra_Chroma_DC(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
-{
- int i;
- uint32 temp, temp2, pred_a, pred_b;
- uint8 *comp_ref_x, *comp_ref_y;
- uint8 *comp_ref_cb_x = video->pintra_pred_top_cb;
- uint8 *comp_ref_cb_y = video->pintra_pred_left_cb;
- uint8 *comp_ref_cr_x = video->pintra_pred_top_cr;
- uint8 *comp_ref_cr_y = video->pintra_pred_left_cr;
- int component, j;
- int sum_x0, sum_x1, sum_y0, sum_y1;
- int pred_0[2], pred_1[2], pred_2[2], pred_3[2];
- int pred_pitch = video->pred_pitch;
- uint8 *pred;
-
- if (video->intraAvailB & video->intraAvailA)
- {
- comp_ref_x = comp_ref_cb_x;
- comp_ref_y = comp_ref_cb_y;
- for (i = 0; i < 2; i++)
- {
- temp = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- temp += (temp >> 16);
- sum_x0 = temp & 0xFFFF;
-
- temp = *((uint32*)comp_ref_x);
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- temp += (temp >> 16);
- sum_x1 = temp & 0xFFFF;
-
- pred_1[i] = (sum_x1 + 2) >> 2;
-
- sum_y0 = *comp_ref_y;
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
-
- sum_y1 = *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
-
- pred_2[i] = (sum_y1 + 2) >> 2;
-
- pred_0[i] = (sum_y0 + sum_x0 + 4) >> 3;
- pred_3[i] = (sum_y1 + sum_x1 + 4) >> 3;
-
- comp_ref_x = comp_ref_cr_x;
- comp_ref_y = comp_ref_cr_y;
- }
- }
-
- else if (video->intraAvailA)
- {
- comp_ref_y = comp_ref_cb_y;
- for (i = 0; i < 2; i++)
- {
- sum_y0 = *comp_ref_y;
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
-
- sum_y1 = *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
-
- pred_0[i] = pred_1[i] = (sum_y0 + 2) >> 2;
- pred_2[i] = pred_3[i] = (sum_y1 + 2) >> 2;
- comp_ref_y = comp_ref_cr_y;
- }
- }
- else if (video->intraAvailB)
- {
- comp_ref_x = comp_ref_cb_x;
- for (i = 0; i < 2; i++)
- {
- temp = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- temp += (temp >> 16);
- sum_x0 = temp & 0xFFFF;
-
- temp = *((uint32*)comp_ref_x);
- temp2 = (temp >> 8) & 0xFF00FF;
- temp &= 0xFF00FF;
- temp += temp2;
- temp += (temp >> 16);
- sum_x1 = temp & 0xFFFF;
-
- pred_0[i] = pred_2[i] = (sum_x0 + 2) >> 2;
- pred_1[i] = pred_3[i] = (sum_x1 + 2) >> 2;
- comp_ref_x = comp_ref_cr_x;
- }
- }
- else
- {
- pred_0[0] = pred_0[1] = pred_1[0] = pred_1[1] =
- pred_2[0] = pred_2[1] = pred_3[0] = pred_3[1] = 128;
- }
-
- pred = predCb;
- for (component = 0; component < 2; component++)
- {
- pred_a = pred_0[component];
- pred_b = pred_1[component];
- pred_a |= (pred_a << 8);
- pred_a |= (pred_a << 16);
- pred_b |= (pred_b << 8);
- pred_b |= (pred_b << 16);
-
- for (i = 4; i < 6; i++)
- {
- for (j = 0; j < 4; j++) /* 4 lines */
- {
- *((uint32*)pred) = pred_a;
- *((uint32*)(pred + 4)) = pred_b;
- pred += pred_pitch; /* move to the next line */
- }
- pred_a = pred_2[component];
- pred_b = pred_3[component];
- pred_a |= (pred_a << 8);
- pred_a |= (pred_a << 16);
- pred_b |= (pred_b << 8);
- pred_b |= (pred_b << 16);
- }
- pred = predCr; /* point to cr */
- }
-}
-
-void Intra_Chroma_Horizontal(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
-{
- int i;
- uint32 temp;
- uint8 *comp_ref_cb_y = video->pintra_pred_left_cb;
- uint8 *comp_ref_cr_y = video->pintra_pred_left_cr;
- uint8 *comp;
- int component, j;
- int pred_pitch = video->pred_pitch;
- uint8 *pred;
-
- comp = comp_ref_cb_y;
- pred = predCb;
- for (component = 0; component < 2; component++)
- {
- for (i = 4; i < 6; i++)
- {
- for (j = 0; j < 4; j++)
- {
- temp = *comp;
- comp += pitch;
- temp |= (temp << 8);
- temp |= (temp << 16);
- *((uint32*)pred) = temp;
- *((uint32*)(pred + 4)) = temp;
- pred += pred_pitch;
- }
- }
- comp = comp_ref_cr_y;
- pred = predCr; /* point to cr */
- }
-
-}
-
-void Intra_Chroma_Vertical(AVCCommonObj *video, uint8 *predCb, uint8 *predCr)
-{
- uint32 temp1, temp2;
- uint8 *comp_ref_cb_x = video->pintra_pred_top_cb;
- uint8 *comp_ref_cr_x = video->pintra_pred_top_cr;
- uint8 *comp_ref;
- int component, j;
- int pred_pitch = video->pred_pitch;
- uint8 *pred;
-
- comp_ref = comp_ref_cb_x;
- pred = predCb;
- for (component = 0; component < 2; component++)
- {
- temp1 = *((uint32*)comp_ref);
- temp2 = *((uint32*)(comp_ref + 4));
- for (j = 0; j < 8; j++)
- {
- *((uint32*)pred) = temp1;
- *((uint32*)(pred + 4)) = temp2;
- pred += pred_pitch;
- }
- comp_ref = comp_ref_cr_x;
- pred = predCr; /* point to cr */
- }
-
-}
-
-void Intra_Chroma_Plane(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
-{
- int i;
- int a_16_C[2], b_C[2], c_C[2], a_16, b, c, factor_c;
- uint8 *comp_ref_x, *comp_ref_y, *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
- int component, j;
- int H, V, tmp;
- uint32 temp;
- uint8 byte1, byte2, byte3;
- int value;
- uint8 topleft;
- int pred_pitch = video->pred_pitch;
- uint8 *pred;
-
- comp_ref_x = video->pintra_pred_top_cb;
- comp_ref_y = video->pintra_pred_left_cb;
- topleft = video->intra_pred_topleft_cb;
-
- for (component = 0; component < 2; component++)
- {
- H = V = 0;
- comp_ref_x0 = comp_ref_x + 4;
- comp_ref_x1 = comp_ref_x + 2;
- comp_ref_y0 = comp_ref_y + (pitch << 2);
- comp_ref_y1 = comp_ref_y + (pitch << 1);
- for (i = 1; i < 4; i++)
- {
- H += i * (*comp_ref_x0++ - *comp_ref_x1--);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
- comp_ref_y0 += pitch;
- comp_ref_y1 -= pitch;
- }
- H += i * (*comp_ref_x0++ - topleft);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
-
- a_16_C[component] = ((*(comp_ref_x + 7) + *(comp_ref_y + 7 * pitch)) << 4) + 16;
- b_C[component] = (17 * H + 16) >> 5;
- c_C[component] = (17 * V + 16) >> 5;
-
- comp_ref_x = video->pintra_pred_top_cr;
- comp_ref_y = video->pintra_pred_left_cr;
- topleft = video->intra_pred_topleft_cr;
- }
-
- pred = predCb;
- for (component = 0; component < 2; component++)
- {
- a_16 = a_16_C[component];
- b = b_C[component];
- c = c_C[component];
- tmp = 0;
- for (i = 4; i < 6; i++)
- {
- for (j = 0; j < 4; j++)
- {
- factor_c = a_16 + c * (tmp++ - 3);
-
- factor_c -= 3 * b;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte2 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte3 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- temp = byte1 | (byte2 << 8);
- temp |= (byte3 << 16);
- temp |= (value << 24);
- *((uint32*)pred) = temp;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte2 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- byte3 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- temp = byte1 | (byte2 << 8);
- temp |= (byte3 << 16);
- temp |= (value << 24);
- *((uint32*)(pred + 4)) = temp;
- pred += pred_pitch;
- }
- }
- pred = predCr; /* point to cr */
- }
-}
-
diff --git a/media/libstagefright/codecs/avc/dec/src/residual.cpp b/media/libstagefright/codecs/avc/dec/src/residual.cpp
deleted file mode 100644
index c68550d..0000000
--- a/media/libstagefright/codecs/avc/dec/src/residual.cpp
+++ /dev/null
@@ -1,523 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-
-#include <string.h>
-
-#include "avcdec_lib.h"
-#include "avcdec_bitstream.h"
-
-AVCDec_Status DecodeIntraPCM(AVCCommonObj *video, AVCDecBitstream *stream)
-{
- AVCDec_Status status;
- int j;
- int mb_x, mb_y, offset1;
- uint8 *pDst;
- uint32 byte0, byte1;
- int pitch;
-
- mb_x = video->mb_x;
- mb_y = video->mb_y;
-
-#ifdef USE_PRED_BLOCK
- pDst = video->pred_block + 84;
- pitch = 20;
-#else
- offset1 = (mb_x << 4) + (mb_y << 4) * video->PicWidthInSamplesL;
- pDst = video->currPic->Sl + offset1;
- pitch = video->currPic->pitch;
-#endif
-
- /* at this point bitstream is byte-aligned */
- j = 16;
- while (j > 0)
- {
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)pDst) = byte0;
-
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)(pDst + 4)) = byte0;
-
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)(pDst + 8)) = byte0;
-
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)(pDst + 12)) = byte0;
- j--;
- pDst += pitch;
-
- if (status != AVCDEC_SUCCESS) /* check only once per line */
- return status;
- }
-
-#ifdef USE_PRED_BLOCK
- pDst = video->pred_block + 452;
- pitch = 12;
-#else
- offset1 = (offset1 >> 2) + (mb_x << 2);
- pDst = video->currPic->Scb + offset1;
- pitch >>= 1;
-#endif
-
- j = 8;
- while (j > 0)
- {
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)pDst) = byte0;
-
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)(pDst + 4)) = byte0;
-
- j--;
- pDst += pitch;
-
- if (status != AVCDEC_SUCCESS) /* check only once per line */
- return status;
- }
-
-#ifdef USE_PRED_BLOCK
- pDst = video->pred_block + 596;
- pitch = 12;
-#else
- pDst = video->currPic->Scr + offset1;
-#endif
- j = 8;
- while (j > 0)
- {
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)pDst) = byte0;
-
- status = BitstreamReadBits(stream, 8, (uint*) & byte0);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 8);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 16);
- status = BitstreamReadBits(stream, 8, (uint*) & byte1);
- byte0 |= (byte1 << 24);
- *((uint32*)(pDst + 4)) = byte0;
-
- j--;
- pDst += pitch;
-
- if (status != AVCDEC_SUCCESS) /* check only once per line */
- return status;
- }
-
-#ifdef MB_BASED_DEBLOCK
- SaveNeighborForIntraPred(video, offset1);
-#endif
-
- return AVCDEC_SUCCESS;
-}
-
-
-
-/* see subclause 7.3.5.3 and readCBPandCoeffsFromNAL() in JM*/
-AVCDec_Status residual(AVCDecObject *decvid, AVCMacroblock *currMB)
-{
- AVCCommonObj *video = decvid->common;
- int16 *block;
- int level[16], run[16], numcoeff; /* output from residual_block_cavlc */
- int block_x, i, j, k, idx, iCbCr;
- int mbPartIdx, subMbPartIdx, mbPartIdx_X, mbPartIdx_Y;
- int nC, maxNumCoeff = 16;
- int coeffNum, start_scan = 0;
- uint8 *zz_scan;
- int Rq, Qq;
- uint32 cbp4x4 = 0;
-
- /* in 8.5.4, it only says if it's field macroblock. */
-
- zz_scan = (uint8*) ZZ_SCAN_BLOCK;
-
-
- /* see 8.5.8 for the initialization of these values */
- Qq = video->QPy_div_6;
- Rq = video->QPy_mod_6;
-
- memset(video->block, 0, sizeof(int16)*NUM_PIXELS_IN_MB);
-
- if (currMB->mbMode == AVC_I16)
- {
- nC = predict_nnz(video, 0, 0);
- decvid->residual_block(decvid, nC, 16, level, run, &numcoeff);
- /* then performs zigzag and transform */
- block = video->block;
- coeffNum = -1;
- for (i = numcoeff - 1; i >= 0; i--)
- {
- coeffNum += run[i] + 1;
- if (coeffNum > 15)
- {
- return AVCDEC_FAIL;
- }
- idx = zz_scan[coeffNum] << 2;
- /* idx = ((idx>>2)<<6) + ((idx&3)<<2); */
- block[idx] = level[i];
- }
-
- /* inverse transform on Intra16x16DCLevel */
- if (numcoeff)
- {
- Intra16DCTrans(block, Qq, Rq);
- cbp4x4 = 0xFFFF;
- }
- maxNumCoeff = 15;
- start_scan = 1;
- }
-
- memset(currMB->nz_coeff, 0, sizeof(uint8)*24);
-
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- mbPartIdx_X = (mbPartIdx & 1) << 1;
- mbPartIdx_Y = mbPartIdx & -2;
-
- if (currMB->CBP&(1 << mbPartIdx))
- {
- for (subMbPartIdx = 0; subMbPartIdx < 4; subMbPartIdx++)
- {
- i = mbPartIdx_X + (subMbPartIdx & 1); // check this
- j = mbPartIdx_Y + (subMbPartIdx >> 1);
- block = video->block + (j << 6) + (i << 2); //
- nC = predict_nnz(video, i, j);
- decvid->residual_block(decvid, nC, maxNumCoeff, level, run, &numcoeff);
-
- /* convert to raster scan and quantize*/
- /* Note: for P mb in SP slice and SI mb in SI slice,
- the quantization cannot be done here.
- block[idx] should be assigned with level[k].
- itrans will be done after the prediction.
- There will be transformation on the predicted value,
- then addition with block[idx], then this quantization
- and transform.*/
-
- coeffNum = -1 + start_scan;
- for (k = numcoeff - 1; k >= 0; k--)
- {
- coeffNum += run[k] + 1;
- if (coeffNum > 15)
- {
- return AVCDEC_FAIL;
- }
- idx = zz_scan[coeffNum];
- block[idx] = (level[k] * dequant_coefres[Rq][coeffNum]) << Qq ;
- }
-
- currMB->nz_coeff[(j<<2)+i] = numcoeff;
- if (numcoeff)
- {
- cbp4x4 |= (1 << ((j << 2) + i));
- }
- }
- }
- }
-
- Qq = video->QPc_div_6;
- Rq = video->QPc_mod_6;
-
- if (currMB->CBP & (3 << 4)) /* chroma DC residual present */
- {
- for (iCbCr = 0; iCbCr < 2; iCbCr++)
- {
- decvid->residual_block(decvid, -1, 4, level, run, &numcoeff);
- block = video->block + 256 + (iCbCr << 3);
- coeffNum = -1;
- for (i = numcoeff - 1; i >= 0; i--)
- {
- coeffNum += run[i] + 1;
- if (coeffNum > 3)
- {
- return AVCDEC_FAIL;
- }
- block[(coeffNum>>1)*64 + (coeffNum&1)*4] = level[i];
- }
- /* inverse transform on chroma DC */
- /* for P in SP and SI in SI, this function can't be done here,
- must do prediction transform/quant first. */
- if (numcoeff)
- {
- ChromaDCTrans(block, Qq, Rq);
- cbp4x4 |= (iCbCr ? 0xcc0000 : 0x330000);
- }
- }
- }
-
- if (currMB->CBP & (2 << 4))
- {
- for (block_x = 0; block_x < 4; block_x += 2) /* for iCbCr */
- {
- for (j = 4; j < 6; j++) /* for each block inside Cb or Cr */
- {
- for (i = block_x; i < block_x + 2; i++)
- {
-
- block = video->block + (j << 6) + (i << 2);
-
- nC = predict_nnz_chroma(video, i, j);
- decvid->residual_block(decvid, nC, 15, level, run, &numcoeff);
-
- /* convert to raster scan and quantize */
- /* for P MB in SP slice and SI MB in SI slice,
- the dequant and transform cannot be done here.
- It needs the prediction values. */
- coeffNum = 0;
- for (k = numcoeff - 1; k >= 0; k--)
- {
- coeffNum += run[k] + 1;
- if (coeffNum > 15)
- {
- return AVCDEC_FAIL;
- }
- idx = zz_scan[coeffNum];
- block[idx] = (level[k] * dequant_coefres[Rq][coeffNum]) << Qq;
- }
-
-
- /* then transform */
- // itrans(block); /* transform */
- currMB->nz_coeff[(j<<2)+i] = numcoeff; //
- if (numcoeff)
- {
- cbp4x4 |= (1 << ((j << 2) + i));
- }
- }
-
- }
- }
- }
-
- video->cbp4x4 = cbp4x4;
-
- return AVCDEC_SUCCESS;
-}
-
-/* see subclause 7.3.5.3.1 and 9.2 and readCoeff4x4_CAVLC() in JM */
-AVCDec_Status residual_block_cavlc(AVCDecObject *decvid, int nC, int maxNumCoeff,
- int *level, int *run, int *numcoeff)
-{
- int i, j;
- int TrailingOnes, TotalCoeff;
- AVCDecBitstream *stream = decvid->bitstream;
- int suffixLength;
- uint trailing_ones_sign_flag, level_prefix, level_suffix;
- int levelCode, levelSuffixSize, zerosLeft;
- int run_before;
-
-
- if (nC >= 0)
- {
- ce_TotalCoeffTrailingOnes(stream, &TrailingOnes, &TotalCoeff, nC);
- }
- else
- {
- ce_TotalCoeffTrailingOnesChromaDC(stream, &TrailingOnes, &TotalCoeff);
- }
-
- *numcoeff = TotalCoeff;
-
- /* This part is done quite differently in ReadCoef4x4_CAVLC() */
- if (TotalCoeff == 0)
- {
- return AVCDEC_SUCCESS;
- }
-
- if (TrailingOnes) /* keep reading the sign of those trailing ones */
- {
- /* instead of reading one bit at a time, read the whole thing at once */
- BitstreamReadBits(stream, TrailingOnes, &trailing_ones_sign_flag);
- trailing_ones_sign_flag <<= 1;
- for (i = 0; i < TrailingOnes; i++)
- {
- level[i] = 1 - ((trailing_ones_sign_flag >> (TrailingOnes - i - 1)) & 2);
- }
- }
-
- i = TrailingOnes;
- suffixLength = 1;
- if (TotalCoeff > TrailingOnes)
- {
- ce_LevelPrefix(stream, &level_prefix);
- if (TotalCoeff < 11 || TrailingOnes == 3)
- {
- if (level_prefix < 14)
- {
-// levelSuffixSize = 0;
- levelCode = level_prefix;
- }
- else if (level_prefix == 14)
- {
-// levelSuffixSize = 4;
- BitstreamReadBits(stream, 4, &level_suffix);
- levelCode = 14 + level_suffix;
- }
- else /* if (level_prefix == 15) */
- {
-// levelSuffixSize = 12;
- BitstreamReadBits(stream, 12, &level_suffix);
- levelCode = 30 + level_suffix;
- }
- }
- else
- {
- /* suffixLength = 1; */
- if (level_prefix < 15)
- {
- levelSuffixSize = suffixLength;
- }
- else
- {
- levelSuffixSize = 12;
- }
- BitstreamReadBits(stream, levelSuffixSize, &level_suffix);
-
- levelCode = (level_prefix << 1) + level_suffix;
- }
-
- if (TrailingOnes < 3)
- {
- levelCode += 2;
- }
-
- level[i] = (levelCode + 2) >> 1;
- if (level[i] > 3)
- {
- suffixLength = 2;
- }
-
- if (levelCode & 1)
- {
- level[i] = -level[i];
- }
- i++;
-
- }
-
- for (j = TotalCoeff - i; j > 0 ; j--)
- {
- ce_LevelPrefix(stream, &level_prefix);
- if (level_prefix < 15)
- {
- levelSuffixSize = suffixLength;
- }
- else
- {
- levelSuffixSize = 12;
- }
- BitstreamReadBits(stream, levelSuffixSize, &level_suffix);
-
- levelCode = (level_prefix << suffixLength) + level_suffix;
- level[i] = (levelCode >> 1) + 1;
- if (level[i] > (3 << (suffixLength - 1)) && suffixLength < 6)
- {
- suffixLength++;
- }
- if (levelCode & 1)
- {
- level[i] = -level[i];
- }
- i++;
- }
-
-
- if (TotalCoeff < maxNumCoeff)
- {
- if (nC >= 0)
- {
- ce_TotalZeros(stream, &zerosLeft, TotalCoeff);
- }
- else
- {
- ce_TotalZerosChromaDC(stream, &zerosLeft, TotalCoeff);
- }
- }
- else
- {
- zerosLeft = 0;
- }
-
- for (i = 0; i < TotalCoeff - 1; i++)
- {
- if (zerosLeft > 0)
- {
- ce_RunBefore(stream, &run_before, zerosLeft);
- run[i] = run_before;
- }
- else
- {
- run[i] = 0;
- zerosLeft = 0; // could be negative under error conditions
- }
-
- zerosLeft = zerosLeft - run[i];
- }
-
- if (zerosLeft < 0)
- {
- zerosLeft = 0;
-// return AVCDEC_FAIL;
- }
-
- run[TotalCoeff-1] = zerosLeft;
-
- /* leave the inverse zigzag scan part for the caller */
-
-
- return AVCDEC_SUCCESS;
-}
diff --git a/media/libstagefright/codecs/avc/dec/src/slice.cpp b/media/libstagefright/codecs/avc/dec/src/slice.cpp
deleted file mode 100644
index 7a2ef3d..0000000
--- a/media/libstagefright/codecs/avc/dec/src/slice.cpp
+++ /dev/null
@@ -1,772 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/* Note for optimization: syntax decoding or operations related to B_SLICE should be
-commented out by macro definition or function pointers. */
-
-#include <string.h>
-
-#include "avcdec_lib.h"
-#include "avcdec_bitstream.h"
-
-const static int mbPart2raster[3][4] = {{0, 0, 0, 0}, {1, 1, 0, 0}, {1, 0, 1, 0}};
-/* decode_frame_slice() */
-/* decode_one_slice() */
-AVCDec_Status DecodeSlice(AVCDecObject *decvid)
-{
- AVCDec_Status status;
- AVCCommonObj *video = decvid->common;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCMacroblock *currMB ;
- AVCDecBitstream *stream = decvid->bitstream;
- uint slice_group_id;
- uint CurrMbAddr, moreDataFlag;
-
- /* set the first mb in slice */
- CurrMbAddr = sliceHdr->first_mb_in_slice;
- slice_group_id = video->MbToSliceGroupMap[CurrMbAddr];
-
- if ((CurrMbAddr && (CurrMbAddr != (uint)(video->mbNum + 1))) && video->currSeqParams->constrained_set1_flag == 1)
- {
- ConcealSlice(decvid, video->mbNum, CurrMbAddr);
- }
-
- moreDataFlag = 1;
- video->mb_skip_run = -1;
-
-
- /* while loop , see subclause 7.3.4 */
- do
- {
- if (CurrMbAddr >= video->PicSizeInMbs)
- {
- return AVCDEC_FAIL;
- }
-
- currMB = video->currMB = &(video->mblock[CurrMbAddr]);
- video->mbNum = CurrMbAddr;
- currMB->slice_id = video->slice_id; // slice
-
- /* we can remove this check if we don't support Mbaff. */
- /* we can wrap below into an initMB() function which will also
- do necessary reset of macroblock related parameters. */
-
- video->mb_x = CurrMbAddr % video->PicWidthInMbs;
- video->mb_y = CurrMbAddr / video->PicWidthInMbs;
-
- /* check the availability of neighboring macroblocks */
- InitNeighborAvailability(video, CurrMbAddr);
-
- /* read_macroblock and decode_one_macroblock() */
- status = DecodeMB(decvid);
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
-#ifdef MB_BASED_DEBLOCK
- if (video->currPicParams->num_slice_groups_minus1 == 0)
- {
- MBInLoopDeblock(video); /* MB-based deblocking */
- }
- else /* this mode cannot be used if the number of slice group is not one. */
- {
- return AVCDEC_FAIL;
- }
-#endif
- video->numMBs--;
-
- moreDataFlag = more_rbsp_data(stream);
-
-
- /* go to next MB */
- while (++CurrMbAddr < video->PicSizeInMbs && video->MbToSliceGroupMap[CurrMbAddr] != (int)slice_group_id)
- {
- }
-
- }
- while ((moreDataFlag && video->numMBs > 0) || video->mb_skip_run > 0); /* even if no more data, but last few MBs are skipped */
-
- if (video->numMBs == 0)
- {
- video->newPic = TRUE;
- video->mbNum = 0; // _Conceal
- return AVCDEC_PICTURE_READY;
- }
-
- return AVCDEC_SUCCESS;
-}
-
-/* read MB mode and motion vectors */
-/* perform Intra/Inter prediction and residue */
-/* update video->mb_skip_run */
-AVCDec_Status DecodeMB(AVCDecObject *decvid)
-{
- AVCDec_Status status;
- AVCCommonObj *video = decvid->common;
- AVCDecBitstream *stream = decvid->bitstream;
- AVCMacroblock *currMB = video->currMB;
- uint mb_type;
- int slice_type = video->slice_type;
- int temp;
-
- currMB->QPy = video->QPy;
- currMB->QPc = video->QPc;
-
- if (slice_type == AVC_P_SLICE)
- {
- if (video->mb_skip_run < 0)
- {
- ue_v(stream, (uint *)&(video->mb_skip_run));
- }
-
- if (video->mb_skip_run == 0)
- {
- /* this will not handle the case where the slice ends with a mb_skip_run == 0 and no following MB data */
- ue_v(stream, &mb_type);
- if (mb_type > 30)
- {
- return AVCDEC_FAIL;
- }
- InterpretMBModeP(currMB, mb_type);
- video->mb_skip_run = -1;
- }
- else
- {
- /* see subclause 7.4.4 for more details on how
- mb_field_decoding_flag is derived in case of skipped MB */
-
- currMB->mb_intra = FALSE;
-
- currMB->mbMode = AVC_SKIP;
- currMB->MbPartWidth = currMB->MbPartHeight = 16;
- currMB->NumMbPart = 1;
- currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =
- currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1; //
- currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =
- currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;
- currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =
- currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;
-
- memset(currMB->nz_coeff, 0, sizeof(uint8)*NUM_BLKS_IN_MB);
-
- currMB->CBP = 0;
- video->cbp4x4 = 0;
- /* for skipped MB, always look at the first entry in RefPicList */
- currMB->RefIdx[0] = currMB->RefIdx[1] =
- currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;
- InterMBPrediction(video);
- video->mb_skip_run--;
- return AVCDEC_SUCCESS;
- }
-
- }
- else
- {
- /* Then decode mode and MV */
- ue_v(stream, &mb_type);
- if (mb_type > 25)
- {
- return AVCDEC_FAIL;
- }
- InterpretMBModeI(currMB, mb_type);
- }
-
-
- if (currMB->mbMode != AVC_I_PCM)
- {
-
- if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)
- {
- status = sub_mb_pred(video, currMB, stream);
- }
- else
- {
- status = mb_pred(video, currMB, stream) ;
- }
-
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
-
- if (currMB->mbMode != AVC_I16)
- {
- /* decode coded_block_pattern */
- status = DecodeCBP(currMB, stream);
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
- }
-
- if (currMB->CBP > 0 || currMB->mbMode == AVC_I16)
- {
- se_v(stream, &temp);
- if (temp)
- {
- temp += (video->QPy + 52);
- currMB->QPy = video->QPy = temp - 52 * (temp * 79 >> 12);
- if (currMB->QPy > 51 || currMB->QPy < 0)
- {
- video->QPy = AVC_CLIP3(0, 51, video->QPy);
-// return AVCDEC_FAIL;
- }
- video->QPy_div_6 = (video->QPy * 43) >> 8;
- video->QPy_mod_6 = video->QPy - 6 * video->QPy_div_6;
- currMB->QPc = video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->QPy + video->currPicParams->chroma_qp_index_offset)];
- video->QPc_div_6 = (video->QPc * 43) >> 8;
- video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;
- }
- }
- /* decode residue and inverse transform */
- status = residual(decvid, currMB);
- if (status != AVCDEC_SUCCESS)
- {
- return status;
- }
- }
- else
- {
- if (stream->bitcnt & 7)
- {
- BitstreamByteAlign(stream);
- }
- /* decode pcm_byte[i] */
- DecodeIntraPCM(video, stream);
-
- currMB->QPy = 0; /* necessary for deblocking */ // _OPTIMIZE
- currMB->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->currPicParams->chroma_qp_index_offset)];
-
- /* default values, don't know if really needed */
- currMB->CBP = 0x3F;
- video->cbp4x4 = 0xFFFF;
- currMB->mb_intra = TRUE;
- memset(currMB->nz_coeff, 16, sizeof(uint8)*NUM_BLKS_IN_MB);
- return AVCDEC_SUCCESS;
- }
-
-
- /* do Intra/Inter prediction, together with the residue compensation */
- /* This part should be common between the skip and no-skip */
- if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)
- {
- IntraMBPrediction(video);
- }
- else
- {
- InterMBPrediction(video);
- }
-
-
-
- return AVCDEC_SUCCESS;
-}
-
-/* see subclause 7.3.5.1 */
-AVCDec_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)
-{
- int mbPartIdx;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- uint max_ref_idx;
- const int *temp_0;
- int16 *temp_1;
- uint code;
-
- if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)
- {
-
- video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
-
- if (!video->currPicParams->constrained_intra_pred_flag)
- {
- video->intraAvailA = video->mbAvailA;
- video->intraAvailB = video->mbAvailB;
- video->intraAvailC = video->mbAvailC;
- video->intraAvailD = video->mbAvailD;
- }
- else
- {
- if (video->mbAvailA)
- {
- video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
- }
- if (video->mbAvailB)
- {
- video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
- }
- if (video->mbAvailC)
- {
- video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
- }
- if (video->mbAvailD)
- {
- video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
- }
- }
-
-
- if (currMB->mbMode == AVC_I4)
- {
- /* perform prediction to get the actual intra 4x4 pred mode */
- DecodeIntra4x4Mode(video, currMB, stream);
- /* output will be in currMB->i4Mode[4][4] */
- }
-
- ue_v(stream, &code);
-
- if (code > 3)
- {
- return AVCDEC_FAIL; /* out of range */
- }
- currMB->intra_chroma_pred_mode = (AVCIntraChromaPredMode)code;
- }
- else
- {
-
- memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);
-
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
-// max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
- max_ref_idx = video->refList0Size - 1;
-
- /* decode ref index for L0 */
- if (sliceHdr->num_ref_idx_l0_active_minus1 > 0)
- {
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- te_v(stream, &code, max_ref_idx);
- if (code > (uint)max_ref_idx)
- {
- return AVCDEC_FAIL;
- }
- currMB->ref_idx_L0[mbPartIdx] = code;
- }
- }
-
- /* populate ref_idx_L0 */
- temp_0 = &mbPart2raster[currMB->mbMode-AVC_P16][0];
- temp_1 = &currMB->ref_idx_L0[3];
-
- *temp_1-- = currMB->ref_idx_L0[*temp_0++];
- *temp_1-- = currMB->ref_idx_L0[*temp_0++];
- *temp_1-- = currMB->ref_idx_L0[*temp_0++];
- *temp_1-- = currMB->ref_idx_L0[*temp_0++];
-
- /* Global reference index, these values are used in deblock */
- currMB->RefIdx[0] = video->RefPicList0[currMB->ref_idx_L0[0]]->RefIdx;
- currMB->RefIdx[1] = video->RefPicList0[currMB->ref_idx_L0[1]]->RefIdx;
- currMB->RefIdx[2] = video->RefPicList0[currMB->ref_idx_L0[2]]->RefIdx;
- currMB->RefIdx[3] = video->RefPicList0[currMB->ref_idx_L0[3]]->RefIdx;
-
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
- max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
- /* decode mvd_l0 */
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- se_v(stream, &(video->mvd_l0[mbPartIdx][0][0]));
- se_v(stream, &(video->mvd_l0[mbPartIdx][0][1]));
- }
- }
-
- return AVCDEC_SUCCESS;
-}
-
-/* see subclause 7.3.5.2 */
-AVCDec_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)
-{
- int mbPartIdx, subMbPartIdx;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- uint max_ref_idx;
- uint sub_mb_type[4];
- uint code;
-
- memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);
-
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- ue_v(stream, &(sub_mb_type[mbPartIdx]));
- if (sub_mb_type[mbPartIdx] > 3)
- {
- return AVCDEC_FAIL;
- }
-
- }
- /* we have to check the values to make sure they are valid */
- /* assign values to currMB->sub_mb_type[], currMB->MBPartPredMode[][x] */
-
- InterpretSubMBModeP(currMB, sub_mb_type);
-
-
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
-// max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
- max_ref_idx = video->refList0Size - 1;
-
- if (sliceHdr->num_ref_idx_l0_active_minus1 > 0 && currMB->mbMode != AVC_P8ref0)
- {
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- te_v(stream, (uint*)&code, max_ref_idx);
- if (code > max_ref_idx)
- {
- return AVCDEC_FAIL;
- }
- currMB->ref_idx_L0[mbPartIdx] = code;
- }
- }
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
-
- max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
- /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
- max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;*/
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
- {
- se_v(stream, &(video->mvd_l0[mbPartIdx][subMbPartIdx][0]));
- se_v(stream, &(video->mvd_l0[mbPartIdx][subMbPartIdx][1]));
- }
- /* used in deblocking */
- currMB->RefIdx[mbPartIdx] = video->RefPicList0[currMB->ref_idx_L0[mbPartIdx]]->RefIdx;
- }
- return AVCDEC_SUCCESS;
-}
-
-void InterpretMBModeI(AVCMacroblock *mblock, uint mb_type)
-{
- mblock->NumMbPart = 1;
-
- mblock->mb_intra = TRUE;
-
- if (mb_type == 0) /* I_4x4 */
- {
- mblock->mbMode = AVC_I4;
- }
- else if (mb_type < 25) /* I_PCM */
- {
- mblock->mbMode = AVC_I16;
- mblock->i16Mode = (AVCIntra16x16PredMode)((mb_type - 1) & 0x3);
- if (mb_type > 12)
- {
- mblock->CBP = (((mb_type - 13) >> 2) << 4) + 0x0F;
- }
- else
- {
- mblock->CBP = ((mb_type - 1) >> 2) << 4;
- }
- }
- else
- {
- mblock->mbMode = AVC_I_PCM;
- }
-
- return ;
-}
-
-void InterpretMBModeP(AVCMacroblock *mblock, uint mb_type)
-{
- const static int map2PartWidth[5] = {16, 16, 8, 8, 8};
- const static int map2PartHeight[5] = {16, 8, 16, 8, 8};
- const static int map2NumPart[5] = {1, 2, 2, 4, 4};
- const static AVCMBMode map2mbMode[5] = {AVC_P16, AVC_P16x8, AVC_P8x16, AVC_P8, AVC_P8ref0};
-
- mblock->mb_intra = FALSE;
- if (mb_type < 5)
- {
- mblock->mbMode = map2mbMode[mb_type];
- mblock->MbPartWidth = map2PartWidth[mb_type];
- mblock->MbPartHeight = map2PartHeight[mb_type];
- mblock->NumMbPart = map2NumPart[mb_type];
- mblock->NumSubMbPart[0] = mblock->NumSubMbPart[1] =
- mblock->NumSubMbPart[2] = mblock->NumSubMbPart[3] = 1;
- mblock->SubMbPartWidth[0] = mblock->SubMbPartWidth[1] =
- mblock->SubMbPartWidth[2] = mblock->SubMbPartWidth[3] = mblock->MbPartWidth;
- mblock->SubMbPartHeight[0] = mblock->SubMbPartHeight[1] =
- mblock->SubMbPartHeight[2] = mblock->SubMbPartHeight[3] = mblock->MbPartHeight;
- }
- else
- {
- InterpretMBModeI(mblock, mb_type - 5);
- /* set MV and Ref_Idx codes of Intra blocks in P-slices */
- memset(mblock->mvL0, 0, sizeof(int32)*16);
- mblock->ref_idx_L0[0] = mblock->ref_idx_L0[1] = mblock->ref_idx_L0[2] = mblock->ref_idx_L0[3] = -1;
- }
- return ;
-}
-
-void InterpretMBModeB(AVCMacroblock *mblock, uint mb_type)
-{
- const static int map2PartWidth[23] = {8, 16, 16, 16, 16, 8, 16, 8, 16, 8,
- 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 8
- };
- const static int map2PartHeight[23] = {8, 16, 16, 16, 8, 16, 8, 16, 8,
- 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8
- };
- /* see enum AVCMBType declaration */
- const static AVCMBMode map2mbMode[23] = {AVC_BDirect16, AVC_P16, AVC_P16, AVC_P16,
- AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16,
- AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16,
- AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P8
- };
- const static int map2PredMode1[23] = {3, 0, 1, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, -1};
- const static int map2PredMode2[23] = { -1, -1, -1, -1, 0, 0, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 0, 0, 1, 1, 2, 2, -1};
- const static int map2NumPart[23] = { -1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4};
-
- mblock->mb_intra = FALSE;
-
- if (mb_type < 23)
- {
- mblock->mbMode = map2mbMode[mb_type];
- mblock->NumMbPart = map2NumPart[mb_type];
- mblock->MBPartPredMode[0][0] = (AVCPredMode)map2PredMode1[mb_type];
- if (mblock->NumMbPart > 1)
- {
- mblock->MBPartPredMode[1][0] = (AVCPredMode)map2PredMode2[mb_type];
- }
- mblock->MbPartWidth = map2PartWidth[mb_type];
- mblock->MbPartHeight = map2PartHeight[mb_type];
- }
- else
- {
- InterpretMBModeI(mblock, mb_type - 23);
- }
-
- return ;
-}
-
-void InterpretMBModeSI(AVCMacroblock *mblock, uint mb_type)
-{
- mblock->mb_intra = TRUE;
-
- if (mb_type == 0)
- {
- mblock->mbMode = AVC_SI4;
- /* other values are N/A */
- }
- else
- {
- InterpretMBModeI(mblock, mb_type - 1);
- }
- return ;
-}
-
-/* input is mblock->sub_mb_type[] */
-void InterpretSubMBModeP(AVCMacroblock *mblock, uint *sub_mb_type)
-{
- int i, sub_type;
- /* see enum AVCMBType declaration */
-// const static AVCSubMBMode map2subMbMode[4] = {AVC_8x8,AVC_8x4,AVC_4x8,AVC_4x4};
- const static int map2subPartWidth[4] = {8, 8, 4, 4};
- const static int map2subPartHeight[4] = {8, 4, 8, 4};
- const static int map2numSubPart[4] = {1, 2, 2, 4};
-
- for (i = 0; i < 4 ; i++)
- {
- sub_type = (int) sub_mb_type[i];
- // mblock->subMbMode[i] = map2subMbMode[sub_type];
- mblock->NumSubMbPart[i] = map2numSubPart[sub_type];
- mblock->SubMbPartWidth[i] = map2subPartWidth[sub_type];
- mblock->SubMbPartHeight[i] = map2subPartHeight[sub_type];
- }
-
- return ;
-}
-
-void InterpretSubMBModeB(AVCMacroblock *mblock, uint *sub_mb_type)
-{
- int i, j, sub_type;
- /* see enum AVCMBType declaration */
- const static AVCSubMBMode map2subMbMode[13] = {AVC_BDirect8, AVC_8x8, AVC_8x8,
- AVC_8x8, AVC_8x4, AVC_4x8, AVC_8x4, AVC_4x8, AVC_8x4, AVC_4x8, AVC_4x4, AVC_4x4, AVC_4x4
- };
- const static int map2subPartWidth[13] = {4, 8, 8, 8, 8, 4, 8, 4, 8, 4, 4, 4, 4};
- const static int map2subPartHeight[13] = {4, 8, 8, 8, 4, 8, 4, 8, 4, 8, 4, 4, 4};
- const static int map2numSubPart[13] = {1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4};
- const static int map2predMode[13] = {3, 0, 1, 2, 0, 0, 1, 1, 2, 2, 0, 1, 2};
-
- for (i = 0; i < 4 ; i++)
- {
- sub_type = (int) sub_mb_type[i];
- mblock->subMbMode[i] = map2subMbMode[sub_type];
- mblock->NumSubMbPart[i] = map2numSubPart[sub_type];
- mblock->SubMbPartWidth[i] = map2subPartWidth[sub_type];
- mblock->SubMbPartHeight[i] = map2subPartHeight[sub_type];
- for (j = 0; j < 4; j++)
- {
- mblock->MBPartPredMode[i][j] = (AVCPredMode)map2predMode[sub_type];
- }
- }
-
- return ;
-}
-
-/* see subclause 8.3.1 */
-AVCDec_Status DecodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)
-{
- int intra4x4PredModeA = 0, intra4x4PredModeB = 0, predIntra4x4PredMode = 0;
- int component, SubBlock_indx, block_x, block_y;
- int dcOnlyPredictionFlag;
- uint prev_intra4x4_pred_mode_flag[16];
- int rem_intra4x4_pred_mode[16];
- int bindx = 0;
-
- for (component = 0; component < 4; component++) /* partition index */
- {
- block_x = ((component & 1) << 1);
- block_y = ((component >> 1) << 1);
-
- for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) /* sub-partition index */
- {
- BitstreamRead1Bit(stream, &(prev_intra4x4_pred_mode_flag[bindx]));
-
- if (!prev_intra4x4_pred_mode_flag[bindx])
- {
- BitstreamReadBits(stream, 3, (uint*)&(rem_intra4x4_pred_mode[bindx]));
- }
-
- dcOnlyPredictionFlag = 0;
- if (block_x > 0)
- {
- intra4x4PredModeA = currMB->i4Mode[(block_y << 2) + block_x - 1 ];
- }
- else
- {
- if (video->intraAvailA)
- {
- if (video->mblock[video->mbAddrA].mbMode == AVC_I4)
- {
- intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[(block_y << 2) + 3];
- }
- else
- {
- intra4x4PredModeA = AVC_I4_DC;
- }
- }
- else
- {
- dcOnlyPredictionFlag = 1;
- }
- }
-
- if (block_y > 0)
- {
- intra4x4PredModeB = currMB->i4Mode[((block_y-1) << 2) + block_x];
- }
- else
- {
- if (video->intraAvailB)
- {
- if (video->mblock[video->mbAddrB].mbMode == AVC_I4)
- {
- intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[(3 << 2) + block_x];
- }
- else
- {
- intra4x4PredModeB = AVC_I4_DC;
- }
- }
- else
- {
- dcOnlyPredictionFlag = 1;
- }
- }
-
- if (dcOnlyPredictionFlag)
- {
- intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;
- }
-
- predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);
- if (prev_intra4x4_pred_mode_flag[bindx])
- {
- currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)predIntra4x4PredMode;
- }
- else
- {
- if (rem_intra4x4_pred_mode[bindx] < predIntra4x4PredMode)
- {
- currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)rem_intra4x4_pred_mode[bindx];
- }
- else
- {
- currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)(rem_intra4x4_pred_mode[bindx] + 1);
- }
- }
- bindx++;
- block_y += (SubBlock_indx & 1) ;
- block_x += (1 - 2 * (SubBlock_indx & 1)) ;
- }
- }
- return AVCDEC_SUCCESS;
-}
-AVCDec_Status ConcealSlice(AVCDecObject *decvid, int mbnum_start, int mbnum_end)
-{
- AVCCommonObj *video = decvid->common;
- AVCMacroblock *currMB ;
-
- int CurrMbAddr;
-
- if (video->RefPicList0[0] == NULL)
- {
- return AVCDEC_FAIL;
- }
-
- for (CurrMbAddr = mbnum_start; CurrMbAddr < mbnum_end; CurrMbAddr++)
- {
- currMB = video->currMB = &(video->mblock[CurrMbAddr]);
- video->mbNum = CurrMbAddr;
- currMB->slice_id = video->slice_id++; // slice
-
- /* we can remove this check if we don't support Mbaff. */
- /* we can wrap below into an initMB() function which will also
- do necessary reset of macroblock related parameters. */
-
- video->mb_x = CurrMbAddr % video->PicWidthInMbs;
- video->mb_y = CurrMbAddr / video->PicWidthInMbs;
-
- /* check the availability of neighboring macroblocks */
- InitNeighborAvailability(video, CurrMbAddr);
-
- currMB->mb_intra = FALSE;
-
- currMB->mbMode = AVC_SKIP;
- currMB->MbPartWidth = currMB->MbPartHeight = 16;
-
- currMB->NumMbPart = 1;
- currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =
- currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1;
- currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =
- currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;
- currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =
- currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;
- currMB->QPy = 26;
- currMB->QPc = 26;
- memset(currMB->nz_coeff, 0, sizeof(uint8)*NUM_BLKS_IN_MB);
-
- currMB->CBP = 0;
- video->cbp4x4 = 0;
- /* for skipped MB, always look at the first entry in RefPicList */
- currMB->RefIdx[0] = currMB->RefIdx[1] =
- currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;
- InterMBPrediction(video);
-
- video->numMBs--;
-
- }
-
- return AVCDEC_SUCCESS;
-}
-
diff --git a/media/libstagefright/codecs/avc/dec/src/vlc.cpp b/media/libstagefright/codecs/avc/dec/src/vlc.cpp
deleted file mode 100644
index f531249..0000000
--- a/media/libstagefright/codecs/avc/dec/src/vlc.cpp
+++ /dev/null
@@ -1,815 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcdec_lib.h"
-#include "avcdec_bitstream.h"
-
-//#define PV_ARM_V5
-#ifdef PV_ARM_V5
-#define PV_CLZ(A,B) __asm{CLZ (A),(B)} \
- A -= 16;
-#else
-#define PV_CLZ(A,B) while (((B) & 0x8000) == 0) {(B) <<=1; A++;}
-#endif
-
-
-#define PV_NO_CLZ
-
-#ifndef PV_NO_CLZ
-typedef struct tagVLCNumCoeffTrail
-{
- int trailing;
- int total_coeff;
- int length;
-} VLCNumCoeffTrail;
-
-typedef struct tagShiftOffset
-{
- int shift;
- int offset;
-} ShiftOffset;
-
-const VLCNumCoeffTrail NumCoeffTrailOnes[3][67] =
-{
- {{0, 0, 1}, {1, 1, 2}, {2, 2, 3}, {1, 2, 6}, {0, 1, 6}, {3, 3, 5}, {3, 3, 5}, {3, 5, 7},
- {2, 3, 7}, {3, 4, 6}, {3, 4, 6}, {3, 6, 8}, {2, 4, 8}, {1, 3, 8}, {0, 2, 8}, {3, 7, 9},
- {2, 5, 9}, {1, 4, 9}, {0, 3, 9}, {3, 8, 10}, {2, 6, 10}, {1, 5, 10}, {0, 4, 10}, {3, 9, 11},
- {2, 7, 11}, {1, 6, 11}, {0, 5, 11}, {0, 8, 13}, {2, 9, 13}, {1, 8, 13}, {0, 7, 13}, {3, 10, 13},
- {2, 8, 13}, {1, 7, 13}, {0, 6, 13}, {3, 12, 14}, {2, 11, 14}, {1, 10, 14}, {0, 10, 14}, {3, 11, 14},
- {2, 10, 14}, {1, 9, 14}, {0, 9, 14}, {3, 14, 15}, {2, 13, 15}, {1, 12, 15}, {0, 12, 15}, {3, 13, 15},
- {2, 12, 15}, {1, 11, 15}, {0, 11, 15}, {3, 16, 16}, {2, 15, 16}, {1, 15, 16}, {0, 14, 16}, {3, 15, 16},
- {2, 14, 16}, {1, 14, 16}, {0, 13, 16}, {0, 16, 16}, {2, 16, 16}, {1, 16, 16}, {0, 15, 16}, {1, 13, 15},
- { -1, -1, -1}, { -1, -1, -1}, { -1, -1, -1}},
-
- {{1, 1, 2}, {0, 0, 2}, {3, 4, 4}, {3, 3, 4}, {2, 2, 3}, {2, 2, 3}, {3, 6, 6}, {2, 3, 6},
- {1, 3, 6}, {0, 1, 6}, {3, 5, 5}, {3, 5, 5}, {1, 2, 5}, {1, 2, 5}, {3, 7, 6}, {2, 4, 6},
- {1, 4, 6}, {0, 2, 6}, {3, 8, 7}, {2, 5, 7}, {1, 5, 7}, {0, 3, 7}, {0, 5, 8}, {2, 6, 8},
- {1, 6, 8}, {0, 4, 8}, {3, 9, 9}, {2, 7, 9}, {1, 7, 9}, {0, 6, 9}, {3, 11, 11}, {2, 9, 11},
- {1, 9, 11}, {0, 8, 11}, {3, 10, 11}, {2, 8, 11}, {1, 8, 11}, {0, 7, 11}, {0, 11, 12}, {2, 11, 12},
- {1, 11, 12}, {0, 10, 12}, {3, 12, 12}, {2, 10, 12}, {1, 10, 12}, {0, 9, 12}, {3, 14, 13}, {2, 13, 13},
- {1, 13, 13}, {0, 13, 13}, {3, 13, 13}, {2, 12, 13}, {1, 12, 13}, {0, 12, 13}, {1, 15, 14}, {0, 15, 14},
- {2, 15, 14}, {1, 14, 14}, {2, 14, 13}, {2, 14, 13}, {0, 14, 13}, {0, 14, 13}, {3, 16, 14}, {2, 16, 14},
- {1, 16, 14}, {0, 16, 14}, {3, 15, 13}},
-
- {{3, 7, 4}, {3, 6, 4}, {3, 5, 4}, {3, 4, 4}, {3, 3, 4}, {2, 2, 4}, {1, 1, 4}, {0, 0, 4},
- {1, 5, 5}, {2, 5, 5}, {1, 4, 5}, {2, 4, 5}, {1, 3, 5}, {3, 8, 5}, {2, 3, 5}, {1, 2, 5},
- {0, 3, 6}, {2, 7, 6}, {1, 7, 6}, {0, 2, 6}, {3, 9, 6}, {2, 6, 6}, {1, 6, 6}, {0, 1, 6},
- {0, 7, 7}, {0, 6, 7}, {2, 9, 7}, {0, 5, 7}, {3, 10, 7}, {2, 8, 7}, {1, 8, 7}, {0, 4, 7},
- {3, 12, 8}, {2, 11, 8}, {1, 10, 8}, {0, 9, 8}, {3, 11, 8}, {2, 10, 8}, {1, 9, 8}, {0, 8, 8},
- {0, 12, 9}, {2, 13, 9}, {1, 12, 9}, {0, 11, 9}, {3, 13, 9}, {2, 12, 9}, {1, 11, 9}, {0, 10, 9},
- {1, 15, 10}, {0, 14, 10}, {3, 14, 10}, {2, 14, 10}, {1, 14, 10}, {0, 13, 10}, {1, 13, 9}, {1, 13, 9},
- {1, 16, 10}, {0, 15, 10}, {3, 15, 10}, {2, 15, 10}, {3, 16, 10}, {2, 16, 10}, {0, 16, 10}, { -1, -1, -1},
- { -1, -1, -1}, { -1, -1, -1}, { -1, -1, -1}}
-};
-
-
-const ShiftOffset NumCoeffTrailOnes_indx[3][15] =
-{
- {{15, -1}, {14, 0}, {13, 1}, {10, -1}, {9, 3}, {8, 7}, {7, 11}, {6, 15},
- {5, 19}, {3, 19}, {2, 27}, {1, 35}, {0, 43}, {0, 55}, {1, 62}},
-
- {{14, -2}, {12, -2}, {10, -2}, {10, 10}, {9, 14}, {8, 18}, {7, 22}, {5, 22},
- {4, 30}, {3, 38}, {2, 46}, {2, 58}, {3, 65}, {16, 0}, {16, 0}},
-
- {{12, -8}, {11, 0}, {10, 8}, {9, 16}, {8, 24}, {7, 32}, {6, 40}, {6, 52},
- {6, 58}, {6, 61}, {16, 0}, {16, 0}, {16, 0}, {16, 0}, {16, 0}}
-};
-
-const static int nC_table[8] = {0, 0, 1, 1, 2, 2, 2, 2};
-
-#endif
-/**
-See algorithm in subclause 9.1, Table 9-1, Table 9-2. */
-AVCDec_Status ue_v(AVCDecBitstream *bitstream, uint *codeNum)
-{
- uint temp, tmp_cnt;
- int leading_zeros = 0;
- BitstreamShowBits(bitstream, 16, &temp);
- tmp_cnt = temp | 0x1;
-
- PV_CLZ(leading_zeros, tmp_cnt)
-
- if (leading_zeros < 8)
- {
- *codeNum = (temp >> (15 - (leading_zeros << 1))) - 1;
- BitstreamFlushBits(bitstream, (leading_zeros << 1) + 1);
- }
- else
- {
- BitstreamReadBits(bitstream, (leading_zeros << 1) + 1, &temp);
- *codeNum = temp - 1;
- }
-
- return AVCDEC_SUCCESS;
-}
-
-/**
-See subclause 9.1.1, Table 9-3 */
-AVCDec_Status se_v(AVCDecBitstream *bitstream, int *value)
-{
- uint temp, tmp_cnt;
- int leading_zeros = 0;
- BitstreamShowBits(bitstream, 16, &temp);
- tmp_cnt = temp | 0x1;
-
- PV_CLZ(leading_zeros, tmp_cnt)
-
- if (leading_zeros < 8)
- {
- temp >>= (15 - (leading_zeros << 1));
- BitstreamFlushBits(bitstream, (leading_zeros << 1) + 1);
- }
- else
- {
- BitstreamReadBits(bitstream, (leading_zeros << 1) + 1, &temp);
- }
-
- *value = temp >> 1;
-
- if (temp & 0x01) // lsb is signed bit
- *value = -(*value);
-
-// leading_zeros = temp >> 1;
-// *value = leading_zeros - (leading_zeros*2*(temp&1));
-
- return AVCDEC_SUCCESS;
-}
-
-AVCDec_Status se_v32bit(AVCDecBitstream *bitstream, int32 *value)
-{
- int leadingZeros;
- uint32 infobits;
- uint32 codeNum;
-
- if (AVCDEC_SUCCESS != GetEGBitstring32bit(bitstream, &leadingZeros, &infobits))
- return AVCDEC_FAIL;
-
- codeNum = (1 << leadingZeros) - 1 + infobits;
-
- *value = (codeNum + 1) / 2;
-
- if ((codeNum & 0x01) == 0) // lsb is signed bit
- *value = -(*value);
-
- return AVCDEC_SUCCESS;
-}
-
-
-AVCDec_Status te_v(AVCDecBitstream *bitstream, uint *value, uint range)
-{
- if (range > 1)
- {
- ue_v(bitstream, value);
- }
- else
- {
- BitstreamRead1Bit(bitstream, value);
- *value = 1 - (*value);
- }
- return AVCDEC_SUCCESS;
-}
-
-
-
-/* This function is only used for syntax with range from -2^31 to 2^31-1 */
-/* only a few of them in the SPS and PPS */
-AVCDec_Status GetEGBitstring32bit(AVCDecBitstream *bitstream, int *leadingZeros, uint32 *infobits)
-{
- int bit_value;
- uint info_temp;
-
- *leadingZeros = 0;
-
- BitstreamRead1Bit(bitstream, (uint*)&bit_value);
-
- while (!bit_value)
- {
- (*leadingZeros)++;
- BitstreamRead1Bit(bitstream, (uint*)&bit_value);
- }
-
- if (*leadingZeros > 0)
- {
- if (sizeof(uint) == 4) /* 32 bit machine */
- {
- BitstreamReadBits(bitstream, *leadingZeros, (uint*)&info_temp);
- *infobits = (uint32)info_temp;
- }
- else if (sizeof(uint) == 2) /* 16 bit machine */
- {
- *infobits = 0;
- if (*leadingZeros > 16)
- {
- BitstreamReadBits(bitstream, 16, (uint*)&info_temp);
- (*leadingZeros) -= 16;
- *infobits = ((uint32)info_temp) << (*leadingZeros);
- }
-
- BitstreamReadBits(bitstream, *leadingZeros, (uint*)&info_temp);
- *infobits |= (uint32)info_temp ;
- }
- }
- else
- *infobits = 0;
-
- return AVCDEC_SUCCESS;
-}
-
-/* see Table 9-4 assignment of codeNum to values of coded_block_pattern. */
-const static uint8 MapCBP[48][2] =
-{
- {47, 0}, {31, 16}, {15, 1}, { 0, 2}, {23, 4}, {27, 8}, {29, 32}, {30, 3}, { 7, 5}, {11, 10}, {13, 12}, {14, 15},
- {39, 47}, {43, 7}, {45, 11}, {46, 13}, {16, 14}, { 3, 6}, { 5, 9}, {10, 31}, {12, 35}, {19, 37}, {21, 42}, {26, 44},
- {28, 33}, {35, 34}, {37, 36}, {42, 40}, {44, 39}, { 1, 43}, { 2, 45}, { 4, 46}, { 8, 17}, {17, 18}, {18, 20}, {20, 24},
- {24, 19}, { 6, 21}, { 9, 26}, {22, 28}, {25, 23}, {32, 27}, {33, 29}, {34, 30}, {36, 22}, {40, 25}, {38, 38}, {41, 41},
-};
-
-AVCDec_Status DecodeCBP(AVCMacroblock *currMB, AVCDecBitstream *stream)
-{
- uint codeNum;
- uint coded_block_pattern;
-
- ue_v(stream, &codeNum);
-
- if (codeNum > 47)
- {
- return AVCDEC_FAIL;
- }
-
- /* can get rid of the if _OPTIMIZE */
- if (currMB->mbMode == AVC_I4)
- {
- coded_block_pattern = MapCBP[codeNum][0];
- }
- else
- {
- coded_block_pattern = MapCBP[codeNum][1];
- }
-
-// currMB->cbpL = coded_block_pattern&0xF; /* modulo 16 */
-// currMB->cbpC = coded_block_pattern>>4; /* divide 16 */
- currMB->CBP = coded_block_pattern;
-
- return AVCDEC_SUCCESS;
-}
-
-
-/* TO BE OPTIMIZED !!!!! */
-AVCDec_Status ce_TotalCoeffTrailingOnes(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff, int nC)
-{
-#ifdef PV_NO_CLZ
- const static uint8 TotCofNTrail1[75][3] = {{0, 0, 16}/*error */, {0, 0, 16}/*error */, {1, 13, 15}, {1, 13, 15}, {0, 16, 16}, {2, 16, 16}, {1, 16, 16}, {0, 15, 16},
- {3, 16, 16}, {2, 15, 16}, {1, 15, 16}, {0, 14, 16}, {3, 15, 16}, {2, 14, 16}, {1, 14, 16}, {0, 13, 16},
- {3, 14, 15}, {2, 13, 15}, {1, 12, 15}, {0, 12, 15}, {3, 13, 15}, {2, 12, 15}, {1, 11, 15}, {0, 11, 15},
- {3, 12, 14}, {2, 11, 14}, {1, 10, 14}, {0, 10, 14}, {3, 11, 14}, {2, 10, 14}, {1, 9, 14}, {0, 9, 14},
- {0, 8, 13}, {2, 9, 13}, {1, 8, 13}, {0, 7, 13}, {3, 10, 13}, {2, 8, 13}, {1, 7, 13}, {0, 6, 13},
- {3, 9, 11}, {2, 7, 11}, {1, 6, 11}, {0, 5, 11}, {3, 8, 10},
- {2, 6, 10}, {1, 5, 10}, {0, 4, 10}, {3, 7, 9}, {2, 5, 9}, {1, 4, 9}, {0, 3, 9}, {3, 6, 8},
- {2, 4, 8}, {1, 3, 8}, {0, 2, 8}, {3, 5, 7}, {2, 3, 7}, {3, 4, 6}, {3, 4, 6}, {1, 2, 6},
- {1, 2, 6}, {0, 1, 6}, {0, 1, 6}, {3, 3, 5}, {3, 3, 5}, {3, 3, 5}, {3, 3, 5}, {2, 2, 3},
- {1, 1, 2}, {1, 1, 2}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1}
- };
-
- const static uint8 TotCofNTrail2[84][3] = {{0, 0, 14 /* error */}, {0, 0, 14/*error */}, {3, 15, 13}, {3, 15, 13}, {3, 16, 14}, {2, 16, 14}, {1, 16, 14}, {0, 16, 14},
- {1, 15, 14}, {0, 15, 14}, {2, 15, 14}, {1, 14, 14}, {2, 14, 13}, {2, 14, 13}, {0, 14, 13}, {0, 14, 13},
- {3, 14, 13}, {2, 13, 13}, {1, 13, 13}, {0, 13, 13}, {3, 13, 13}, {2, 12, 13}, {1, 12, 13}, {0, 12, 13},
- {0, 11, 12}, {2, 11, 12}, {1, 11, 12}, {0, 10, 12}, {3, 12, 12}, {2, 10, 12}, {1, 10, 12}, {0, 9, 12},
- {3, 11, 11}, {2, 9, 11}, {1, 9, 11}, {0, 8, 11}, {3, 10, 11}, {2, 8, 11}, {1, 8, 11}, {0, 7, 11},
- {3, 9, 9}, {2, 7, 9}, {1, 7, 9}, {0, 6, 9}, {0, 5, 8}, {0, 5, 8}, {2, 6, 8}, {2, 6, 8},
- {1, 6, 8}, {1, 6, 8}, {0, 4, 8}, {0, 4, 8}, {3, 8, 7}, {2, 5, 7}, {1, 5, 7}, {0, 3, 7},
- {3, 7, 6}, {3, 7, 6}, {2, 4, 6}, {2, 4, 6}, {1, 4, 6}, {1, 4, 6}, {0, 2, 6}, {0, 2, 6},
- {3, 6, 6}, {2, 3, 6}, {1, 3, 6}, {0, 1, 6}, {3, 5, 5}, {3, 5, 5}, {1, 2, 5}, {1, 2, 5},
- {3, 4, 4}, {3, 3, 4}, {2, 2, 3}, {2, 2, 3}, {1, 1, 2}, {1, 1, 2}, {1, 1, 2}, {1, 1, 2},
- {0, 0, 2}, {0, 0, 2}, {0, 0, 2}, {0, 0, 2}
- };
-
- const static uint8 TotCofNTrail3[64][3] = {{0, 0, 10/*error*/}, {0, 16, 10}, {3, 16, 10}, {2, 16, 10}, {1, 16, 10}, {0, 15, 10}, {3, 15, 10},
- {2, 15, 10}, {1, 15, 10}, {0, 14, 10}, {3, 14, 10}, {2, 14, 10}, {1, 14, 10}, {0, 13, 10}, {1, 13, 9},
- {1, 13, 9}, {0, 12, 9}, {2, 13, 9}, {1, 12, 9}, {0, 11, 9}, {3, 13, 9}, {2, 12, 9}, {1, 11, 9},
- {0, 10, 9}, {3, 12, 8}, {2, 11, 8}, {1, 10, 8}, {0, 9, 8}, {3, 11, 8}, {2, 10, 8}, {1, 9, 8},
- {0, 8, 8}, {0, 7, 7}, {0, 6, 7}, {2, 9, 7}, {0, 5, 7}, {3, 10, 7}, {2, 8, 7}, {1, 8, 7},
- {0, 4, 7}, {0, 3, 6}, {2, 7, 6}, {1, 7, 6}, {0, 2, 6}, {3, 9, 6}, {2, 6, 6}, {1, 6, 6},
- {0, 1, 6}, {1, 5, 5}, {2, 5, 5}, {1, 4, 5}, {2, 4, 5}, {1, 3, 5}, {3, 8, 5}, {2, 3, 5},
- {1, 2, 5}, {3, 7, 4}, {3, 6, 4}, {3, 5, 4}, {3, 4, 4}, {3, 3, 4}, {2, 2, 4}, {1, 1, 4},
- {0, 0, 4}
- };
-#endif
- uint code;
-
-#ifdef PV_NO_CLZ
- uint8 *pcode;
- if (nC < 2)
- {
- BitstreamShowBits(stream, 16, &code);
-
- if (code >= 8192)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>13)+65+2][0]);
- }
- else if (code >= 2048)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>9)+50+2][0]);
- }
- else if (code >= 1024)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>8)+46+2][0]);
- }
- else if (code >= 512)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>7)+42+2][0]);
- }
- else if (code >= 256)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>6)+38+2][0]);
- }
- else if (code >= 128)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>5)+34+2][0]);
- }
- else if (code >= 64)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>3)+22+2][0]);
- }
- else if (code >= 32)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>2)+14+2][0]);
- }
- else if (code >= 16)
- {
- pcode = (uint8*) & (TotCofNTrail1[(code>>1)+6+2][0]);
- }
- else
- {
- pcode = (uint8*) & (TotCofNTrail1[(code-2)+2][0]);
- }
-
- *TrailingOnes = pcode[0];
- *TotalCoeff = pcode[1];
-
- BitstreamFlushBits(stream, pcode[2]);
- }
- else if (nC < 4)
- {
- BitstreamShowBits(stream, 14, &code);
-
- if (code >= 4096)
- {
- pcode = (uint8*) & (TotCofNTrail2[(code>>10)+66+2][0]);
- }
- else if (code >= 2048)
- {
- pcode = (uint8*) & (TotCofNTrail2[(code>>8)+54+2][0]);
- }
- else if (code >= 512)
- {
- pcode = (uint8*) & (TotCofNTrail2[(code>>7)+46+2][0]);
- }
- else if (code >= 128)
- {
- pcode = (uint8*) & (TotCofNTrail2[(code>>5)+34+2][0]);
- }
- else if (code >= 64)
- {
- pcode = (uint8*) & (TotCofNTrail2[(code>>3)+22+2][0]);
- }
- else if (code >= 32)
- {
- pcode = (uint8*) & (TotCofNTrail2[(code>>2)+14+2][0]);
- }
- else if (code >= 16)
- {
- pcode = (uint8*) & (TotCofNTrail2[(code>>1)+6+2][0]);
- }
- else
- {
- pcode = (uint8*) & (TotCofNTrail2[code-2+2][0]);
- }
- *TrailingOnes = pcode[0];
- *TotalCoeff = pcode[1];
-
- BitstreamFlushBits(stream, pcode[2]);
- }
- else if (nC < 8)
- {
- BitstreamShowBits(stream, 10, &code);
-
- if (code >= 512)
- {
- pcode = (uint8*) & (TotCofNTrail3[(code>>6)+47+1][0]);
- }
- else if (code >= 256)
- {
- pcode = (uint8*) & (TotCofNTrail3[(code>>5)+39+1][0]);
- }
- else if (code >= 128)
- {
- pcode = (uint8*) & (TotCofNTrail3[(code>>4)+31+1][0]);
- }
- else if (code >= 64)
- {
- pcode = (uint8*) & (TotCofNTrail3[(code>>3)+23+1][0]);
- }
- else if (code >= 32)
- {
- pcode = (uint8*) & (TotCofNTrail3[(code>>2)+15+1][0]);
- }
- else if (code >= 16)
- {
- pcode = (uint8*) & (TotCofNTrail3[(code>>1)+7+1][0]);
- }
- else
- {
- pcode = (uint8*) & (TotCofNTrail3[code-1+1][0]);
- }
- *TrailingOnes = pcode[0];
- *TotalCoeff = pcode[1];
-
- BitstreamFlushBits(stream, pcode[2]);
- }
- else
- {
- /* read 6 bit FLC */
- BitstreamReadBits(stream, 6, &code);
-
-
- *TrailingOnes = code & 3;
- *TotalCoeff = (code >> 2) + 1;
-
- if (*TotalCoeff > 16)
- {
- *TotalCoeff = 16; // _ERROR
- }
-
- if (code == 3)
- {
- *TrailingOnes = 0;
- (*TotalCoeff)--;
- }
- }
-#else
- const VLCNumCoeffTrail *ptr;
- const ShiftOffset *ptr_indx;
- uint temp, leading_zeros = 0;
-
- if (nC < 8)
- {
-
- BitstreamShowBits(stream, 16, &code);
- temp = code | 1;
-
- PV_CLZ(leading_zeros, temp)
-
- temp = nC_table[nC];
- ptr_indx = &NumCoeffTrailOnes_indx[temp][leading_zeros];
- ptr = &NumCoeffTrailOnes[temp][(code >> ptr_indx->shift) + ptr_indx->offset];
- *TrailingOnes = ptr->trailing;
- *TotalCoeff = ptr->total_coeff;
- BitstreamFlushBits(stream, ptr->length);
- }
- else
- {
- /* read 6 bit FLC */
- BitstreamReadBits(stream, 6, &code);
-
-
- *TrailingOnes = code & 3;
- *TotalCoeff = (code >> 2) + 1;
-
- if (*TotalCoeff > 16)
- {
- *TotalCoeff = 16; // _ERROR
- }
-
- if (code == 3)
- {
- *TrailingOnes = 0;
- (*TotalCoeff)--;
- }
- }
-#endif
- return AVCDEC_SUCCESS;
-}
-
-/* TO BE OPTIMIZED !!!!! */
-AVCDec_Status ce_TotalCoeffTrailingOnesChromaDC(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff)
-{
- AVCDec_Status status;
-
- const static uint8 TotCofNTrail5[21][3] =
- {
- {3, 4, 7}, {3, 4, 7}, {2, 4, 8}, {1, 4, 8}, {2, 3, 7}, {2, 3, 7}, {1, 3, 7},
- {1, 3, 7}, {0, 4, 6}, {0, 3, 6}, {0, 2, 6}, {3, 3, 6}, {1, 2, 6}, {0, 1, 6},
- {2, 2, 3}, {0, 0, 2}, {0, 0, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}
- };
-
- uint code;
- uint8 *pcode;
-
- status = BitstreamShowBits(stream, 8, &code);
-
- if (code >= 32)
- {
- pcode = (uint8*) & (TotCofNTrail5[(code>>5)+13][0]);
- }
- else if (code >= 8)
- {
- pcode = (uint8*) & (TotCofNTrail5[(code>>2)+6][0]);
- }
- else
- {
- pcode = (uint8*) & (TotCofNTrail5[code][0]);
- }
-
- *TrailingOnes = pcode[0];
- *TotalCoeff = pcode[1];
-
- BitstreamFlushBits(stream, pcode[2]);
-
- return status;
-}
-
-/* see Table 9-6 */
-AVCDec_Status ce_LevelPrefix(AVCDecBitstream *stream, uint *code)
-{
- uint temp;
- uint leading_zeros = 0;
- BitstreamShowBits(stream, 16, &temp);
- temp |= 1 ;
-
- PV_CLZ(leading_zeros, temp)
-
- BitstreamFlushBits(stream, leading_zeros + 1);
- *code = leading_zeros;
- return AVCDEC_SUCCESS;
-}
-
-/* see Table 9-7 and 9-8 */
-AVCDec_Status ce_TotalZeros(AVCDecBitstream *stream, int *code, int TotalCoeff)
-{
- const static uint8 TotZero1[28][2] = {{15, 9}, {14, 9}, {13, 9}, {12, 8},
- {12, 8}, {11, 8}, {11, 8}, {10, 7}, {9, 7}, {8, 6}, {8, 6}, {7, 6}, {7, 6}, {6, 5}, {6, 5},
- {6, 5}, {6, 5}, {5, 5}, {5, 5}, {5, 5}, {5, 5}, {4, 4}, {3, 4},
- {2, 3}, {2, 3}, {1, 3}, {1, 3}, {0, 1}
- };
-
- const static uint8 TotZero2n3[2][18][2] = {{{14, 6}, {13, 6}, {12, 6}, {11, 6},
- {10, 5}, {10, 5}, {9, 5}, {9, 5}, {8, 4}, {7, 4}, {6, 4}, {5, 4}, {4, 3}, {4, 3},
- {3, 3}, {2, 3}, {1, 3}, {0, 3}},
-
- /*const static uint8 TotZero3[18][2]=*/{{13, 6}, {11, 6}, {12, 5}, {12, 5}, {10, 5},
- {10, 5}, {9, 5}, {9, 5}, {8, 4}, {5, 4}, {4, 4}, {0, 4}, {7, 3}, {7, 3}, {6, 3}, {3, 3},
- {2, 3}, {1, 3}}
- };
-
- const static uint8 TotZero4[17][2] = {{12, 5}, {11, 5}, {10, 5}, {0, 5}, {9, 4},
- {9, 4}, {7, 4}, {7, 4}, {3, 4}, {3, 4}, {2, 4}, {2, 4}, {8, 3}, {6, 3}, {5, 3}, {4, 3}, {1, 3}
- };
-
- const static uint8 TotZero5[13][2] = {{11, 5}, {9, 5}, {10, 4}, {8, 4}, {2, 4},
- {1, 4}, {0, 4}, {7, 3}, {7, 3}, {6, 3}, {5, 3}, {4, 3}, {3, 3}
- };
-
- const static uint8 TotZero6to10[5][15][2] = {{{10, 6}, {0, 6}, {1, 5}, {1, 5}, {8, 4},
- {8, 4}, {8, 4}, {8, 4}, {9, 3}, {7, 3}, {6, 3}, {5, 3}, {4, 3}, {3, 3}, {2, 3}},
-
- /*const static uint8 TotZero7[15][2]=*/{{9, 6}, {0, 6}, {1, 5}, {1, 5}, {7, 4},
- {7, 4}, {7, 4}, {7, 4}, {8, 3}, {6, 3}, {4, 3}, {3, 3}, {2, 3}, {5, 2}, {5, 2}},
-
- /*const static uint8 TotZero8[15][2]=*/{{8, 6}, {0, 6}, {2, 5}, {2, 5}, {1, 4},
- {1, 4}, {1, 4}, {1, 4}, {7, 3}, {6, 3}, {3, 3}, {5, 2}, {5, 2}, {4, 2}, {4, 2}},
-
- /*const static uint8 TotZero9[15][2]=*/{{1, 6}, {0, 6}, {7, 5}, {7, 5}, {2, 4},
- {2, 4}, {2, 4}, {2, 4}, {5, 3}, {6, 2}, {6, 2}, {4, 2}, {4, 2}, {3, 2}, {3, 2}},
-
- /*const static uint8 TotZero10[11][2]=*/{{1, 5}, {0, 5}, {6, 4}, {6, 4}, {2, 3},
- {2, 3}, {2, 3}, {2, 3}, {5, 2}, {4, 2}, {3, 2}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}
- };
-
- const static uint8 TotZero11[7][2] = {{0, 4}, {1, 4}, {2, 3}, {2, 3}, {3, 3}, {5, 3}, {4, 1}};
-
- const static uint8 TotZero12to15[4][5][2] =
- {
- {{3, 1}, {2, 2}, {4, 3}, {1, 4}, {0, 4}},
- {{2, 1}, {3, 2}, {1, 3}, {0, 3}, {0, 0}},
- {{2, 1}, {1, 2}, {0, 2}, {0, 0}, {0, 0}},
- {{1, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}}
- };
-
- uint temp, mask;
- int indx;
- uint8 *pcode;
-
- if (TotalCoeff == 1)
- {
- BitstreamShowBits(stream, 9, &temp);
-
- if (temp >= 256)
- {
- pcode = (uint8*) & (TotZero1[27][0]);
- }
- else if (temp >= 64)
- {
- pcode = (uint8*) & (TotZero1[(temp>>5)+19][0]);
- }
- else if (temp >= 8)
- {
- pcode = (uint8*) & (TotZero1[(temp>>2)+5][0]);
- }
- else
- {
- pcode = (uint8*) & (TotZero1[temp-1][0]);
- }
-
- }
- else if (TotalCoeff == 2 || TotalCoeff == 3)
- {
- BitstreamShowBits(stream, 6, &temp);
-
- if (temp >= 32)
- {
- pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][(temp>>3)+10][0]);
- }
- else if (temp >= 8)
- {
- pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][(temp>>2)+6][0]);
- }
- else
- {
- pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][temp][0]);
- }
- }
- else if (TotalCoeff == 4)
- {
- BitstreamShowBits(stream, 5, &temp);
-
- if (temp >= 12)
- {
- pcode = (uint8*) & (TotZero4[(temp>>2)+9][0]);
- }
- else
- {
- pcode = (uint8*) & (TotZero4[temp][0]);
- }
- }
- else if (TotalCoeff == 5)
- {
- BitstreamShowBits(stream, 5, &temp);
-
- if (temp >= 16)
- {
- pcode = (uint8*) & (TotZero5[(temp>>2)+5][0]);
- }
- else if (temp >= 2)
- {
- pcode = (uint8*) & (TotZero5[(temp>>1)+1][0]);
- }
- else
- {
- pcode = (uint8*) & (TotZero5[temp][0]);
- }
- }
- else if (TotalCoeff >= 6 && TotalCoeff <= 10)
- {
- if (TotalCoeff == 10)
- {
- BitstreamShowBits(stream, 5, &temp);
- }
- else
- {
- BitstreamShowBits(stream, 6, &temp);
- }
-
-
- if (temp >= 8)
- {
- pcode = (uint8*) & (TotZero6to10[TotalCoeff-6][(temp>>3)+7][0]);
- }
- else
- {
- pcode = (uint8*) & (TotZero6to10[TotalCoeff-6][temp][0]);
- }
- }
- else if (TotalCoeff == 11)
- {
- BitstreamShowBits(stream, 4, &temp);
-
-
- if (temp >= 8)
- {
- pcode = (uint8*) & (TotZero11[6][0]);
- }
- else if (temp >= 4)
- {
- pcode = (uint8*) & (TotZero11[(temp>>1)+2][0]);
- }
- else
- {
- pcode = (uint8*) & (TotZero11[temp][0]);
- }
- }
- else
- {
- BitstreamShowBits(stream, (16 - TotalCoeff), &temp);
- mask = 1 << (15 - TotalCoeff);
- indx = 0;
- while ((temp&mask) == 0 && indx < (16 - TotalCoeff)) /* search location of 1 bit */
- {
- mask >>= 1;
- indx++;
- }
-
- pcode = (uint8*) & (TotZero12to15[TotalCoeff-12][indx]);
- }
-
- *code = pcode[0];
- BitstreamFlushBits(stream, pcode[1]);
-
- return AVCDEC_SUCCESS;
-}
-
-/* see Table 9-9 */
-AVCDec_Status ce_TotalZerosChromaDC(AVCDecBitstream *stream, int *code, int TotalCoeff)
-{
- const static uint8 TotZeroChrom1to3[3][8][2] =
- {
- {{3, 3}, {2, 3}, {1, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}},
- {{2, 2}, {2, 2}, {1, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}},
- {{1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}},
- };
-
-
- uint temp;
- uint8 *pcode;
-
- BitstreamShowBits(stream, 3, &temp);
- pcode = (uint8*) & (TotZeroChrom1to3[TotalCoeff-1][temp]);
-
- *code = pcode[0];
-
- BitstreamFlushBits(stream, pcode[1]);
-
- return AVCDEC_SUCCESS;
-}
-
-/* see Table 9-10 */
-AVCDec_Status ce_RunBefore(AVCDecBitstream *stream, int *code, int zerosLeft)
-{
- const static int codlen[6] = {1, 2, 2, 3, 3, 3}; /* num bits to read */
- const static uint8 RunBeforeTab[6][8][2] = {{{1, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
- /*const static int RunBefore2[4][2]=*/{{2, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
- /*const static int RunBefore3[4][2]=*/{{3, 2}, {2, 2}, {1, 2}, {0, 2}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
- /*const static int RunBefore4[7][2]=*/{{4, 3}, {3, 3}, {2, 2}, {2, 2}, {1, 2}, {1, 2}, {0, 2}, {0, 2}},
- /*const static int RunBefore5[7][2]=*/{{5, 3}, {4, 3}, {3, 3}, {2, 3}, {1, 2}, {1, 2}, {0, 2}, {0, 2}},
- /*const static int RunBefore6[7][2]=*/{{1, 3}, {2, 3}, {4, 3}, {3, 3}, {6, 3}, {5, 3}, {0, 2}, {0, 2}}
- };
-
- uint temp;
- uint8 *pcode;
- int indx;
-
- if (zerosLeft <= 6)
- {
- BitstreamShowBits(stream, codlen[zerosLeft-1], &temp);
-
- pcode = (uint8*) & (RunBeforeTab[zerosLeft-1][temp][0]);
-
- *code = pcode[0];
-
- BitstreamFlushBits(stream, pcode[1]);
- }
- else
- {
- BitstreamReadBits(stream, 3, &temp);
- if (temp)
- {
- *code = 7 - temp;
- }
- else
- {
- BitstreamShowBits(stream, 9, &temp);
- temp <<= 7;
- temp |= 1;
- indx = 0;
- PV_CLZ(indx, temp)
- *code = 7 + indx;
- BitstreamFlushBits(stream, indx + 1);
- }
- }
-
-
- return AVCDEC_SUCCESS;
-}
diff --git a/media/libstagefright/codecs/common/cmnMemory.c b/media/libstagefright/codecs/common/cmnMemory.c
index dd7c26d..aa52bd9 100644
--- a/media/libstagefright/codecs/common/cmnMemory.c
+++ b/media/libstagefright/codecs/common/cmnMemory.c
@@ -21,10 +21,8 @@
*******************************************************************************/
#include "cmnMemory.h"
-#include <malloc.h>
-#if defined LINUX
+#include <stdlib.h>
#include <string.h>
-#endif
//VO_MEM_OPERATOR g_memOP;
diff --git a/media/libstagefright/codecs/common/include/voType.h b/media/libstagefright/codecs/common/include/voType.h
index 70b2e83..5f659ab 100644
--- a/media/libstagefright/codecs/common/include/voType.h
+++ b/media/libstagefright/codecs/common/include/voType.h
@@ -101,7 +101,7 @@
since the compiler does not support the way the component was written.
*/
#ifndef VO_SKIP64BIT
-#ifdef _WIN32
+#ifdef _MSC_VER
/** VO_U64 is a 64 bit unsigned quantity that is 64 bit word aligned */
typedef unsigned __int64 VO_U64;
/** VO_S64 is a 64 bit signed quantity that is 64 bit word aligned */
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index a4ca32d..3246021 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -93,6 +93,11 @@
GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
| GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP));
+ CHECK_EQ(0,
+ native_window_set_scaling_mode(
+ mNativeWindow.get(),
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW));
+
// Width must be multiple of 32???
CHECK_EQ(0, native_window_set_buffers_geometry(
mNativeWindow.get(),
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index ca61b3d..90d64ba 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -494,6 +494,12 @@
bool firstTime = (mPlaylist == NULL);
+ if ((ssize_t)bandwidthIndex != mPrevBandwidthIndex) {
+ // If we switch bandwidths, do not pay any heed to whether
+ // playlists changed since the last time...
+ mPlaylist.clear();
+ }
+
bool unchanged;
sp<M3UParser> playlist = fetchPlaylist(url.c_str(), &unchanged);
if (playlist == NULL) {
@@ -779,7 +785,10 @@
keySource->setUID(mUID);
}
- status_t err = keySource->connect(keyURI.c_str());
+ status_t err =
+ keySource->connect(
+ keyURI.c_str(),
+ mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
if (err == OK) {
size_t offset = 0;
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 123fbf8..9df9f59 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -106,21 +106,38 @@
return true;
}
- size_t n = strlen(baseURL);
- if (baseURL[n - 1] == '/') {
- out->setTo(baseURL);
- out->append(url);
- } else {
- const char *slashPos = strrchr(baseURL, '/');
+ if (url[0] == '/') {
+ // URL is an absolute path.
- if (slashPos > &baseURL[6]) {
- out->setTo(baseURL, slashPos - baseURL);
+ char *protocolEnd = strstr(baseURL, "//") + 2;
+ char *pathStart = strchr(protocolEnd, '/');
+
+ if (pathStart != NULL) {
+ out->setTo(baseURL, pathStart - baseURL);
} else {
out->setTo(baseURL);
}
- out->append("/");
out->append(url);
+ } else {
+ // URL is a relative path
+
+ size_t n = strlen(baseURL);
+ if (baseURL[n - 1] == '/') {
+ out->setTo(baseURL);
+ out->append(url);
+ } else {
+ const char *slashPos = strrchr(baseURL, '/');
+
+ if (slashPos > &baseURL[6]) {
+ out->setTo(baseURL, slashPos - baseURL);
+ } else {
+ out->setTo(baseURL);
+ }
+
+ out->append("/");
+ out->append(url);
+ }
}
LOGV("base:'%s', url:'%s' => '%s'", baseURL, url, out->c_str());
diff --git a/media/libstagefright/include/ChromiumHTTPDataSource.h b/media/libstagefright/include/ChromiumHTTPDataSource.h
index d833e2e..18f8913 100644
--- a/media/libstagefright/include/ChromiumHTTPDataSource.h
+++ b/media/libstagefright/include/ChromiumHTTPDataSource.h
@@ -51,6 +51,8 @@
virtual String8 getMIMEType() const;
+ virtual status_t reconnectAtOffset(off64_t offset);
+
protected:
virtual ~ChromiumHTTPDataSource();
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index 2d6cb84..22b2855 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -77,6 +77,10 @@
kWhatRead = 'read',
};
+ enum {
+ kMaxNumRetries = 10,
+ };
+
sp<DataSource> mSource;
sp<AHandlerReflector<NuCachedSource2> > mReflector;
sp<ALooper> mLooper;
@@ -93,6 +97,8 @@
bool mFetching;
int64_t mLastFetchTimeUs;
+ int32_t mNumRetriesLeft;
+
void onMessageReceived(const sp<AMessage> &msg);
void onFetch();
void onRead(const sp<AMessage> &msg);
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 02b1c8e..1e33f05 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -38,7 +38,6 @@
{ "OMX.google.amrnb.decoder", "amrdec", "audio_decoder.amrnb" },
{ "OMX.google.amrwb.decoder", "amrdec", "audio_decoder.amrwb" },
{ "OMX.google.h264.decoder", "h264dec", "video_decoder.avc" },
- { "OMX.google.avc.decoder", "avcdec", "video_decoder.avc" },
{ "OMX.google.g711.alaw.decoder", "g711dec", "audio_decoder.g711alaw" },
{ "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" },
{ "OMX.google.h263.decoder", "mpeg4dec", "video_decoder.h263" },
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
new file mode 100644
index 0000000..3ea8f39
--- /dev/null
+++ b/media/libstagefright/tests/Android.mk
@@ -0,0 +1,53 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+ifneq ($(TARGET_SIMULATOR),true)
+
+LOCAL_MODULE := SurfaceMediaSource_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+ SurfaceMediaSource_test.cpp \
+ DummyRecorder.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ libEGL \
+ libGLESv2 \
+ libandroid \
+ libbinder \
+ libcutils \
+ libgui \
+ libstlport \
+ libui \
+ libutils \
+ libstagefright \
+ libstagefright_omx \
+ libstagefright_foundation \
+
+LOCAL_STATIC_LIBRARIES := \
+ libgtest \
+ libgtest_main \
+
+LOCAL_C_INCLUDES := \
+ bionic \
+ bionic/libstdc++/include \
+ external/gtest/include \
+ external/stlport/stlport \
+ frameworks/base/media/libstagefright \
+ frameworks/base/media/libstagefright/include \
+ $(TOP)/frameworks/base/include/media/stagefright/openmax \
+
+include $(BUILD_EXECUTABLE)
+
+endif
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
diff --git a/media/libstagefright/tests/DummyRecorder.cpp b/media/libstagefright/tests/DummyRecorder.cpp
new file mode 100644
index 0000000..8d75d6b
--- /dev/null
+++ b/media/libstagefright/tests/DummyRecorder.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DummyRecorder"
+// #define LOG_NDEBUG 0
+
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaSource.h>
+#include "DummyRecorder.h"
+
+#include <utils/Log.h>
+
+namespace android {
+
+// static
+void *DummyRecorder::threadWrapper(void *pthis) {
+ LOGV("ThreadWrapper: %p", pthis);
+ DummyRecorder *writer = static_cast<DummyRecorder *>(pthis);
+ writer->readFromSource();
+ return NULL;
+}
+
+
+status_t DummyRecorder::start() {
+ LOGV("Start");
+ mStarted = true;
+
+ mSource->start();
+
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+ int err = pthread_create(&mThread, &attr, threadWrapper, this);
+ pthread_attr_destroy(&attr);
+
+ if (err) {
+ LOGE("Error creating thread!");
+ return -ENODEV;
+ }
+ return OK;
+}
+
+
+status_t DummyRecorder::stop() {
+ LOGV("Stop");
+ mStarted = false;
+
+ mSource->stop();
+ void *dummy;
+ pthread_join(mThread, &dummy);
+ status_t err = (status_t) dummy;
+
+ LOGV("Ending the reading thread");
+ return err;
+}
+
+// pretend to read the source buffers
+void DummyRecorder::readFromSource() {
+ LOGV("ReadFromSource");
+ if (!mStarted) {
+ return;
+ }
+
+ status_t err = OK;
+ MediaBuffer *buffer;
+ LOGV("A fake writer accessing the frames");
+ while (mStarted && (err = mSource->read(&buffer)) == OK){
+ // if not getting a valid buffer from source, then exit
+ if (buffer == NULL) {
+ return;
+ }
+ buffer->release();
+ buffer = NULL;
+ }
+}
+
+
+} // end of namespace android
diff --git a/media/libstagefright/tests/DummyRecorder.h b/media/libstagefright/tests/DummyRecorder.h
new file mode 100644
index 0000000..1cbea1b
--- /dev/null
+++ b/media/libstagefright/tests/DummyRecorder.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DUMMY_RECORDER_H_
+#define DUMMY_RECORDER_H_
+
+#include <pthread.h>
+#include <utils/String8.h>
+#include <media/stagefright/foundation/ABase.h>
+
+
+namespace android {
+
+class MediaSource;
+class MediaBuffer;
+
+class DummyRecorder {
+ public:
+ // The media source from which this will receive frames
+ sp<MediaSource> mSource;
+ bool mStarted;
+ pthread_t mThread;
+
+ status_t start();
+ status_t stop();
+
+ // actual entry point for the thread
+ void readFromSource();
+
+ // static function to wrap the actual thread entry point
+ static void *threadWrapper(void *pthis);
+
+ DummyRecorder(const sp<MediaSource> &source) : mSource(source)
+ , mStarted(false) {}
+ ~DummyRecorder( ) {}
+
+ private:
+
+ DISALLOW_EVIL_CONSTRUCTORS(DummyRecorder);
+};
+
+} // end of namespace android
+#endif
+
+
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
new file mode 100644
index 0000000..ce10812
--- /dev/null
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SurfaceMediaSource_test"
+// #define LOG_NDEBUG 0
+
+#include <gtest/gtest.h>
+#include <utils/String8.h>
+#include <utils/Errors.h>
+
+#include <media/stagefright/SurfaceMediaSource.h>
+
+#include <gui/SurfaceTextureClient.h>
+#include <ui/GraphicBuffer.h>
+#include <surfaceflinger/ISurfaceComposer.h>
+#include <surfaceflinger/Surface.h>
+#include <surfaceflinger/SurfaceComposerClient.h>
+
+#include <binder/ProcessState.h>
+#include <ui/FramebufferNativeWindow.h>
+
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MPEG4Writer.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include <OMX_Component.h>
+
+#include "DummyRecorder.h"
+
+namespace android {
+
+
+class SurfaceMediaSourceTest : public ::testing::Test {
+public:
+
+ SurfaceMediaSourceTest( ): mYuvTexWidth(64), mYuvTexHeight(66) { }
+ sp<MPEG4Writer> setUpWriter(OMXClient &client );
+ void oneBufferPass(int width, int height );
+ static void fillYV12Buffer(uint8_t* buf, int w, int h, int stride) ;
+ static void fillYV12BufferRect(uint8_t* buf, int w, int h,
+ int stride, const android_native_rect_t& rect) ;
+protected:
+
+ virtual void SetUp() {
+ mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
+ mSMS->setSynchronousMode(true);
+ mSTC = new SurfaceTextureClient(mSMS);
+ mANW = mSTC;
+
+ }
+
+
+ virtual void TearDown() {
+ mSMS.clear();
+ mSTC.clear();
+ mANW.clear();
+ }
+
+ const int mYuvTexWidth;// = 64;
+ const int mYuvTexHeight;// = 66;
+
+ sp<SurfaceMediaSource> mSMS;
+ sp<SurfaceTextureClient> mSTC;
+ sp<ANativeWindow> mANW;
+
+};
+
+void SurfaceMediaSourceTest::oneBufferPass(int width, int height ) {
+ LOGV("One Buffer Pass");
+ ANativeWindowBuffer* anb;
+ ASSERT_EQ(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));
+ ASSERT_TRUE(anb != NULL);
+
+ sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ ASSERT_EQ(NO_ERROR, mANW->lockBuffer(mANW.get(), buf->getNativeBuffer()));
+
+    // Fill the buffer with a checkerboard pattern
+ uint8_t* img = NULL;
+ buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
+ SurfaceMediaSourceTest::fillYV12Buffer(img, width, height, buf->getStride());
+ buf->unlock();
+
+ ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer()));
+}
+
+sp<MPEG4Writer> SurfaceMediaSourceTest::setUpWriter(OMXClient &client ) {
+ // Writing to a file
+ const char *fileName = "/sdcard/outputSurfEnc.mp4";
+ sp<MetaData> enc_meta = new MetaData;
+ enc_meta->setInt32(kKeyBitRate, 300000);
+ enc_meta->setInt32(kKeyFrameRate, 30);
+
+ enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+
+ sp<MetaData> meta = mSMS->getFormat();
+
+ int32_t width, height, stride, sliceHeight, colorFormat;
+ CHECK(meta->findInt32(kKeyWidth, &width));
+ CHECK(meta->findInt32(kKeyHeight, &height));
+ CHECK(meta->findInt32(kKeyStride, &stride));
+ CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
+ CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));
+
+ enc_meta->setInt32(kKeyWidth, width);
+ enc_meta->setInt32(kKeyHeight, height);
+ enc_meta->setInt32(kKeyIFramesInterval, 1);
+ enc_meta->setInt32(kKeyStride, stride);
+ enc_meta->setInt32(kKeySliceHeight, sliceHeight);
+ // TODO: overwriting the colorformat since the format set by GRAlloc
+ // could be wrong or not be read by OMX
+ enc_meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420Planar);
+ // colorFormat);
+
+
+ sp<MediaSource> encoder =
+ OMXCodec::Create(
+ client.interface(), enc_meta, true /* createEncoder */, mSMS);
+
+ sp<MPEG4Writer> writer = new MPEG4Writer(fileName);
+ writer->addSource(encoder);
+
+ return writer;
+}
+
+// Fill a YV12 buffer with a multi-colored checkerboard pattern
+void SurfaceMediaSourceTest::fillYV12Buffer(uint8_t* buf, int w, int h, int stride) {
+ const int blockWidth = w > 16 ? w / 16 : 1;
+ const int blockHeight = h > 16 ? h / 16 : 1;
+ const int yuvTexOffsetY = 0;
+ int yuvTexStrideY = stride;
+ int yuvTexOffsetV = yuvTexStrideY * h;
+ int yuvTexStrideV = (yuvTexStrideY/2 + 0xf) & ~0xf;
+ int yuvTexOffsetU = yuvTexOffsetV + yuvTexStrideV * h/2;
+ int yuvTexStrideU = yuvTexStrideV;
+ for (int x = 0; x < w; x++) {
+ for (int y = 0; y < h; y++) {
+ int parityX = (x / blockWidth) & 1;
+ int parityY = (y / blockHeight) & 1;
+ unsigned char intensity = (parityX ^ parityY) ? 63 : 191;
+ buf[yuvTexOffsetY + (y * yuvTexStrideY) + x] = intensity;
+ if (x < w / 2 && y < h / 2) {
+ buf[yuvTexOffsetU + (y * yuvTexStrideU) + x] = intensity;
+ if (x * 2 < w / 2 && y * 2 < h / 2) {
+ buf[yuvTexOffsetV + (y*2 * yuvTexStrideV) + x*2 + 0] =
+ buf[yuvTexOffsetV + (y*2 * yuvTexStrideV) + x*2 + 1] =
+ buf[yuvTexOffsetV + ((y*2+1) * yuvTexStrideV) + x*2 + 0] =
+ buf[yuvTexOffsetV + ((y*2+1) * yuvTexStrideV) + x*2 + 1] =
+ intensity;
+ }
+ }
+ }
+ }
+}
+
+// Fill a YV12 buffer with red outside a given rectangle and green inside it.
+void SurfaceMediaSourceTest::fillYV12BufferRect(uint8_t* buf, int w,
+ int h, int stride, const android_native_rect_t& rect) {
+ const int yuvTexOffsetY = 0;
+ int yuvTexStrideY = stride;
+ int yuvTexOffsetV = yuvTexStrideY * h;
+ int yuvTexStrideV = (yuvTexStrideY/2 + 0xf) & ~0xf;
+ int yuvTexOffsetU = yuvTexOffsetV + yuvTexStrideV * h/2;
+ int yuvTexStrideU = yuvTexStrideV;
+ for (int x = 0; x < w; x++) {
+ for (int y = 0; y < h; y++) {
+ bool inside = rect.left <= x && x < rect.right &&
+ rect.top <= y && y < rect.bottom;
+ buf[yuvTexOffsetY + (y * yuvTexStrideY) + x] = inside ? 240 : 64;
+ if (x < w / 2 && y < h / 2) {
+ bool inside = rect.left <= 2*x && 2*x < rect.right &&
+ rect.top <= 2*y && 2*y < rect.bottom;
+ buf[yuvTexOffsetU + (y * yuvTexStrideU) + x] = 16;
+ buf[yuvTexOffsetV + (y * yuvTexStrideV) + x] =
+ inside ? 16 : 255;
+ }
+ }
+ }
+} ///////// End of class SurfaceMediaSourceTest
+
+///////////////////////////////////////////////////////////////////
+// Class to imitate the recording /////////////////////////////
+// ////////////////////////////////////////////////////////////////
+struct SimpleDummyRecorder {
+ sp<MediaSource> mSource;
+
+ SimpleDummyRecorder
+ (const sp<MediaSource> &source): mSource(source) {}
+
+ status_t start() { return mSource->start();}
+ status_t stop() { return mSource->stop();}
+
+ // fakes reading from a media source
+ status_t readFromSource() {
+ MediaBuffer *buffer;
+ status_t err = mSource->read(&buffer);
+ if (err != OK) {
+ return err;
+ }
+ buffer->release();
+ buffer = NULL;
+ return OK;
+ }
+};
+
+///////////////////////////////////////////////////////////////////
+// TESTS
+// Just pass one buffer from the native_window to the SurfaceMediaSource
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotOneBufferPass) {
+ LOGV("Testing OneBufferPass ******************************");
+
+ ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+ 0, 0, HAL_PIXEL_FORMAT_YV12));
+ // OMX_COLOR_FormatYUV420Planar)); // ));
+ ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+ oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+}
+
+// Pass a buffer with the wrong width and height; it should not be accepted
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotWrongSizeBufferPass) {
+ LOGV("Testing Wrong size BufferPass ******************************");
+
+ // setting the client side buffer size different than the server size
+ ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+ 10, 10, HAL_PIXEL_FORMAT_YV12));
+ // OMX_COLOR_FormatYUV420Planar)); // ));
+ ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+ ANativeWindowBuffer* anb;
+
+ // make sure we get an error back when dequeuing!
+ ASSERT_NE(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));
+}
+
+
+// pass multiple buffers from the native_window to the SurfaceMediaSource
+// A dummy writer is used to simulate actual MPEG4Writer
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
+ LOGV("Testing MultiBufferPass, Dummy Recorder *********************");
+ ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+ 0, 0, HAL_PIXEL_FORMAT_YV12));
+ ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+ SimpleDummyRecorder writer(mSMS);
+ writer.start();
+
+ int32_t nFramesCount = 0;
+ while (nFramesCount < 300) {
+ oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+
+ ASSERT_EQ(NO_ERROR, writer.readFromSource());
+
+ nFramesCount++;
+ }
+ writer.stop();
+}
+
+// Delayed pass of multiple buffers from the native_window to the SurfaceMediaSource
+// A dummy writer is used to simulate actual MPEG4Writer
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPassLag) {
+ LOGV("Testing MultiBufferPass, Dummy Recorder Lagging **************");
+ ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+ 0, 0, HAL_PIXEL_FORMAT_YV12));
+ ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+ SimpleDummyRecorder writer(mSMS);
+ writer.start();
+
+ int32_t nFramesCount = 1;
+ const int FRAMES_LAG = mSMS->getBufferCount() - 1;
+ while (nFramesCount <= 300) {
+ oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+ // Forcing the writer to lag behind a few frames
+ if (nFramesCount > FRAMES_LAG) {
+ ASSERT_EQ(NO_ERROR, writer.readFromSource());
+ }
+ nFramesCount++;
+ }
+ writer.stop();
+}
+
+// pass multiple buffers from the native_window to the SurfaceMediaSource
+// A dummy writer (MULTITHREADED) is used to simulate actual MPEG4Writer
+TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPassThreaded) {
+ LOGV("Testing MultiBufferPass, Dummy Recorder Multi-Threaded **********");
+ ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+ 0, 0, HAL_PIXEL_FORMAT_YV12));
+ ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+ DummyRecorder writer(mSMS);
+ writer.start();
+
+ int32_t nFramesCount = 0;
+ while (nFramesCount <= 300) {
+ oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+
+ nFramesCount++;
+ }
+ writer.stop();
+}
+
+// Test to examine the actual encoding. Temporarily disabled until the
+// colorformat and encoding from GRAlloc data are resolved
+TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuFilledYV12BufferNpotWrite) {
+ LOGV("Testing the whole pipeline with actual Recorder");
+ ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
+ 0, 0, HAL_PIXEL_FORMAT_YV12)); // OMX_COLOR_FormatYUV420Planar)); // ));
+ ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+
+ OMXClient client;
+ CHECK_EQ(OK, client.connect());
+
+ sp<MPEG4Writer> writer = setUpWriter(client);
+ int64_t start = systemTime();
+ CHECK_EQ(OK, writer->start());
+
+ int32_t nFramesCount = 0;
+ while (nFramesCount <= 300) {
+ oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+ nFramesCount++;
+ }
+
+ CHECK_EQ(OK, writer->stop());
+ writer.clear();
+ int64_t end = systemTime();
+ client.disconnect();
+}
+
+
+} // namespace android
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index 817eac0..20225ba 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -345,56 +345,28 @@
#ifdef MTP_DEVICE
int MtpDataPacket::read(int fd) {
- // first read the header
- int ret = ::read(fd, mBuffer, MTP_CONTAINER_HEADER_SIZE);
- if (ret != MTP_CONTAINER_HEADER_SIZE)
+ int ret = ::read(fd, mBuffer, mBufferSize);
+ if (ret < MTP_CONTAINER_HEADER_SIZE)
return -1;
- // then the following data
- int total = MtpPacket::getUInt32(MTP_CONTAINER_LENGTH_OFFSET);
- allocate(total);
- int remaining = total - MTP_CONTAINER_HEADER_SIZE;
- ret = ::read(fd, &mBuffer[0] + MTP_CONTAINER_HEADER_SIZE, remaining);
- if (ret != remaining)
- return -1;
-
- mPacketSize = total;
+ mPacketSize = ret;
mOffset = MTP_CONTAINER_HEADER_SIZE;
- return total;
-}
-
-int MtpDataPacket::readDataHeader(int fd) {
- int ret = ::read(fd, mBuffer, MTP_CONTAINER_HEADER_SIZE);
- if (ret > 0)
- mPacketSize = ret;
- else
- mPacketSize = 0;
return ret;
}
int MtpDataPacket::write(int fd) {
MtpPacket::putUInt32(MTP_CONTAINER_LENGTH_OFFSET, mPacketSize);
MtpPacket::putUInt16(MTP_CONTAINER_TYPE_OFFSET, MTP_CONTAINER_TYPE_DATA);
- // send header separately from data
- int ret = ::write(fd, mBuffer, MTP_CONTAINER_HEADER_SIZE);
- if (ret == MTP_CONTAINER_HEADER_SIZE)
- ret = ::write(fd, mBuffer + MTP_CONTAINER_HEADER_SIZE,
- mPacketSize - MTP_CONTAINER_HEADER_SIZE);
- return (ret < 0 ? ret : 0);
-}
-
-int MtpDataPacket::writeDataHeader(int fd, uint32_t length) {
- MtpPacket::putUInt32(MTP_CONTAINER_LENGTH_OFFSET, length);
- MtpPacket::putUInt16(MTP_CONTAINER_TYPE_OFFSET, MTP_CONTAINER_TYPE_DATA);
- int ret = ::write(fd, mBuffer, MTP_CONTAINER_HEADER_SIZE);
+ int ret = ::write(fd, mBuffer, mPacketSize);
return (ret < 0 ? ret : 0);
}
int MtpDataPacket::writeData(int fd, void* data, uint32_t length) {
- MtpPacket::putUInt32(MTP_CONTAINER_LENGTH_OFFSET, length + MTP_CONTAINER_HEADER_SIZE);
+ allocate(length);
+ memcpy(mBuffer + MTP_CONTAINER_HEADER_SIZE, data, length);
+ length += MTP_CONTAINER_HEADER_SIZE;
+ MtpPacket::putUInt32(MTP_CONTAINER_LENGTH_OFFSET, length);
MtpPacket::putUInt16(MTP_CONTAINER_TYPE_OFFSET, MTP_CONTAINER_TYPE_DATA);
- int ret = ::write(fd, mBuffer, MTP_CONTAINER_HEADER_SIZE);
- if (ret == MTP_CONTAINER_HEADER_SIZE)
- ret = ::write(fd, data, length);
+ int ret = ::write(fd, mBuffer, length);
return (ret < 0 ? ret : 0);
}
diff --git a/media/mtp/MtpDataPacket.h b/media/mtp/MtpDataPacket.h
index 8a08948..2b81063 100644
--- a/media/mtp/MtpDataPacket.h
+++ b/media/mtp/MtpDataPacket.h
@@ -41,6 +41,7 @@
void setOperationCode(MtpOperationCode code);
void setTransactionID(MtpTransactionID id);
+ inline const uint8_t* getData() const { return mBuffer + MTP_CONTAINER_HEADER_SIZE; }
inline uint8_t getUInt8() { return (uint8_t)mBuffer[mOffset++]; }
inline int8_t getInt8() { return (int8_t)mBuffer[mOffset++]; }
uint16_t getUInt16();
@@ -95,11 +96,9 @@
#ifdef MTP_DEVICE
// fill our buffer with data from the given file descriptor
int read(int fd);
- int readDataHeader(int fd);
// write our data to the given file descriptor
int write(int fd);
- int writeDataHeader(int fd, uint32_t length);
int writeData(int fd, void* data, uint32_t length);
#endif
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 4047e2e..a9b539b 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -731,14 +731,12 @@
}
mfr.offset = 0;
mfr.length = fileLength;
-
- // send data header
- mData.setOperationCode(mRequest.getOperationCode());
- mData.setTransactionID(mRequest.getTransactionID());
- mData.writeDataHeader(mFD, fileLength + MTP_CONTAINER_HEADER_SIZE);
+ mfr.command = mRequest.getOperationCode();
+ mfr.transaction_id = mRequest.getTransactionID();
// then transfer the file
- int ret = ioctl(mFD, MTP_SEND_FILE, (unsigned long)&mfr);
+ int ret = ioctl(mFD, MTP_SEND_FILE_WITH_HEADER, (unsigned long)&mfr);
+ LOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
close(mfr.fd);
if (ret < 0) {
if (errno == ECANCELED)
@@ -798,15 +796,13 @@
}
mfr.offset = offset;
mfr.length = length;
+ mfr.command = mRequest.getOperationCode();
+ mfr.transaction_id = mRequest.getTransactionID();
mResponse.setParameter(1, length);
- // send data header
- mData.setOperationCode(mRequest.getOperationCode());
- mData.setTransactionID(mRequest.getTransactionID());
- mData.writeDataHeader(mFD, length + MTP_CONTAINER_HEADER_SIZE);
-
- // then transfer the file
- int ret = ioctl(mFD, MTP_SEND_FILE, (unsigned long)&mfr);
+ // transfer the file
+ int ret = ioctl(mFD, MTP_SEND_FILE_WITH_HEADER, (unsigned long)&mfr);
+ LOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
close(mfr.fd);
if (ret < 0) {
if (errno == ECANCELED)
@@ -918,7 +914,7 @@
return MTP_RESPONSE_GENERAL_ERROR;
MtpResponseCode result = MTP_RESPONSE_OK;
mode_t mask;
- int ret;
+ int ret, initialData;
if (mSendObjectHandle == kInvalidObjectHandle) {
LOGE("Expected SendObjectInfo before SendObject");
@@ -926,12 +922,13 @@
goto done;
}
- // read the header
- ret = mData.readDataHeader(mFD);
- // FIXME - check for errors here.
-
- // reset so we don't attempt to send this back
- mData.reset();
+ // read the header, and possibly some data
+ ret = mData.read(mFD);
+ if (ret < MTP_CONTAINER_HEADER_SIZE) {
+ result = MTP_RESPONSE_GENERAL_ERROR;
+ goto done;
+ }
+ initialData = ret - MTP_CONTAINER_HEADER_SIZE;
mtp_file_range mfr;
mfr.fd = open(mSendObjectFilePath, O_RDWR | O_CREAT | O_TRUNC);
@@ -945,16 +942,20 @@
fchmod(mfr.fd, mFilePermission);
umask(mask);
- mfr.offset = 0;
- mfr.length = mSendObjectFileSize;
+ if (initialData > 0)
+ ret = write(mfr.fd, mData.getData(), initialData);
- LOGV("receiving %s\n", (const char *)mSendObjectFilePath);
- // transfer the file
- ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+ if (mSendObjectFileSize - initialData > 0) {
+ mfr.offset = initialData;
+ mfr.length = mSendObjectFileSize - initialData;
+
+ LOGV("receiving %s\n", (const char *)mSendObjectFilePath);
+ // transfer the file
+ ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+ LOGV("MTP_RECEIVE_FILE returned %d\n", ret);
+ }
close(mfr.fd);
- LOGV("MTP_RECEIVE_FILE returned %d", ret);
-
if (ret < 0) {
unlink(mSendObjectFilePath);
if (errno == ECANCELED)
@@ -964,6 +965,9 @@
}
done:
+ // reset so we don't attempt to send the data back
+ mData.reset();
+
mDatabase->endSendObject(mSendObjectFilePath, mSendObjectHandle, mSendObjectFormat,
result == MTP_RESPONSE_OK);
mSendObjectHandle = kInvalidObjectHandle;
@@ -1096,23 +1100,31 @@
return MTP_RESPONSE_GENERAL_ERROR;
}
- // read the header
- int ret = mData.readDataHeader(mFD);
- // FIXME - check for errors here.
-
- // reset so we don't attempt to send this back
- mData.reset();
-
const char* filePath = (const char *)edit->mPath;
- LOGV("receiving partial %s %lld %ld\n", filePath, offset, length);
- mtp_file_range mfr;
- mfr.fd = edit->mFD;
- mfr.offset = offset;
- mfr.length = length;
+ LOGV("receiving partial %s %lld %lld\n", filePath, offset, length);
- // transfer the file
- ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
- LOGV("MTP_RECEIVE_FILE returned %d", ret);
+ // read the header, and possibly some data
+ int ret = mData.read(mFD);
+ if (ret < MTP_CONTAINER_HEADER_SIZE)
+ return MTP_RESPONSE_GENERAL_ERROR;
+ int initialData = ret - MTP_CONTAINER_HEADER_SIZE;
+
+ if (initialData > 0) {
+ ret = write(edit->mFD, mData.getData(), initialData);
+ offset += initialData;
+ length -= initialData;
+ }
+
+ if (length > 0) {
+ mtp_file_range mfr;
+ mfr.fd = edit->mFD;
+ mfr.offset = offset;
+ mfr.length = length;
+
+ // transfer the file
+ ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+ LOGV("MTP_RECEIVE_FILE returned %d", ret);
+ }
if (ret < 0) {
mResponse.setParameter(1, 0);
if (errno == ECANCELED)
@@ -1120,6 +1132,9 @@
else
return MTP_RESPONSE_GENERAL_ERROR;
}
+
+ // reset so we don't attempt to send this back
+ mData.reset();
mResponse.setParameter(1, length);
uint64_t end = offset + length;
if (end > edit->mSize) {
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 6bb1f56..a0407b9 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -24,6 +24,7 @@
libdl
LOCAL_STATIC_LIBRARIES := \
+ libcpustats \
libmedia_helper
LOCAL_MODULE:= libaudioflinger
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index daf94f2..0323fe0 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -52,6 +52,9 @@
#include <media/EffectsFactoryApi.h>
#include <audio_effects/effect_visualizer.h>
+#include <cpustats/ThreadCpuUsage.h>
+// #define DEBUG_CPU_USAGE 10 // log statistics every n wall clock seconds
+
// ----------------------------------------------------------------------------
@@ -78,6 +81,8 @@
static const nsecs_t kWarningThrottle = seconds(5);
+// RecordThread loop sleep time upon application overrun or audio HAL read error
+static const int kRecordThreadSleepUs = 5000;
// ----------------------------------------------------------------------------
@@ -414,7 +419,7 @@
lSessionId = *sessionId;
} else {
// if no audio session id is provided, create one here
- lSessionId = nextUniqueId_l();
+ lSessionId = nextUniqueId();
if (sessionId != NULL) {
*sessionId = lSessionId;
}
@@ -722,6 +727,15 @@
thread = checkPlaybackThread_l(ioHandle);
if (thread == NULL) {
thread = checkRecordThread_l(ioHandle);
+ } else if (thread.get() == primaryPlaybackThread_l()) {
+ // indicate output device change to all input threads for pre processing
+ AudioParameter param = AudioParameter(keyValuePairs);
+ int value;
+ if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
+ }
+ }
}
}
if (thread != NULL) {
@@ -870,10 +884,10 @@
// ----------------------------------------------------------------------------
-AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, int id)
+AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, int id, uint32_t device)
: Thread(false),
mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mChannelCount(0),
- mFrameSize(1), mFormat(0), mStandby(false), mId(id), mExiting(false)
+ mFrameSize(1), mFormat(0), mStandby(false), mId(id), mExiting(false), mDevice(device)
{
}
@@ -1029,14 +1043,15 @@
return NO_ERROR;
}
-
// ----------------------------------------------------------------------------
-AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device)
- : ThreadBase(audioFlinger, id),
+AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ int id,
+ uint32_t device)
+ : ThreadBase(audioFlinger, id, device),
mMixBuffer(0), mSuspended(0), mBytesWritten(0), mOutput(output),
- mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
- mDevice(device)
+ mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false)
{
readOutputParameters();
@@ -1196,9 +1211,9 @@
}
}
- if (mOutput == 0) {
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
LOGE("Audio driver not initialized.");
- lStatus = NO_INIT;
goto Exit;
}
@@ -1420,7 +1435,7 @@
if (halFrames == 0 || dspFrames == 0) {
return BAD_VALUE;
}
- if (mOutput == 0) {
+ if (initCheck() != NO_ERROR) {
return INVALID_OPERATION;
}
*halFrames = mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
@@ -1465,34 +1480,6 @@
return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
}
-sp<AudioFlinger::EffectChain> AudioFlinger::PlaybackThread::getEffectChain(int sessionId)
-{
- Mutex::Autolock _l(mLock);
- return getEffectChain_l(sessionId);
-}
-
-sp<AudioFlinger::EffectChain> AudioFlinger::PlaybackThread::getEffectChain_l(int sessionId)
-{
- sp<EffectChain> chain;
-
- size_t size = mEffectChains.size();
- for (size_t i = 0; i < size; i++) {
- if (mEffectChains[i]->sessionId() == sessionId) {
- chain = mEffectChains[i];
- break;
- }
- }
- return chain;
-}
-
-void AudioFlinger::PlaybackThread::setMode(uint32_t mode)
-{
- Mutex::Autolock _l(mLock);
- size_t size = mEffectChains.size();
- for (size_t i = 0; i < size; i++) {
- mEffectChains[i]->setMode_l(mode);
- }
-}
// ----------------------------------------------------------------------------
@@ -1500,7 +1487,7 @@
: PlaybackThread(audioFlinger, output, id, device),
mAudioMixer(0)
{
- mType = PlaybackThread::MIXER;
+ mType = ThreadBase::MIXER;
mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
// FIXME - Current mixer implementation only supports stereo output
@@ -1529,9 +1516,40 @@
uint32_t idleSleepTime = idleSleepTimeUs();
uint32_t sleepTime = idleSleepTime;
Vector< sp<EffectChain> > effectChains;
+#ifdef DEBUG_CPU_USAGE
+ ThreadCpuUsage cpu;
+ const CentralTendencyStatistics& stats = cpu.statistics();
+#endif
while (!exitPending())
{
+#ifdef DEBUG_CPU_USAGE
+ cpu.sampleAndEnable();
+ unsigned n = stats.n();
+ // cpu.elapsed() is expensive, so don't call it every loop
+ if ((n & 127) == 1) {
+ long long elapsed = cpu.elapsed();
+ if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
+ double perLoop = elapsed / (double) n;
+ double perLoop100 = perLoop * 0.01;
+ double mean = stats.mean();
+ double stddev = stats.stddev();
+ double minimum = stats.minimum();
+ double maximum = stats.maximum();
+ cpu.resetStatistics();
+ LOGI("CPU usage over past %.1f secs (%u mixer loops at %.1f mean ms per loop):\n us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f",
+ elapsed * .000000001, n, perLoop * .000001,
+ mean * .001,
+ stddev * .001,
+ minimum * .001,
+ maximum * .001,
+ mean / perLoop100,
+ stddev / perLoop100,
+ minimum / perLoop100,
+ maximum / perLoop100);
+ }
+ }
+#endif
processConfigEvents();
mixerStatus = MIXER_IDLE;
@@ -2038,7 +2056,7 @@
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device)
: PlaybackThread(audioFlinger, output, id, device)
{
- mType = PlaybackThread::DIRECT;
+ mType = ThreadBase::DIRECT;
}
AudioFlinger::DirectOutputThread::~DirectOutputThread()
@@ -2517,7 +2535,7 @@
AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, AudioFlinger::MixerThread* mainThread, int id)
: MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->device()), mWaitTimeMs(UINT_MAX)
{
- mType = PlaybackThread::DUPLICATING;
+ mType = ThreadBase::DUPLICATING;
addOutputTrack(mainThread);
}
@@ -2936,7 +2954,7 @@
mStreamType = streamType;
// NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
// 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack
- mCblk->frameSize = audio_is_linear_pcm(format) ? mChannelCount * audio_bytes_per_sample(format) : sizeof(uint8_t);
+ mCblk->frameSize = audio_is_linear_pcm(format) ? mChannelCount * sizeof(int16_t) : sizeof(uint8_t);
}
}
@@ -3717,21 +3735,26 @@
if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
lSessionId = *sessionId;
} else {
- lSessionId = nextUniqueId_l();
+ lSessionId = nextUniqueId();
if (sessionId != NULL) {
*sessionId = lSessionId;
}
}
// create new record track. The record track uses one track in mHardwareMixerThread by convention.
- recordTrack = new RecordThread::RecordTrack(thread, client, sampleRate,
- format, channelMask, frameCount, flags, lSessionId);
+ recordTrack = thread->createRecordTrack_l(client,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ lSessionId,
+ &lStatus);
}
- if (recordTrack->getCblk() == NULL) {
+ if (lStatus != NO_ERROR) {
// remove local strong reference to Client before deleting the RecordTrack so that the Client
// destructor is called by the TrackBase destructor with mLock held
client.clear();
recordTrack.clear();
- lStatus = NO_MEMORY;
goto Exit;
}
@@ -3780,10 +3803,16 @@
// ----------------------------------------------------------------------------
-AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, AudioStreamIn *input, uint32_t sampleRate, uint32_t channels, int id) :
- ThreadBase(audioFlinger, id),
- mInput(input), mResampler(0), mRsmpOutBuffer(0), mRsmpInBuffer(0)
+AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamIn *input,
+ uint32_t sampleRate,
+ uint32_t channels,
+ int id,
+ uint32_t device) :
+ ThreadBase(audioFlinger, id, device),
+ mInput(input), mTrack(NULL), mResampler(0), mRsmpOutBuffer(0), mRsmpInBuffer(0)
{
+ mType = ThreadBase::RECORD;
mReqChannelCount = popcount(channels);
mReqSampleRate = sampleRate;
readInputParameters();
@@ -3813,6 +3842,7 @@
{
AudioBufferProvider::Buffer buffer;
sp<RecordTrack> activeTrack;
+ Vector< sp<EffectChain> > effectChains;
nsecs_t lastWarning = 0;
@@ -3863,14 +3893,22 @@
mStandby = false;
}
}
+ lockEffectChains_l(effectChains);
}
if (mActiveTrack != 0) {
if (mActiveTrack->mState != TrackBase::ACTIVE &&
mActiveTrack->mState != TrackBase::RESUMING) {
- usleep(5000);
+ unlockEffectChains(effectChains);
+ usleep(kRecordThreadSleepUs);
continue;
}
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ effectChains[i]->process_l();
+ }
+ // enable changes in effect chain
+ unlockEffectChains(effectChains);
+
buffer.frameCount = mFrameCount;
if (LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
size_t framesOut = buffer.frameCount;
@@ -3919,7 +3957,7 @@
// Force input into standby so that it tries to
// recover at next read attempt
mInput->stream->common.standby(&mInput->stream->common);
- usleep(5000);
+ usleep(kRecordThreadSleepUs);
}
mRsmpInIndex = mFrameCount;
framesOut = 0;
@@ -3967,9 +4005,12 @@
// Release the processor for a while before asking for a new buffer.
// This will give the application more chance to read from the buffer and
// clear the overflow.
- usleep(5000);
+ usleep(kRecordThreadSleepUs);
}
+ } else {
+ unlockEffectChains(effectChains);
}
+ effectChains.clear();
}
if (!mStandby) {
@@ -3983,6 +4024,49 @@
return false;
}
+
+sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
+ const sp<AudioFlinger::Client>& client,
+ uint32_t sampleRate,
+ int format,
+ int channelMask,
+ int frameCount,
+ uint32_t flags,
+ int sessionId,
+ status_t *status)
+{
+ sp<RecordTrack> track;
+ status_t lStatus;
+
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
+ LOGE("Audio driver not initialized.");
+ goto Exit;
+ }
+
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+
+ track = new RecordTrack(this, client, sampleRate,
+ format, channelMask, frameCount, flags, sessionId);
+
+ if (track->getCblk() == NULL) {
+ lStatus = NO_MEMORY;
+ goto Exit;
+ }
+
+ mTrack = track.get();
+
+ }
+ lStatus = NO_ERROR;
+
+Exit:
+ if (status) {
+ *status = lStatus;
+ }
+ return track;
+}
+
status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack)
{
LOGV("RecordThread::start");
@@ -4112,7 +4196,7 @@
// Force input into standby so that it tries to
// recover at next read attempt
mInput->stream->common.standby(&mInput->stream->common);
- usleep(5000);
+ usleep(kRecordThreadSleepUs);
}
buffer->raw = 0;
buffer->frameCount = 0;
@@ -4177,6 +4261,23 @@
reconfig = true;
}
}
+ if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+ // forward device change to effects that have requested to be
+ // aware of attached audio device.
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setDevice_l(value);
+ }
+ // store input device and output device but do not forward output device to audio HAL.
+ // Note that status is ignored by the caller for output device
+ // (see AudioFlinger::setParameters())
+ if (value & AUDIO_DEVICE_OUT_ALL) {
+ mDevice &= (uint32_t)~(value & AUDIO_DEVICE_OUT_ALL);
+ status = BAD_VALUE;
+ } else {
+ mDevice &= (uint32_t)~(value & AUDIO_DEVICE_IN_ALL);
+ }
+ mDevice |= (uint32_t)value;
+ }
if (status == NO_ERROR) {
status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
if (status == INVALID_OPERATION) {
@@ -4286,6 +4387,21 @@
return mInput->stream->get_input_frames_lost(mInput->stream);
}
+uint32_t AudioFlinger::RecordThread::hasAudioSession(int sessionId)
+{
+ Mutex::Autolock _l(mLock);
+ uint32_t result = 0;
+ if (getEffectChain_l(sessionId) != 0) {
+ result = EFFECT_SESSION;
+ }
+
+ if (mTrack != NULL && sessionId == mTrack->sessionId()) {
+ result |= TRACK_SESSION;
+ }
+
+ return result;
+}
+
// ----------------------------------------------------------------------------
int AudioFlinger::openOutput(uint32_t *pDevices,
@@ -4334,7 +4450,7 @@
mHardwareStatus = AUDIO_HW_IDLE;
if (outStream != NULL) {
AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
- int id = nextUniqueId_l();
+ int id = nextUniqueId();
if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) ||
(format != AUDIO_FORMAT_PCM_16_BIT) ||
@@ -4371,7 +4487,7 @@
return 0;
}
- int id = nextUniqueId_l();
+ int id = nextUniqueId();
DuplicatingThread *thread = new DuplicatingThread(this, thread1, id);
thread->addOutputTrack(thread2);
mPlaybackThreads.add(id, thread);
@@ -4394,9 +4510,9 @@
LOGV("closeOutput() %d", output);
- if (thread->type() == PlaybackThread::MIXER) {
+ if (thread->type() == ThreadBase::MIXER) {
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- if (mPlaybackThreads.valueAt(i)->type() == PlaybackThread::DUPLICATING) {
+ if (mPlaybackThreads.valueAt(i)->type() == ThreadBase::DUPLICATING) {
DuplicatingThread *dupThread = (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
dupThread->removeOutputTrack((MixerThread *)thread.get());
}
@@ -4408,7 +4524,7 @@
}
thread->exit();
- if (thread->type() != PlaybackThread::DUPLICATING) {
+ if (thread->type() != ThreadBase::DUPLICATING) {
AudioStreamOut *out = thread->getOutput();
out->hwDev->close_output_stream(out->hwDev, out->stream);
delete out;
@@ -4503,9 +4619,17 @@
if (inStream != NULL) {
AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
- int id = nextUniqueId_l();
- // Start record thread
- thread = new RecordThread(this, input, reqSamplingRate, reqChannels, id);
+ int id = nextUniqueId();
+ // Start record thread
+ // RecordThread requires both input and output device indication to forward to audio
+ // pre processing modules
+ uint32_t device = (*pDevices) | primaryOutputDevice_l();
+ thread = new RecordThread(this,
+ input,
+ reqSamplingRate,
+ reqChannels,
+ id,
+ device);
mRecordThreads.add(id, thread);
LOGV("openInput() created record thread: ID %d thread %p", id, thread);
if (pSamplingRate) *pSamplingRate = reqSamplingRate;
@@ -4563,7 +4687,7 @@
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
if (thread != dstThread &&
- thread->type() != PlaybackThread::DIRECT) {
+ thread->type() != ThreadBase::DIRECT) {
MixerThread *srcThread = (MixerThread *)thread;
srcThread->invalidateTracks(stream);
}
@@ -4575,8 +4699,7 @@
int AudioFlinger::newAudioSessionId()
{
- AutoMutex _l(mLock);
- return nextUniqueId_l();
+ return nextUniqueId();
}
// checkPlaybackThread_l() must be called with AudioFlinger::mLock held
@@ -4594,7 +4717,7 @@
{
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread != NULL) {
- if (thread->type() == PlaybackThread::DIRECT) {
+ if (thread->type() == ThreadBase::DIRECT) {
thread = NULL;
}
}
@@ -4611,12 +4734,34 @@
return thread;
}
-// nextUniqueId_l() must be called with AudioFlinger::mLock held
-int AudioFlinger::nextUniqueId_l()
+uint32_t AudioFlinger::nextUniqueId()
{
- return mNextUniqueId++;
+ return android_atomic_inc(&mNextUniqueId);
}
+AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l()
+{
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
+ if (thread->getOutput()->hwDev == mPrimaryHardwareDev) {
+ return thread;
+ }
+ }
+ return NULL;
+}
+
+uint32_t AudioFlinger::primaryOutputDevice_l()
+{
+ PlaybackThread *thread = primaryPlaybackThread_l();
+
+ if (thread == NULL) {
+ return 0;
+ }
+
+ return thread->device();
+}
+
+
// ----------------------------------------------------------------------------
// Effect management
// ----------------------------------------------------------------------------
@@ -4649,7 +4794,7 @@
effect_descriptor_t *pDesc,
const sp<IEffectClient>& effectClient,
int32_t priority,
- int output,
+ int io,
int sessionId,
status_t *status,
int *id,
@@ -4661,8 +4806,8 @@
sp<Client> client;
wp<Client> wclient;
- LOGV("createEffect pid %d, client %p, priority %d, sessionId %d, output %d",
- pid, effectClient.get(), priority, sessionId, output);
+ LOGV("createEffect pid %d, client %p, priority %d, sessionId %d, io %d",
+ pid, effectClient.get(), priority, sessionId, io);
if (pDesc == NULL) {
lStatus = BAD_VALUE;
@@ -4690,7 +4835,7 @@
goto Exit;
}
- if (output == 0) {
+ if (io == 0) {
if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
// output must be specified by AudioPolicyManager when using session
// AUDIO_SESSION_OUTPUT_STAGE
@@ -4698,9 +4843,9 @@
goto Exit;
} else if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
// if the output returned by getOutputForEffect() is removed before we lock the
- // mutex below, the call to checkPlaybackThread_l(output) below will detect it
+ // mutex below, the call to checkPlaybackThread_l(io) below will detect it
// and we will exit safely
- output = AudioSystem::getOutputForEffect(&desc);
+ io = AudioSystem::getOutputForEffect(&desc);
}
}
@@ -4777,30 +4922,40 @@
// output threads.
// If output is 0 here, sessionId is neither SESSION_OUTPUT_STAGE nor SESSION_OUTPUT_MIX
// because of code checking output when entering the function.
- if (output == 0) {
+ // Note: io is never 0 when creating an effect on an input
+ if (io == 0) {
// look for the thread where the specified audio session is present
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
if (mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
- output = mPlaybackThreads.keyAt(i);
+ io = mPlaybackThreads.keyAt(i);
break;
}
}
+ if (io == 0) {
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ if (mRecordThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
+ io = mRecordThreads.keyAt(i);
+ break;
+ }
+ }
+ }
// If no output thread contains the requested session ID, default to
// first output. The effect chain will be moved to the correct output
// thread when a track with the same session ID is created
- if (output == 0 && mPlaybackThreads.size()) {
- output = mPlaybackThreads.keyAt(0);
+ if (io == 0 && mPlaybackThreads.size()) {
+ io = mPlaybackThreads.keyAt(0);
+ }
+ LOGV("createEffect() got io %d for effect %s", io, desc.name);
+ }
+ ThreadBase *thread = checkRecordThread_l(io);
+ if (thread == NULL) {
+ thread = checkPlaybackThread_l(io);
+ if (thread == NULL) {
+ LOGE("createEffect() unknown output thread");
+ lStatus = BAD_VALUE;
+ goto Exit;
}
}
- LOGV("createEffect() got output %d for effect %s", output, desc.name);
- PlaybackThread *thread = checkPlaybackThread_l(output);
- if (thread == NULL) {
- LOGE("createEffect() unknown output thread");
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
- // TODO: allow attachment of effect to inputs
wclient = mClients.valueFor(pid);
@@ -4909,8 +5064,9 @@
return NO_ERROR;
}
+
// PlaybackThread::createEffect_l() must be called with AudioFlinger::mLock held
-sp<AudioFlinger::EffectHandle> AudioFlinger::PlaybackThread::createEffect_l(
+sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
const sp<AudioFlinger::Client>& client,
const sp<IEffectClient>& effectClient,
int32_t priority,
@@ -4923,24 +5079,14 @@
sp<EffectModule> effect;
sp<EffectHandle> handle;
status_t lStatus;
- sp<Track> track;
sp<EffectChain> chain;
bool chainCreated = false;
bool effectCreated = false;
bool effectRegistered = false;
- if (mOutput == 0) {
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
LOGW("createEffect_l() Audio driver not initialized.");
- lStatus = NO_INIT;
- goto Exit;
- }
-
- // Do not allow auxiliary effect on session other than 0
- if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY &&
- sessionId != AUDIO_SESSION_OUTPUT_MIX) {
- LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
- desc->name, sessionId);
- lStatus = BAD_VALUE;
goto Exit;
}
@@ -4952,6 +5098,16 @@
lStatus = BAD_VALUE;
goto Exit;
}
+ // Only pre processor effects are allowed on input threads, and pre processor effects are only allowed on input threads
+ if ((mType == RECORD &&
+ (desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) ||
+ (mType != RECORD &&
+ (desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
+ LOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
+ desc->name, desc->flags, mType);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
LOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
@@ -4974,7 +5130,7 @@
LOGV("createEffect_l() got effect %p on chain %p", effect == 0 ? 0 : effect.get(), chain.get());
if (effect == 0) {
- int id = mAudioFlinger->nextUniqueId_l();
+ int id = mAudioFlinger->nextUniqueId();
// Check CPU and memory usage
lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
if (lStatus != NO_ERROR) {
@@ -5025,9 +5181,20 @@
return handle;
}
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
+{
+ sp<EffectModule> effect;
+
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ effect = chain->getEffectFromId_l(effectId);
+ }
+ return effect;
+}
+
// PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
// PlaybackThread::mLock held
-status_t AudioFlinger::PlaybackThread::addEffect_l(const sp<EffectModule>& effect)
+status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
{
// check for existing effect chain with the requested audio session
int sessionId = effect->sessionId();
@@ -5063,7 +5230,7 @@
return NO_ERROR;
}
-void AudioFlinger::PlaybackThread::removeEffect_l(const sp<EffectModule>& effect) {
+void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) {
LOGV("removeEffect_l() %p effect %p", this, effect.get());
effect_descriptor_t desc = effect->desc();
@@ -5082,7 +5249,53 @@
}
}
-void AudioFlinger::PlaybackThread::disconnectEffect(const sp<EffectModule>& effect,
+void AudioFlinger::ThreadBase::lockEffectChains_l(
+ Vector<sp <AudioFlinger::EffectChain> >& effectChains)
+{
+ effectChains = mEffectChains;
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->lock();
+ }
+}
+
+void AudioFlinger::ThreadBase::unlockEffectChains(
+ Vector<sp <AudioFlinger::EffectChain> >& effectChains)
+{
+ for (size_t i = 0; i < effectChains.size(); i++) {
+ effectChains[i]->unlock();
+ }
+}
+
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId)
+{
+ Mutex::Autolock _l(mLock);
+ return getEffectChain_l(sessionId);
+}
+
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId)
+{
+ sp<EffectChain> chain;
+
+ size_t size = mEffectChains.size();
+ for (size_t i = 0; i < size; i++) {
+ if (mEffectChains[i]->sessionId() == sessionId) {
+ chain = mEffectChains[i];
+ break;
+ }
+ }
+ return chain;
+}
+
+void AudioFlinger::ThreadBase::setMode(uint32_t mode)
+{
+ Mutex::Autolock _l(mLock);
+ size_t size = mEffectChains.size();
+ for (size_t i = 0; i < size; i++) {
+ mEffectChains[i]->setMode_l(mode);
+ }
+}
+
+void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect,
const wp<EffectHandle>& handle) {
Mutex::Autolock _l(mLock);
LOGV("disconnectEffect() %p effect %p", this, effect.get());
@@ -5188,35 +5401,6 @@
return mEffectChains.size();
}
-void AudioFlinger::PlaybackThread::lockEffectChains_l(
- Vector<sp <AudioFlinger::EffectChain> >& effectChains)
-{
- effectChains = mEffectChains;
- for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->lock();
- }
-}
-
-void AudioFlinger::PlaybackThread::unlockEffectChains(
- Vector<sp <AudioFlinger::EffectChain> >& effectChains)
-{
- for (size_t i = 0; i < effectChains.size(); i++) {
- effectChains[i]->unlock();
- }
-}
-
-
-sp<AudioFlinger::EffectModule> AudioFlinger::PlaybackThread::getEffect_l(int sessionId, int effectId)
-{
- sp<EffectModule> effect;
-
- sp<EffectChain> chain = getEffectChain_l(sessionId);
- if (chain != 0) {
- effect = chain->getEffectFromId_l(effectId);
- }
- return effect;
-}
-
status_t AudioFlinger::PlaybackThread::attachAuxEffect(
const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
{
@@ -5257,6 +5441,34 @@
}
}
+status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
+{
+ // only one chain per input thread
+ if (mEffectChains.size() != 0) {
+ return INVALID_OPERATION;
+ }
+ LOGV("addEffectChain_l() %p on thread %p", chain.get(), this);
+
+ chain->setInBuffer(NULL);
+ chain->setOutBuffer(NULL);
+
+ mEffectChains.add(chain);
+
+ return NO_ERROR;
+}
+
+size_t AudioFlinger::RecordThread::removeEffectChain_l(const sp<EffectChain>& chain)
+{
+ LOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
+ LOGW_IF(mEffectChains.size() != 1,
+ "removeEffectChain_l() %p invalid chain size %d on thread %p",
+ chain.get(), mEffectChains.size(), this);
+ if (mEffectChains.size() == 1) {
+ mEffectChains.removeAt(0);
+ }
+ return 0;
+}
+
// ----------------------------------------------------------------------------
// EffectModule implementation
// ----------------------------------------------------------------------------
@@ -5278,12 +5490,11 @@
if (thread == 0) {
return;
}
- PlaybackThread *p = (PlaybackThread *)thread.get();
memcpy(&mDescriptor, desc, sizeof(effect_descriptor_t));
// create effect engine from effect factory
- mStatus = EffectCreate(&desc->uuid, sessionId, p->id(), &mEffectInterface);
+ mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface);
if (mStatus != NO_ERROR) {
return;
@@ -5306,6 +5517,13 @@
{
LOGV("Destructor %p", this);
if (mEffectInterface != NULL) {
+ if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+ (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ thread->stream()->remove_audio_effect(thread->stream(), mEffectInterface);
+ }
+ }
// release effect engine
EffectRelease(mEffectInterface);
}
@@ -5381,8 +5599,7 @@
{
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- playbackThread->disconnectEffect(keep, handle);
+ thread->disconnectEffect(keep, handle);
}
}
}
@@ -5592,6 +5809,14 @@
if (status == 0) {
status = cmdStatus;
}
+ if (status == 0 &&
+ ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+ (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ thread->stream()->add_audio_effect(thread->stream(), mEffectInterface);
+ }
+ }
return status;
}
@@ -5611,6 +5836,14 @@
if (status == 0) {
status = cmdStatus;
}
+ if (status == 0 &&
+ ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+ (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ thread->stream()->remove_audio_effect(thread->stream(), mEffectInterface);
+ }
+ }
return status;
}
@@ -5750,17 +5983,41 @@
{
Mutex::Autolock _l(mLock);
status_t status = NO_ERROR;
- if ((mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
- status_t cmdStatus;
- uint32_t size = sizeof(status_t);
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_DEVICE,
- sizeof(uint32_t),
- &device,
- &size,
- &cmdStatus);
- if (status == NO_ERROR) {
- status = cmdStatus;
+ if (device && (mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
+ // audio pre processing modules on RecordThread can receive both output and
+ // input device indication in the same call
+ uint32_t dev = device & AUDIO_DEVICE_OUT_ALL;
+ if (dev) {
+ status_t cmdStatus;
+ uint32_t size = sizeof(status_t);
+
+ status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_SET_DEVICE,
+ sizeof(uint32_t),
+ &dev,
+ &size,
+ &cmdStatus);
+ if (status == NO_ERROR) {
+ status = cmdStatus;
+ }
+ }
+ dev = device & AUDIO_DEVICE_IN_ALL;
+ if (dev) {
+ status_t cmdStatus;
+ uint32_t size = sizeof(status_t);
+
+ status_t status2 = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_SET_INPUT_DEVICE,
+ sizeof(uint32_t),
+ &dev,
+ &size,
+ &cmdStatus);
+ if (status2 == NO_ERROR) {
+ status2 = cmdStatus;
+ }
+ if (status == NO_ERROR) {
+ status = status2;
+ }
}
}
return status;
@@ -6134,7 +6391,6 @@
LOGW("process_l(): cannot promote mixer thread");
return;
}
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
bool isGlobalSession = (mSessionId == AUDIO_SESSION_OUTPUT_MIX) ||
(mSessionId == AUDIO_SESSION_OUTPUT_STAGE);
bool tracksOnSession = false;
@@ -6146,7 +6402,7 @@
// will not do it
if (tracksOnSession &&
activeTrackCnt() == 0) {
- size_t numSamples = playbackThread->frameCount() * playbackThread->channelCount();
+ size_t numSamples = thread->frameCount() * thread->channelCount();
memset(mInBuffer, 0, numSamples * sizeof(int16_t));
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1fad987..fff4f06 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -157,7 +157,7 @@
effect_descriptor_t *pDesc,
const sp<IEffectClient>& effectClient,
int32_t priority,
- int output,
+ int io,
int sessionId,
status_t *status,
int *id,
@@ -273,9 +273,17 @@
class ThreadBase : public Thread {
public:
- ThreadBase (const sp<AudioFlinger>& audioFlinger, int id);
+ ThreadBase (const sp<AudioFlinger>& audioFlinger, int id, uint32_t device);
virtual ~ThreadBase();
+
+ enum type {
+ MIXER, // Thread class is MixerThread
+ DIRECT, // Thread class is DirectOutputThread
+ DUPLICATING, // Thread class is DuplicatingThread
+ RECORD // Thread class is RecordThread
+ };
+
status_t dumpBase(int fd, const Vector<String16>& args);
// base for record and playback
@@ -377,6 +385,8 @@
int mParam;
};
+ virtual status_t initCheck() const = 0;
+ int type() const { return mType; }
uint32_t sampleRate() const;
int channelCount() const;
uint32_t format() const;
@@ -392,6 +402,60 @@
void processConfigEvents();
int id() const { return mId;}
bool standby() { return mStandby; }
+ uint32_t device() { return mDevice; }
+ virtual audio_stream_t* stream() = 0;
+
+ sp<EffectHandle> createEffect_l(
+ const sp<AudioFlinger::Client>& client,
+ const sp<IEffectClient>& effectClient,
+ int32_t priority,
+ int sessionId,
+ effect_descriptor_t *desc,
+ int *enabled,
+ status_t *status);
+ void disconnectEffect(const sp< EffectModule>& effect,
+ const wp<EffectHandle>& handle);
+
+ // return values for hasAudioSession (bit field)
+ enum effect_state {
+ EFFECT_SESSION = 0x1, // the audio session corresponds to at least one
+ // effect
+ TRACK_SESSION = 0x2 // the audio session corresponds to at least one
+ // track
+ };
+
+ // get effect chain corresponding to session Id.
+ sp<EffectChain> getEffectChain(int sessionId);
+ // same as getEffectChain() but must be called with ThreadBase mutex locked
+ sp<EffectChain> getEffectChain_l(int sessionId);
+ // add an effect chain to the chain list (mEffectChains)
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain) = 0;
+ // remove an effect chain from the chain list (mEffectChains)
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain) = 0;
+ // lock all effect chains Mutexes. Must be called before releasing the
+ // ThreadBase mutex before processing the mixer and effects. This guarantees the
+ // integrity of the chains during the process.
+ void lockEffectChains_l(Vector<sp <EffectChain> >& effectChains);
+ // unlock effect chains after process
+ void unlockEffectChains(Vector<sp <EffectChain> >& effectChains);
+ // set audio mode to all effect chains
+ void setMode(uint32_t mode);
+ // get effect module with corresponding ID on specified audio session
+ sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
+ // add an effect module. Also creates the effect chain if none exists for
+ // the effect's audio session
+ status_t addEffect_l(const sp< EffectModule>& effect);
+ // remove an effect module. Also removes the effect chain if this was the last
+ // effect
+ void removeEffect_l(const sp< EffectModule>& effect);
+ // detach all tracks connected to an auxiliary effect
+ virtual void detachAuxEffect_l(int effectId) {}
+ // returns either EFFECT_SESSION if effects on this audio session exist in one
+ // chain, or TRACK_SESSION if tracks on this audio session exist, or both
+ virtual uint32_t hasAudioSession(int sessionId) = 0;
+ // the value returned by default implementation is not important as the
+ // strategy is only meaningful for PlaybackThread which implements this method
+ virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; }
mutable Mutex mLock;
@@ -406,6 +470,7 @@
friend class RecordThread;
friend class RecordTrack;
+ int mType;
Condition mWaitWorkCV;
sp<AudioFlinger> mAudioFlinger;
uint32_t mSampleRate;
@@ -421,18 +486,15 @@
bool mStandby;
int mId;
bool mExiting;
+ Vector< sp<EffectChain> > mEffectChains;
+ uint32_t mDevice; // output device for PlaybackThread
+ // input + output devices for RecordThread
};
// --- PlaybackThread ---
class PlaybackThread : public ThreadBase {
public:
- enum type {
- MIXER,
- DIRECT,
- DUPLICATING
- };
-
enum mixer_state {
MIXER_IDLE,
MIXER_TRACKS_ENABLED,
@@ -569,6 +631,8 @@
virtual status_t readyToRun();
virtual void onFirstRef();
+ virtual status_t initCheck() const { return (mOutput == 0) ? NO_INIT : NO_ERROR; }
+
virtual uint32_t latency() const;
virtual status_t setMasterVolume(float value);
@@ -595,8 +659,8 @@
status_t *status);
AudioStreamOut* getOutput() { return mOutput; }
+ virtual audio_stream_t* stream() { return &mOutput->stream->common; }
- virtual int type() const { return mType; }
void suspend() { mSuspended++; }
void restore() { if (mSuspended) mSuspended--; }
bool isSuspended() { return (mSuspended != 0); }
@@ -605,45 +669,16 @@
virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
int16_t *mixBuffer() { return mMixBuffer; };
- sp<EffectHandle> createEffect_l(
- const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
- int32_t priority,
- int sessionId,
- effect_descriptor_t *desc,
- int *enabled,
- status_t *status);
- void disconnectEffect(const sp< EffectModule>& effect,
- const wp<EffectHandle>& handle);
-
- // return values for hasAudioSession (bit field)
- enum effect_state {
- EFFECT_SESSION = 0x1, // the audio session corresponds to at least one
- // effect
- TRACK_SESSION = 0x2 // the audio session corresponds to at least one
- // track
- };
-
- uint32_t hasAudioSession(int sessionId);
- sp<EffectChain> getEffectChain(int sessionId);
- sp<EffectChain> getEffectChain_l(int sessionId);
- status_t addEffectChain_l(const sp<EffectChain>& chain);
- size_t removeEffectChain_l(const sp<EffectChain>& chain);
- void lockEffectChains_l(Vector<sp <EffectChain> >& effectChains);
- void unlockEffectChains(Vector<sp <EffectChain> >& effectChains);
-
- sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
- void detachAuxEffect_l(int effectId);
+ virtual void detachAuxEffect_l(int effectId);
status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track,
int EffectId);
status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track,
int EffectId);
- void setMode(uint32_t mode);
- status_t addEffect_l(const sp< EffectModule>& effect);
- void removeEffect_l(const sp< EffectModule>& effect);
-
- uint32_t getStrategyForSession_l(int sessionId);
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
+ virtual uint32_t hasAudioSession(int sessionId);
+ virtual uint32_t getStrategyForSession_l(int sessionId);
struct stream_type_t {
stream_type_t()
@@ -656,7 +691,6 @@
};
protected:
- int mType;
int16_t* mMixBuffer;
int mSuspended;
int mBytesWritten;
@@ -688,8 +722,6 @@
void readOutputParameters();
- uint32_t device() { return mDevice; }
-
virtual status_t dumpInternals(int fd, const Vector<String16>& args);
status_t dumpTracks(int fd, const Vector<String16>& args);
status_t dumpEffectChains(int fd, const Vector<String16>& args);
@@ -703,8 +735,6 @@
int mNumWrites;
int mNumDelayedWrites;
bool mInWrite;
- Vector< sp<EffectChain> > mEffectChains;
- uint32_t mDevice;
};
class MixerThread : public PlaybackThread {
@@ -788,11 +818,13 @@
float streamVolumeInternal(int stream) const { return mStreamTypes[stream].volume; }
void audioConfigChanged_l(int event, int ioHandle, void *param2);
- int nextUniqueId_l();
+ uint32_t nextUniqueId();
status_t moveEffectChain_l(int session,
AudioFlinger::PlaybackThread *srcThread,
AudioFlinger::PlaybackThread *dstThread,
bool reRegister);
+ PlaybackThread *primaryPlaybackThread_l();
+ uint32_t primaryOutputDevice_l();
friend class AudioBuffer;
@@ -864,18 +896,33 @@
AudioStreamIn *input,
uint32_t sampleRate,
uint32_t channels,
- int id);
+ int id,
+ uint32_t device);
~RecordThread();
virtual bool threadLoop();
virtual status_t readyToRun() { return NO_ERROR; }
virtual void onFirstRef();
+ virtual status_t initCheck() const { return (mInput == 0) ? NO_INIT : NO_ERROR; }
+ sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
+ const sp<AudioFlinger::Client>& client,
+ uint32_t sampleRate,
+ int format,
+ int channelMask,
+ int frameCount,
+ uint32_t flags,
+ int sessionId,
+ status_t *status);
+
status_t start(RecordTrack* recordTrack);
void stop(RecordTrack* recordTrack);
status_t dump(int fd, const Vector<String16>& args);
AudioStreamIn* getInput() { return mInput; }
+ virtual audio_stream_t* stream() { return &mInput->stream->common; }
+
+ void setTrack(RecordTrack *recordTrack) { mTrack = recordTrack; }
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
virtual bool checkForNewParameters_l();
@@ -884,9 +931,14 @@
void readInputParameters();
virtual unsigned int getInputFramesLost();
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
+ virtual uint32_t hasAudioSession(int sessionId);
+
private:
RecordThread();
AudioStreamIn *mInput;
+ RecordTrack* mTrack;
sp<RecordTrack> mActiveTrack;
Condition mStartStopCond;
AudioResampler *mResampler;
@@ -1103,9 +1155,8 @@
status_t addEffect_l(const sp<EffectModule>& handle);
size_t removeEffect_l(const sp<EffectModule>& handle);
- int sessionId() {
- return mSessionId;
- }
+ int sessionId() { return mSessionId; }
+ void setSessionId(int sessionId) { mSessionId = sessionId; }
sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
sp<EffectModule> getEffectFromId_l(int id);
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 8e16d94..dd1e153 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -33,11 +33,14 @@
#include <cutils/properties.h>
#include <dlfcn.h>
#include <hardware_legacy/power.h>
+#include <media/AudioEffect.h>
+#include <media/EffectsFactoryApi.h>
#include <hardware/hardware.h>
#include <system/audio.h>
#include <system/audio_policy.h>
#include <hardware/audio_policy.h>
+#include <audio_effects/audio_effects_conf.h>
namespace android {
@@ -101,6 +104,13 @@
mpAudioPolicy->set_can_mute_enforced_audible(mpAudioPolicy, !forced_val);
LOGI("Loaded audio policy from %s (%s)", module->name, module->id);
+
+ // load audio pre processing modules
+ if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
+ loadPreProcessorConfig(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
+ } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) {
+ loadPreProcessorConfig(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
+ }
}
AudioPolicyService::~AudioPolicyService()
@@ -110,6 +120,31 @@
mAudioCommandThread->exit();
mAudioCommandThread.clear();
+
+ // release audio pre processing resources
+ for (size_t i = 0; i < mInputSources.size(); i++) {
+ InputSourceDesc *source = mInputSources.valueAt(i);
+ Vector <EffectDesc *> effects = source->mEffects;
+ for (size_t j = 0; j < effects.size(); j++) {
+ delete effects[j]->mName;
+ Vector <effect_param_t *> params = effects[j]->mParams;
+ for (size_t k = 0; k < params.size(); k++) {
+ delete params[k];
+ }
+ params.clear();
+ delete effects[j];
+ }
+ effects.clear();
+ delete source;
+ }
+ mInputSources.clear();
+
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ mInputs.valueAt(i)->mEffects.clear();
+ delete mInputs.valueAt(i);
+ }
+ mInputs.clear();
+
if (mpAudioPolicy && mpAudioPolicyDev)
mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
if (mpAudioPolicyDev)
@@ -276,13 +311,51 @@
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- audio_in_acoustics_t acoustics)
+ audio_in_acoustics_t acoustics,
+ int audioSession)
{
if (mpAudioPolicy == NULL) {
return 0;
}
Mutex::Autolock _l(mLock);
- return mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate, format, channels, acoustics);
+ audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
+ format, channels, acoustics);
+
+ if (input == 0) {
+ return input;
+ }
+ // create audio pre processors according to input source
+ ssize_t index = mInputSources.indexOfKey((audio_source_t)inputSource);
+ if (index < 0) {
+ return input;
+ }
+ ssize_t idx = mInputs.indexOfKey(input);
+ InputDesc *inputDesc;
+ if (idx < 0) {
+ inputDesc = new InputDesc();
+ inputDesc->mSessionId = audioSession;
+ mInputs.add(input, inputDesc);
+ } else {
+ inputDesc = mInputs.valueAt(idx);
+ }
+
+ Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
+ for (size_t i = 0; i < effects.size(); i++) {
+ EffectDesc *effect = effects[i];
+ sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
+ status_t status = fx->initCheck();
+ if (status != NO_ERROR && status != ALREADY_EXISTS) {
+ LOGW("Failed to create Fx %s on input %d", effect->mName, input);
+ // fx goes out of scope and strong ref on AudioEffect is released
+ continue;
+ }
+ for (size_t j = 0; j < effect->mParams.size(); j++) {
+ fx->setParameter(effect->mParams[j]);
+ }
+ inputDesc->mEffects.add(fx);
+ }
+ setPreProcessorEnabled(inputDesc, true);
+ return input;
}
status_t AudioPolicyService::startInput(audio_io_handle_t input)
@@ -291,6 +364,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mLock);
+
return mpAudioPolicy->start_input(mpAudioPolicy, input);
}
@@ -300,6 +374,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mLock);
+
return mpAudioPolicy->stop_input(mpAudioPolicy, input);
}
@@ -310,6 +385,16 @@
}
Mutex::Autolock _l(mLock);
mpAudioPolicy->release_input(mpAudioPolicy, input);
+
+ ssize_t index = mInputs.indexOfKey(input);
+ if (index < 0) {
+ return;
+ }
+ InputDesc *inputDesc = mInputs.valueAt(index);
+ setPreProcessorEnabled(inputDesc, false);
+ inputDesc->mEffects.clear();
+ delete inputDesc;
+ mInputs.removeItemsAt(index);
}
status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
@@ -384,7 +469,7 @@
}
status_t AudioPolicyService::registerEffect(effect_descriptor_t *desc,
- audio_io_handle_t output,
+ audio_io_handle_t io,
uint32_t strategy,
int session,
int id)
@@ -392,7 +477,7 @@
if (mpAudioPolicy == NULL) {
return NO_INIT;
}
- return mpAudioPolicy->register_effect(mpAudioPolicy, desc, output, strategy, session, id);
+ return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
}
status_t AudioPolicyService::unregisterEffect(int id)
@@ -489,6 +574,15 @@
return NO_ERROR;
}
+void AudioPolicyService::setPreProcessorEnabled(InputDesc *inputDesc, bool enabled)
+{
+ Vector<sp<AudioEffect> > fxVector = inputDesc->mEffects;
+ for (size_t i = 0; i < fxVector.size(); i++) {
+ sp<AudioEffect> fx = fxVector.itemAt(i);
+ fx->setEnabled(enabled);
+ }
+}
+
status_t AudioPolicyService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
@@ -918,6 +1012,300 @@
return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
}
+// ----------------------------------------------------------------------------
+// Audio pre-processing configuration
+// ----------------------------------------------------------------------------
+
+const char *AudioPolicyService::kInputSourceNames[AUDIO_SOURCE_CNT -1] = {
+ MIC_SRC_TAG,
+ VOICE_UL_SRC_TAG,
+ VOICE_DL_SRC_TAG,
+ VOICE_CALL_SRC_TAG,
+ CAMCORDER_SRC_TAG,
+ VOICE_REC_SRC_TAG,
+ VOICE_COMM_SRC_TAG
+};
+
+// returns the audio_source_t enum corresponding to the input source name or
+// AUDIO_SOURCE_CNT if no match found
+audio_source_t AudioPolicyService::inputSourceNameToEnum(const char *name)
+{
+ int i;
+ for (i = AUDIO_SOURCE_MIC; i < AUDIO_SOURCE_CNT; i++) {
+ if (strcmp(name, kInputSourceNames[i - AUDIO_SOURCE_MIC]) == 0) {
+ LOGV("inputSourceNameToEnum found source %s %d", name, i);
+ break;
+ }
+ }
+ return (audio_source_t)i;
+}
+
+size_t AudioPolicyService::growParamSize(char *param,
+ size_t size,
+ size_t *curSize,
+ size_t *totSize)
+{
+ // *curSize is at least sizeof(effect_param_t) + 2 * sizeof(int)
+ size_t pos = ((*curSize - 1 ) / size + 1) * size;
+
+ if (pos + size > *totSize) {
+ while (pos + size > *totSize) {
+ *totSize += ((*totSize + 7) / 8) * 4;
+ }
+ param = (char *)realloc(param, *totSize);
+ }
+ *curSize = pos + size;
+ return pos;
+}
+
+size_t AudioPolicyService::readParamValue(cnode *node,
+ char *param,
+ size_t *curSize,
+ size_t *totSize)
+{
+ if (strncmp(node->name, SHORT_TAG, sizeof(SHORT_TAG) + 1) == 0) {
+ size_t pos = growParamSize(param, sizeof(short), curSize, totSize);
+ *(short *)((char *)param + pos) = (short)atoi(node->value);
+ LOGV("readParamValue() reading short %d", *(short *)((char *)param + pos));
+ return sizeof(short);
+ } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) {
+ size_t pos = growParamSize(param, sizeof(int), curSize, totSize);
+ *(int *)((char *)param + pos) = atoi(node->value);
+ LOGV("readParamValue() reading int %d", *(int *)((char *)param + pos));
+ return sizeof(int);
+ } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) {
+ size_t pos = growParamSize(param, sizeof(float), curSize, totSize);
+ *(float *)((char *)param + pos) = (float)atof(node->value);
+ LOGV("readParamValue() reading float %f",*(float *)((char *)param + pos));
+ return sizeof(float);
+ } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) {
+ size_t pos = growParamSize(param, sizeof(bool), curSize, totSize);
+ if (strncmp(node->value, "false", strlen("false") + 1) == 0) {
+ *(bool *)((char *)param + pos) = false;
+ } else {
+ *(bool *)((char *)param + pos) = true;
+ }
+ LOGV("readParamValue() reading bool %s",*(bool *)((char *)param + pos) ? "true" : "false");
+ return sizeof(bool);
+ } else if (strncmp(node->name, STRING_TAG, sizeof(STRING_TAG) + 1) == 0) {
+ size_t len = strnlen(node->value, EFFECT_STRING_LEN_MAX);
+ if (*curSize + len + 1 > *totSize) {
+ *totSize = *curSize + len + 1;
+ param = (char *)realloc(param, *totSize);
+ }
+ strncpy(param + *curSize, node->value, len);
+ *curSize += len;
+ param[*curSize] = '\0';
+ LOGV("readParamValue() reading string %s", param + *curSize - len);
+ return len;
+ }
+ LOGW("readParamValue() unknown param type %s", node->name);
+ return 0;
+}
+
+effect_param_t *AudioPolicyService::loadEffectParameter(cnode *root)
+{
+ cnode *param;
+ cnode *value;
+ size_t curSize = sizeof(effect_param_t);
+ size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int);
+ effect_param_t *fx_param = (effect_param_t *)malloc(totSize);
+
+ param = config_find(root, PARAM_TAG);
+ value = config_find(root, VALUE_TAG);
+ if (param == NULL && value == NULL) {
+ // try to parse simple parameter form {int int}
+ param = root->first_child;
+ if (param) {
+ // Note: that a pair of random strings is read as 0 0
+ int *ptr = (int *)fx_param->data;
+ int *ptr2 = (int *)((char *)param + sizeof(effect_param_t));
+ LOGW("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2);
+ *ptr++ = atoi(param->name);
+ *ptr = atoi(param->value);
+ fx_param->psize = sizeof(int);
+ fx_param->vsize = sizeof(int);
+ return fx_param;
+ }
+ }
+ if (param == NULL || value == NULL) {
+ LOGW("loadEffectParameter() invalid parameter description %s", root->name);
+ goto error;
+ }
+
+ fx_param->psize = 0;
+ param = param->first_child;
+ while (param) {
+ LOGV("loadEffectParameter() reading param of type %s", param->name);
+ size_t size = readParamValue(param, (char *)fx_param, &curSize, &totSize);
+ if (size == 0) {
+ goto error;
+ }
+ fx_param->psize += size;
+ param = param->next;
+ }
+
+ // align start of value field on 32 bit boundary
+ curSize = ((curSize - 1 ) / sizeof(int) + 1) * sizeof(int);
+
+ fx_param->vsize = 0;
+ value = value->first_child;
+ while (value) {
+ LOGV("loadEffectParameter() reading value of type %s", value->name);
+ size_t size = readParamValue(value, (char *)fx_param, &curSize, &totSize);
+ if (size == 0) {
+ goto error;
+ }
+ fx_param->vsize += size;
+ value = value->next;
+ }
+
+ return fx_param;
+
+error:
+ delete fx_param;
+ return NULL;
+}
+
+void AudioPolicyService::loadEffectParameters(cnode *root, Vector <effect_param_t *>& params)
+{
+ cnode *node = root->first_child;
+ while (node) {
+ LOGV("loadEffectParameters() loading param %s", node->name);
+ effect_param_t *param = loadEffectParameter(node);
+ if (param == NULL) {
+ node = node->next;
+ continue;
+ }
+ params.add(param);
+ node = node->next;
+ }
+}
+
+AudioPolicyService::InputSourceDesc *AudioPolicyService::loadInputSource(
+ cnode *root,
+ const Vector <EffectDesc *>& effects)
+{
+ cnode *node = root->first_child;
+ if (node == NULL) {
+ LOGW("loadInputSource() empty element %s", root->name);
+ return NULL;
+ }
+ InputSourceDesc *source = new InputSourceDesc();
+ while (node) {
+ size_t i;
+ for (i = 0; i < effects.size(); i++) {
+ if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) {
+ LOGV("loadInputSource() found effect %s in list", node->name);
+ break;
+ }
+ }
+ if (i == effects.size()) {
+ LOGV("loadInputSource() effect %s not in list", node->name);
+ node = node->next;
+ continue;
+ }
+ EffectDesc *effect = new EffectDesc(*effects[i]);
+ loadEffectParameters(node, effect->mParams);
+ LOGV("loadInputSource() adding effect %s uuid %08x", effect->mName, effect->mUuid.timeLow);
+ source->mEffects.add(effect);
+ node = node->next;
+ }
+ if (source->mEffects.size() == 0) {
+ LOGW("loadInputSource() no valid effects found in source %s", root->name);
+ delete source;
+ return NULL;
+ }
+ return source;
+}
+
+status_t AudioPolicyService::loadInputSources(cnode *root, const Vector <EffectDesc *>& effects)
+{
+ cnode *node = config_find(root, PREPROCESSING_TAG);
+ if (node == NULL) {
+ return -ENOENT;
+ }
+ node = node->first_child;
+ while (node) {
+ audio_source_t source = inputSourceNameToEnum(node->name);
+ if (source == AUDIO_SOURCE_CNT) {
+ LOGW("loadInputSources() invalid input source %s", node->name);
+ node = node->next;
+ continue;
+ }
+ LOGV("loadInputSources() loading input source %s", node->name);
+ InputSourceDesc *desc = loadInputSource(node, effects);
+ if (desc == NULL) {
+ node = node->next;
+ continue;
+ }
+ mInputSources.add(source, desc);
+ node = node->next;
+ }
+ return NO_ERROR;
+}
+
+AudioPolicyService::EffectDesc *AudioPolicyService::loadEffect(cnode *root)
+{
+ cnode *node = config_find(root, UUID_TAG);
+ if (node == NULL) {
+ return NULL;
+ }
+ effect_uuid_t uuid;
+ if (AudioEffect::stringToGuid(node->value, &uuid) != NO_ERROR) {
+ LOGW("loadEffect() invalid uuid %s", node->value);
+ return NULL;
+ }
+ EffectDesc *effect = new EffectDesc();
+ effect->mName = strdup(root->name);
+ memcpy(&effect->mUuid, &uuid, sizeof(effect_uuid_t));
+
+ return effect;
+}
+
+status_t AudioPolicyService::loadEffects(cnode *root, Vector <EffectDesc *>& effects)
+{
+ cnode *node = config_find(root, EFFECTS_TAG);
+ if (node == NULL) {
+ return -ENOENT;
+ }
+ node = node->first_child;
+ while (node) {
+ LOGV("loadEffects() loading effect %s", node->name);
+ EffectDesc *effect = loadEffect(node);
+ if (effect == NULL) {
+ node = node->next;
+ continue;
+ }
+ effects.add(effect);
+ node = node->next;
+ }
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::loadPreProcessorConfig(const char *path)
+{
+ cnode *root;
+ char *data;
+
+ data = (char *)load_file(path, NULL);
+ if (data == NULL) {
+ return -ENODEV;
+ }
+ root = config_node("", "");
+ config_load(root, data);
+
+ Vector <EffectDesc *> effects;
+ loadEffects(root, effects);
+ loadInputSources(root, effects);
+
+ config_free(root);
+ free(root);
+ free(data);
+
+ return NO_ERROR;
+}
+
/* implementation of the interface to the policy manager */
extern "C" {
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index b830120..62ad29e 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -17,14 +17,17 @@
#ifndef ANDROID_AUDIOPOLICYSERVICE_H
#define ANDROID_AUDIOPOLICYSERVICE_H
-#include <media/IAudioPolicyService.h>
-#include <media/ToneGenerator.h>
+#include <cutils/misc.h>
+#include <cutils/config_utils.h>
#include <utils/Vector.h>
+#include <utils/SortedVector.h>
#include <binder/BinderService.h>
-
#include <system/audio.h>
#include <system/audio_policy.h>
#include <hardware/audio_policy.h>
+#include <media/IAudioPolicyService.h>
+#include <media/ToneGenerator.h>
+#include <media/AudioEffect.h>
namespace android {
@@ -78,7 +81,8 @@
uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
audio_in_acoustics_t acoustics =
- (audio_in_acoustics_t)0);
+ (audio_in_acoustics_t)0,
+ int audioSession = 0);
virtual status_t startInput(audio_io_handle_t input);
virtual status_t stopInput(audio_io_handle_t input);
virtual void releaseInput(audio_io_handle_t input);
@@ -93,7 +97,7 @@
virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
virtual status_t registerEffect(effect_descriptor_t *desc,
- audio_io_handle_t output,
+ audio_io_handle_t io,
uint32_t strategy,
int session,
int id);
@@ -218,6 +222,51 @@
String8 mName; // string used by wake lock fo delayed commands
};
+ class EffectDesc {
+ public:
+ EffectDesc() {}
+ virtual ~EffectDesc() {}
+ char *mName;
+ effect_uuid_t mUuid;
+ Vector <effect_param_t *> mParams;
+ };
+
+ class InputSourceDesc {
+ public:
+ InputSourceDesc() {}
+ virtual ~InputSourceDesc() {}
+ Vector <EffectDesc *> mEffects;
+ };
+
+
+ class InputDesc {
+ public:
+ InputDesc() {}
+ virtual ~InputDesc() {}
+ int mSessionId;
+ Vector< sp<AudioEffect> >mEffects;
+ };
+
+ static const char *kInputSourceNames[AUDIO_SOURCE_CNT -1];
+
+ void setPreProcessorEnabled(InputDesc *inputDesc, bool enabled);
+ status_t loadPreProcessorConfig(const char *path);
+ status_t loadEffects(cnode *root, Vector <EffectDesc *>& effects);
+ EffectDesc *loadEffect(cnode *root);
+ status_t loadInputSources(cnode *root, const Vector <EffectDesc *>& effects);
+ audio_source_t inputSourceNameToEnum(const char *name);
+ InputSourceDesc *loadInputSource(cnode *root, const Vector <EffectDesc *>& effects);
+ void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params);
+ effect_param_t *loadEffectParameter(cnode *root);
+ size_t readParamValue(cnode *node,
+ char *param,
+ size_t *curSize,
+ size_t *totSize);
+ size_t growParamSize(char *param,
+ size_t size,
+ size_t *curSize,
+ size_t *totSize);
+
// Internal dump utilities.
status_t dumpPermissionDenial(int fd);
@@ -226,9 +275,10 @@
// device connection state or routing
sp <AudioCommandThread> mAudioCommandThread; // audio commands thread
sp <AudioCommandThread> mTonePlaybackThread; // tone playback thread
-
struct audio_policy_device *mpAudioPolicyDev;
struct audio_policy *mpAudioPolicy;
+ KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
+ KeyedVector< audio_io_handle_t, InputDesc* > mInputs;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 9b09983..96b26e7 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -458,6 +458,17 @@
return NO_ERROR;
}
+static void disconnectWindow(const sp<ANativeWindow>& window) {
+ if (window != 0) {
+ status_t result = native_window_disconnect(window.get(),
+ NATIVE_WINDOW_API_CAMERA);
+ if (result != NO_ERROR) {
+ LOGW("native_window_disconnect failed: %s (%d)", strerror(-result),
+ result);
+ }
+ }
+}
+
void CameraService::Client::disconnect() {
int callingPid = getCallingPid();
LOG1("disconnect E (pid %d)", callingPid);
@@ -489,6 +500,7 @@
// Release the held ANativeWindow resources.
if (mPreviewWindow != 0) {
+ disconnectWindow(mPreviewWindow);
mPreviewWindow = 0;
mHardware->setPreviewWindow(mPreviewWindow);
}
@@ -502,77 +514,73 @@
// ----------------------------------------------------------------------------
-// set the Surface that the preview will use
-status_t CameraService::Client::setPreviewDisplay(const sp<Surface>& surface) {
- LOG1("setPreviewDisplay(%p) (pid %d)", surface.get(), getCallingPid());
+status_t CameraService::Client::setPreviewWindow(const sp<IBinder>& binder,
+ const sp<ANativeWindow>& window) {
Mutex::Autolock lock(mLock);
status_t result = checkPidAndHardware();
if (result != NO_ERROR) return result;
- result = NO_ERROR;
-
// return if no change in surface.
- sp<IBinder> binder(surface != 0 ? surface->asBinder() : 0);
if (binder == mSurface) {
- return result;
+ return NO_ERROR;
}
- if (mSurface != 0) {
- LOG1("clearing old preview surface %p", mSurface.get());
- }
- mSurface = binder;
- mPreviewWindow = surface;
-
- // If preview has been already started, register preview
- // buffers now.
- if (mHardware->previewEnabled()) {
- if (mPreviewWindow != 0) {
- native_window_set_buffers_transform(mPreviewWindow.get(),
- mOrientation);
- result = mHardware->setPreviewWindow(mPreviewWindow);
+ if (window != 0) {
+ result = native_window_connect(window.get(), NATIVE_WINDOW_API_CAMERA);
+ if (result != NO_ERROR) {
+ LOGE("native_window_connect failed: %s (%d)", strerror(-result),
+ result);
+ return result;
}
}
+ // If preview has been already started, register preview buffers now.
+ if (mHardware->previewEnabled()) {
+ if (window != 0) {
+ native_window_set_scaling_mode(window.get(),
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+ native_window_set_buffers_transform(window.get(), mOrientation);
+ result = mHardware->setPreviewWindow(window);
+ }
+ }
+
+ if (result == NO_ERROR) {
+ // Everything has succeeded. Disconnect the old window and remember the
+ // new window.
+ disconnectWindow(mPreviewWindow);
+ mSurface = binder;
+ mPreviewWindow = window;
+ } else {
+ // Something went wrong after we connected to the new window, so
+ // disconnect here.
+ disconnectWindow(window);
+ }
+
return result;
}
+// set the Surface that the preview will use
+status_t CameraService::Client::setPreviewDisplay(const sp<Surface>& surface) {
+ LOG1("setPreviewDisplay(%p) (pid %d)", surface.get(), getCallingPid());
+
+ sp<IBinder> binder(surface != 0 ? surface->asBinder() : 0);
+ sp<ANativeWindow> window(surface);
+ return setPreviewWindow(binder, window);
+}
+
// set the SurfaceTexture that the preview will use
status_t CameraService::Client::setPreviewTexture(
const sp<ISurfaceTexture>& surfaceTexture) {
LOG1("setPreviewTexture(%p) (pid %d)", surfaceTexture.get(),
getCallingPid());
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
- // return if no change in surface.
- // asBinder() is safe on NULL (returns NULL)
- if (surfaceTexture->asBinder() == mSurface) {
- return result;
- }
-
- if (mSurface != 0) {
- LOG1("clearing old preview surface %p", mSurface.get());
- }
- mSurface = surfaceTexture->asBinder();
+ sp<IBinder> binder;
+ sp<ANativeWindow> window;
if (surfaceTexture != 0) {
- mPreviewWindow = new SurfaceTextureClient(surfaceTexture);
- } else {
- mPreviewWindow = 0;
+ binder = surfaceTexture->asBinder();
+ window = new SurfaceTextureClient(surfaceTexture);
}
-
- // If preview has been already started, set overlay or register preview
- // buffers now.
- if (mHardware->previewEnabled()) {
- // XXX: What if the new preview window is 0?
- if (mPreviewWindow != 0) {
- native_window_set_buffers_transform(mPreviewWindow.get(),
- mOrientation);
- result = mHardware->setPreviewWindow(mPreviewWindow);
- }
- }
-
- return result;
+ return setPreviewWindow(binder, window);
}
// set the preview callback flag to affect how the received frames from
@@ -637,6 +645,8 @@
}
if (mPreviewWindow != 0) {
+ native_window_set_scaling_mode(mPreviewWindow.get(),
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
native_window_set_buffers_transform(mPreviewWindow.get(),
mOrientation);
}
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 5e2d571..c5fefb8 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -161,6 +161,10 @@
int getOrientation(int orientation, bool mirror);
+ status_t setPreviewWindow(
+ const sp<IBinder>& binder,
+ const sp<ANativeWindow>& window);
+
// these are initialized in the constructor.
sp<CameraService> mCameraService; // immutable after constructor
sp<ICameraClient> mCameraClient;