Merge "Match AHardwareBuffer format name change" into oc-dev
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 46a95c5..5c11bfa 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -67,6 +67,9 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj_arm/SHARED_LIBRARIES/liboboe*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/bin/mediacodec)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/init/mediacodec.rc)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libeffects.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib64/libeffects.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libeffects_intermediates)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/drm/mediacas/plugins/clearkey/Android.mk b/drm/mediacas/plugins/clearkey/Android.mk
index 0c2b357..8fd866c 100644
--- a/drm/mediacas/plugins/clearkey/Android.mk
+++ b/drm/mediacas/plugins/clearkey/Android.mk
@@ -28,7 +28,8 @@
LOCAL_MODULE := libclearkeycasplugin
-LOCAL_PROPRIETARY_MODULE := true
+#TODO: move this back to /vendor/lib after conversion to treble
+#LOCAL_PROPRIETARY_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := mediacas
LOCAL_SHARED_LIBRARIES := \
diff --git a/drm/mediacas/plugins/clearkey/tests/Android.mk b/drm/mediacas/plugins/clearkey/tests/Android.mk
index 5418c1d..cbf7be7 100644
--- a/drm/mediacas/plugins/clearkey/tests/Android.mk
+++ b/drm/mediacas/plugins/clearkey/tests/Android.mk
@@ -26,7 +26,7 @@
# the plugin is not in standard library search path. Without this .so
# loading fails at run-time (linking is okay).
LOCAL_LDFLAGS := \
- -Wl,--rpath,\$${ORIGIN}/../../../system/vendor/lib/mediacas -Wl,--enable-new-dtags
+ -Wl,--rpath,\$${ORIGIN}/../../../system/lib/mediacas -Wl,--enable-new-dtags
LOCAL_SHARED_LIBRARIES := \
libutils libclearkeycasplugin libstagefright_foundation libprotobuf-cpp-lite liblog
diff --git a/include/media/AVSyncSettings.h b/include/media/AVSyncSettings.h
index 4b48419..bbe211f 120000
--- a/include/media/AVSyncSettings.h
+++ b/include/media/AVSyncSettings.h
@@ -1 +1 @@
-../../media/libmedia/include/AVSyncSettings.h
\ No newline at end of file
+../../media/libmedia/include/media/AVSyncSettings.h
\ No newline at end of file
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
index dd7e234..c4d6e79 120000
--- a/include/media/AudioBufferProvider.h
+++ b/include/media/AudioBufferProvider.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioBufferProvider.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 343749c..bf52955 120000
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioEffect.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioEffect.h
\ No newline at end of file
diff --git a/include/media/AudioIoDescriptor.h b/include/media/AudioIoDescriptor.h
index 057129b..68f54c9 120000
--- a/include/media/AudioIoDescriptor.h
+++ b/include/media/AudioIoDescriptor.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioIoDescriptor.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioIoDescriptor.h
\ No newline at end of file
diff --git a/include/media/AudioMixer.h b/include/media/AudioMixer.h
index a2d0791..de839c6 120000
--- a/include/media/AudioMixer.h
+++ b/include/media/AudioMixer.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioMixer.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioMixer.h
\ No newline at end of file
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
index 6b6fe3b..a5889e5 120000
--- a/include/media/AudioParameter.h
+++ b/include/media/AudioParameter.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioParameter.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioParameter.h
\ No newline at end of file
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
index 49ee572..dd4cd53 120000
--- a/include/media/AudioPolicy.h
+++ b/include/media/AudioPolicy.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioPolicy.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioPolicy.h
\ No newline at end of file
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
index a0302e2..558657e 120000
--- a/include/media/AudioPolicyHelper.h
+++ b/include/media/AudioPolicyHelper.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioPolicyHelper.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioPolicyHelper.h
\ No newline at end of file
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index d5a5c36..7939dd3 120000
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioRecord.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioRecord.h
\ No newline at end of file
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 0b7179f..9fad2b7 120000
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioSystem.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioSystem.h
\ No newline at end of file
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
index f266780..b6b9278 120000
--- a/include/media/AudioTimestamp.h
+++ b/include/media/AudioTimestamp.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioTimestamp.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioTimestamp.h
\ No newline at end of file
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index fddb075..303bfcd 120000
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -1 +1 @@
-../../media/libaudioclient/include/AudioTrack.h
\ No newline at end of file
+../../media/libaudioclient/include/media/AudioTrack.h
\ No newline at end of file
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
index a1fd855..779bb15 120000
--- a/include/media/BufferProviders.h
+++ b/include/media/BufferProviders.h
@@ -1 +1 @@
-../../media/libmedia/include/BufferProviders.h
\ No newline at end of file
+../../media/libmedia/include/media/BufferProviders.h
\ No newline at end of file
diff --git a/include/media/BufferingSettings.h b/include/media/BufferingSettings.h
index fb4ec97..409203f 120000
--- a/include/media/BufferingSettings.h
+++ b/include/media/BufferingSettings.h
@@ -1 +1 @@
-../../media/libmedia/include/BufferingSettings.h
\ No newline at end of file
+../../media/libmedia/include/media/BufferingSettings.h
\ No newline at end of file
diff --git a/include/media/CharacterEncodingDetector.h b/include/media/CharacterEncodingDetector.h
index f23ed4c..2b28387 120000
--- a/include/media/CharacterEncodingDetector.h
+++ b/include/media/CharacterEncodingDetector.h
@@ -1 +1 @@
-../../media/libmedia/include/CharacterEncodingDetector.h
\ No newline at end of file
+../../media/libmedia/include/media/CharacterEncodingDetector.h
\ No newline at end of file
diff --git a/include/media/Crypto.h b/include/media/Crypto.h
index 778f6fe..9af6495 120000
--- a/include/media/Crypto.h
+++ b/include/media/Crypto.h
@@ -1 +1 @@
-../../media/libmedia/include/Crypto.h
\ No newline at end of file
+../../media/libmedia/include/media/Crypto.h
\ No newline at end of file
diff --git a/include/media/CryptoHal.h b/include/media/CryptoHal.h
index 81f31f5..92f3137 120000
--- a/include/media/CryptoHal.h
+++ b/include/media/CryptoHal.h
@@ -1 +1 @@
-../../media/libmedia/include/CryptoHal.h
\ No newline at end of file
+../../media/libmedia/include/media/CryptoHal.h
\ No newline at end of file
diff --git a/include/media/Drm.h b/include/media/Drm.h
index d9bfa5c..ac60003 120000
--- a/include/media/Drm.h
+++ b/include/media/Drm.h
@@ -1 +1 @@
-../../media/libmedia/include/Drm.h
\ No newline at end of file
+../../media/libmedia/include/media/Drm.h
\ No newline at end of file
diff --git a/include/media/DrmHal.h b/include/media/DrmHal.h
index 21ba37b..17bb667 120000
--- a/include/media/DrmHal.h
+++ b/include/media/DrmHal.h
@@ -1 +1 @@
-../../media/libmedia/include/DrmHal.h
\ No newline at end of file
+../../media/libmedia/include/media/DrmHal.h
\ No newline at end of file
diff --git a/include/media/DrmPluginPath.h b/include/media/DrmPluginPath.h
index 06b12cf..9e05194 120000
--- a/include/media/DrmPluginPath.h
+++ b/include/media/DrmPluginPath.h
@@ -1 +1 @@
-../../media/libmedia/include/DrmPluginPath.h
\ No newline at end of file
+../../media/libmedia/include/media/DrmPluginPath.h
\ No newline at end of file
diff --git a/include/media/DrmSessionClientInterface.h b/include/media/DrmSessionClientInterface.h
index 72090a3..f4e3211 120000
--- a/include/media/DrmSessionClientInterface.h
+++ b/include/media/DrmSessionClientInterface.h
@@ -1 +1 @@
-../../media/libmedia/include/DrmSessionClientInterface.h
\ No newline at end of file
+../../media/libmedia/include/media/DrmSessionClientInterface.h
\ No newline at end of file
diff --git a/include/media/DrmSessionManager.h b/include/media/DrmSessionManager.h
index 47200f7..f0a47bf 120000
--- a/include/media/DrmSessionManager.h
+++ b/include/media/DrmSessionManager.h
@@ -1 +1 @@
-../../media/libmedia/include/DrmSessionManager.h
\ No newline at end of file
+../../media/libmedia/include/media/DrmSessionManager.h
\ No newline at end of file
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index 2431dfb..288590a 120000
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -1 +1 @@
-../../media/libeffects/factory/include/EffectsFactoryApi.h
\ No newline at end of file
+../../media/libeffects/factory/include/media/EffectsFactoryApi.h
\ No newline at end of file
diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h
index 9497be1..d653cc3 120000
--- a/include/media/ExtendedAudioBufferProvider.h
+++ b/include/media/ExtendedAudioBufferProvider.h
@@ -1 +1 @@
-../../media/libmedia/include/ExtendedAudioBufferProvider.h
\ No newline at end of file
+../../media/libmedia/include/media/ExtendedAudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
new file mode 120000
index 0000000..ef6f5be
--- /dev/null
+++ b/include/media/IAudioFlinger.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IAudioFlinger.h
\ No newline at end of file
diff --git a/include/media/IAudioFlingerClient.h b/include/media/IAudioFlingerClient.h
index d27389e..dc481e8 120000
--- a/include/media/IAudioFlingerClient.h
+++ b/include/media/IAudioFlingerClient.h
@@ -1 +1 @@
-../../media/libaudioclient/include/IAudioFlingerClient.h
\ No newline at end of file
+../../media/libaudioclient/include/media/IAudioFlingerClient.h
\ No newline at end of file
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 8ef16e2..08101fc 120000
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -1 +1 @@
-../../media/libaudioclient/include/IAudioPolicyService.h
\ No newline at end of file
+../../media/libaudioclient/include/media/IAudioPolicyService.h
\ No newline at end of file
diff --git a/include/media/IAudioPolicyServiceClient.h b/include/media/IAudioPolicyServiceClient.h
index 26f6790..0d4b3e7 120000
--- a/include/media/IAudioPolicyServiceClient.h
+++ b/include/media/IAudioPolicyServiceClient.h
@@ -1 +1 @@
-../../media/libaudioclient/include/IAudioPolicyServiceClient.h
\ No newline at end of file
+../../media/libaudioclient/include/media/IAudioPolicyServiceClient.h
\ No newline at end of file
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index 520d44e..7fbf8f2 120000
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -1 +1 @@
-../../media/libaudioclient/include/IAudioRecord.h
\ No newline at end of file
+../../media/libaudioclient/include/media/IAudioRecord.h
\ No newline at end of file
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index afa6bf4..7bab1fd 120000
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -1 +1 @@
-../../media/libaudioclient/include/IAudioTrack.h
\ No newline at end of file
+../../media/libaudioclient/include/media/IAudioTrack.h
\ No newline at end of file
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
index 53c547a..b250e07 120000
--- a/include/media/ICrypto.h
+++ b/include/media/ICrypto.h
@@ -1 +1 @@
-../../media/libmedia/include/ICrypto.h
\ No newline at end of file
+../../media/libmedia/include/media/ICrypto.h
\ No newline at end of file
diff --git a/include/media/IDataSource.h b/include/media/IDataSource.h
index 7ac813f..41cdd8b 120000
--- a/include/media/IDataSource.h
+++ b/include/media/IDataSource.h
@@ -1 +1 @@
-../../media/libmedia/include/IDataSource.h
\ No newline at end of file
+../../media/libmedia/include/media/IDataSource.h
\ No newline at end of file
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index eb2f0ec..841bb1b 120000
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -1 +1 @@
-../../media/libmedia/include/IDrm.h
\ No newline at end of file
+../../media/libmedia/include/media/IDrm.h
\ No newline at end of file
diff --git a/include/media/IDrmClient.h b/include/media/IDrmClient.h
index 4d8b50c..10aa5c0 120000
--- a/include/media/IDrmClient.h
+++ b/include/media/IDrmClient.h
@@ -1 +1 @@
-../../media/libmedia/include/IDrmClient.h
\ No newline at end of file
+../../media/libmedia/include/media/IDrmClient.h
\ No newline at end of file
diff --git a/include/media/IEffect.h b/include/media/IEffect.h
index 72d715d..2fb8bfb 120000
--- a/include/media/IEffect.h
+++ b/include/media/IEffect.h
@@ -1 +1 @@
-../../media/libaudioclient/include/IEffect.h
\ No newline at end of file
+../../media/libaudioclient/include/media/IEffect.h
\ No newline at end of file
diff --git a/include/media/IEffectClient.h b/include/media/IEffectClient.h
index 0614d8a..b4e39cf 120000
--- a/include/media/IEffectClient.h
+++ b/include/media/IEffectClient.h
@@ -1 +1 @@
-../../media/libaudioclient/include/IEffectClient.h
\ No newline at end of file
+../../media/libaudioclient/include/media/IEffectClient.h
\ No newline at end of file
diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h
index f1e112e..9d4568e 120000
--- a/include/media/IHDCP.h
+++ b/include/media/IHDCP.h
@@ -1 +1 @@
-../../media/libmedia/include/IHDCP.h
\ No newline at end of file
+../../media/libmedia/include/media/IHDCP.h
\ No newline at end of file
diff --git a/include/media/IMediaCodecList.h b/include/media/IMediaCodecList.h
index 2e30503..2186312 120000
--- a/include/media/IMediaCodecList.h
+++ b/include/media/IMediaCodecList.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaCodecList.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaCodecList.h
\ No newline at end of file
diff --git a/include/media/IMediaCodecService.h b/include/media/IMediaCodecService.h
index 5103277..37f6822 120000
--- a/include/media/IMediaCodecService.h
+++ b/include/media/IMediaCodecService.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaCodecService.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaCodecService.h
\ No newline at end of file
diff --git a/include/media/IMediaDeathNotifier.h b/include/media/IMediaDeathNotifier.h
index 74b1656..ce3b8f0 120000
--- a/include/media/IMediaDeathNotifier.h
+++ b/include/media/IMediaDeathNotifier.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaDeathNotifier.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaDeathNotifier.h
\ No newline at end of file
diff --git a/include/media/IMediaDrmService.h b/include/media/IMediaDrmService.h
index 6efbc48..f3c260f 120000
--- a/include/media/IMediaDrmService.h
+++ b/include/media/IMediaDrmService.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaDrmService.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaDrmService.h
\ No newline at end of file
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
index c17c4eb..8708c8c 120000
--- a/include/media/IMediaExtractor.h
+++ b/include/media/IMediaExtractor.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaExtractor.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaExtractor.h
\ No newline at end of file
diff --git a/include/media/IMediaExtractorService.h b/include/media/IMediaExtractorService.h
index 1e6e8b4..3ee9f1e 120000
--- a/include/media/IMediaExtractorService.h
+++ b/include/media/IMediaExtractorService.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaExtractorService.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaExtractorService.h
\ No newline at end of file
diff --git a/include/media/IMediaHTTPConnection.h b/include/media/IMediaHTTPConnection.h
index 9e544fe..0970c15 120000
--- a/include/media/IMediaHTTPConnection.h
+++ b/include/media/IMediaHTTPConnection.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaHTTPConnection.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaHTTPConnection.h
\ No newline at end of file
diff --git a/include/media/IMediaHTTPService.h b/include/media/IMediaHTTPService.h
index 6312e06..b90c34f 120000
--- a/include/media/IMediaHTTPService.h
+++ b/include/media/IMediaHTTPService.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaHTTPService.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaHTTPService.h
\ No newline at end of file
diff --git a/include/media/IMediaLogService.h b/include/media/IMediaLogService.h
new file mode 120000
index 0000000..245a29d
--- /dev/null
+++ b/include/media/IMediaLogService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaLogService.h
\ No newline at end of file
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
index c2dd811..959df1a 120000
--- a/include/media/IMediaMetadataRetriever.h
+++ b/include/media/IMediaMetadataRetriever.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaMetadataRetriever.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaMetadataRetriever.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index a38baf4..9414d37 120000
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaPlayer.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaPlayer.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayerClient.h b/include/media/IMediaPlayerClient.h
index 1c27dee..b6547ce 120000
--- a/include/media/IMediaPlayerClient.h
+++ b/include/media/IMediaPlayerClient.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaPlayerClient.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaPlayerClient.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 08a6a98..89c96cd 120000
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaPlayerService.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaPlayerService.h
\ No newline at end of file
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index c8b8b29..57d192c 120000
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaRecorder.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaRecorder.h
\ No newline at end of file
diff --git a/include/media/IMediaRecorderClient.h b/include/media/IMediaRecorderClient.h
index ab703aa..89f4359 120000
--- a/include/media/IMediaRecorderClient.h
+++ b/include/media/IMediaRecorderClient.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaRecorderClient.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaRecorderClient.h
\ No newline at end of file
diff --git a/include/media/IMediaSource.h b/include/media/IMediaSource.h
index 1c3d8fe..1330ad3 120000
--- a/include/media/IMediaSource.h
+++ b/include/media/IMediaSource.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaSource.h
\ No newline at end of file
+../../media/libmedia/include/media/IMediaSource.h
\ No newline at end of file
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 989d9b2..6d5b375 120000
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -1 +1 @@
-../../media/libmedia/include/IOMX.h
\ No newline at end of file
+../../media/libmedia/include/media/IOMX.h
\ No newline at end of file
diff --git a/include/media/IRemoteDisplay.h b/include/media/IRemoteDisplay.h
index 5aa58b9..4b0cf10 120000
--- a/include/media/IRemoteDisplay.h
+++ b/include/media/IRemoteDisplay.h
@@ -1 +1 @@
-../../media/libmedia/include/IRemoteDisplay.h
\ No newline at end of file
+../../media/libmedia/include/media/IRemoteDisplay.h
\ No newline at end of file
diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h
index 2d212e7..f29a2ee 120000
--- a/include/media/IRemoteDisplayClient.h
+++ b/include/media/IRemoteDisplayClient.h
@@ -1 +1 @@
-../../media/libmedia/include/IRemoteDisplayClient.h
\ No newline at end of file
+../../media/libmedia/include/media/IRemoteDisplayClient.h
\ No newline at end of file
diff --git a/include/media/IResourceManagerClient.h b/include/media/IResourceManagerClient.h
index 1531ae2..100af9b 120000
--- a/include/media/IResourceManagerClient.h
+++ b/include/media/IResourceManagerClient.h
@@ -1 +1 @@
-../../media/libmedia/include/IResourceManagerClient.h
\ No newline at end of file
+../../media/libmedia/include/media/IResourceManagerClient.h
\ No newline at end of file
diff --git a/include/media/IResourceManagerService.h b/include/media/IResourceManagerService.h
index 007aecb..9b389c6 120000
--- a/include/media/IResourceManagerService.h
+++ b/include/media/IResourceManagerService.h
@@ -1 +1 @@
-../../media/libmedia/include/IResourceManagerService.h
\ No newline at end of file
+../../media/libmedia/include/media/IResourceManagerService.h
\ No newline at end of file
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
index 90dbbf2..4943af9 120000
--- a/include/media/IStreamSource.h
+++ b/include/media/IStreamSource.h
@@ -1 +1 @@
-../../media/libmedia/include/IStreamSource.h
\ No newline at end of file
+../../media/libmedia/include/media/IStreamSource.h
\ No newline at end of file
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
index cabfb79..5483fda 120000
--- a/include/media/JetPlayer.h
+++ b/include/media/JetPlayer.h
@@ -1 +1 @@
-../../media/libmedia/include/JetPlayer.h
\ No newline at end of file
+../../media/libmedia/include/media/JetPlayer.h
\ No newline at end of file
diff --git a/include/media/LinearMap.h b/include/media/LinearMap.h
index 3e89686..30d4ca8 120000
--- a/include/media/LinearMap.h
+++ b/include/media/LinearMap.h
@@ -1 +1 @@
-../../media/libmedia/include/LinearMap.h
\ No newline at end of file
+../../media/libmedia/include/media/LinearMap.h
\ No newline at end of file
diff --git a/include/media/MediaCodecBuffer.h b/include/media/MediaCodecBuffer.h
index 60b7e70..8c9aa76 120000
--- a/include/media/MediaCodecBuffer.h
+++ b/include/media/MediaCodecBuffer.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaCodecBuffer.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaCodecBuffer.h
\ No newline at end of file
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
index 22b10bb..ff44ce4 120000
--- a/include/media/MediaCodecInfo.h
+++ b/include/media/MediaCodecInfo.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaCodecInfo.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaCodecInfo.h
\ No newline at end of file
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
index 993729d..9850603 120000
--- a/include/media/MediaDefs.h
+++ b/include/media/MediaDefs.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaDefs.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaDefs.h
\ No newline at end of file
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
index a09f9bb..1c53511 120000
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaMetadataRetrieverInterface.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaMetadataRetrieverInterface.h
\ No newline at end of file
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 86958e4..651c6e6 120000
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaProfiles.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaProfiles.h
\ No newline at end of file
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index 6080258..e40f992 120000
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaRecorderBase.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaRecorderBase.h
\ No newline at end of file
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
index aaf931a..91346aa 120000
--- a/include/media/MediaResource.h
+++ b/include/media/MediaResource.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaResource.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaResource.h
\ No newline at end of file
diff --git a/include/media/MediaResourcePolicy.h b/include/media/MediaResourcePolicy.h
index d56b09f..5d165ee 120000
--- a/include/media/MediaResourcePolicy.h
+++ b/include/media/MediaResourcePolicy.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaResourcePolicy.h
\ No newline at end of file
+../../media/libmedia/include/media/MediaResourcePolicy.h
\ No newline at end of file
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
index cfeac14..504173e 120000
--- a/include/media/MemoryLeakTrackUtil.h
+++ b/include/media/MemoryLeakTrackUtil.h
@@ -1 +1 @@
-../../media/libmedia/include/MemoryLeakTrackUtil.h
\ No newline at end of file
+../../media/libmedia/include/media/MemoryLeakTrackUtil.h
\ No newline at end of file
diff --git a/include/media/Metadata.h b/include/media/Metadata.h
index 4a5893d..e421168 120000
--- a/include/media/Metadata.h
+++ b/include/media/Metadata.h
@@ -1 +1 @@
-../../media/libmedia/include/Metadata.h
\ No newline at end of file
+../../media/libmedia/include/media/Metadata.h
\ No newline at end of file
diff --git a/include/media/MidiDeviceInfo.h b/include/media/MidiDeviceInfo.h
index 55ac9f5..95da7cf 120000
--- a/include/media/MidiDeviceInfo.h
+++ b/include/media/MidiDeviceInfo.h
@@ -1 +1 @@
-../../media/libmedia/include/MidiDeviceInfo.h
\ No newline at end of file
+../../media/libmedia/include/media/MidiDeviceInfo.h
\ No newline at end of file
diff --git a/include/media/MidiIoWrapper.h b/include/media/MidiIoWrapper.h
index a3fe892..786ec3d 120000
--- a/include/media/MidiIoWrapper.h
+++ b/include/media/MidiIoWrapper.h
@@ -1 +1 @@
-../../media/libmedia/include/MidiIoWrapper.h
\ No newline at end of file
+../../media/libmedia/include/media/MidiIoWrapper.h
\ No newline at end of file
diff --git a/include/media/Modulo.h b/include/media/Modulo.h
index 58f31a4..989c4cb 120000
--- a/include/media/Modulo.h
+++ b/include/media/Modulo.h
@@ -1 +1 @@
-../../media/libmedia/include/Modulo.h
\ No newline at end of file
+../../media/libmedia/include/media/Modulo.h
\ No newline at end of file
diff --git a/include/media/OMXBuffer.h b/include/media/OMXBuffer.h
index 9defe79..00db207 120000
--- a/include/media/OMXBuffer.h
+++ b/include/media/OMXBuffer.h
@@ -1 +1 @@
-../../media/libmedia/include/OMXBuffer.h
\ No newline at end of file
+../../media/libmedia/include/media/OMXBuffer.h
\ No newline at end of file
diff --git a/include/media/OMXFenceParcelable.h b/include/media/OMXFenceParcelable.h
index 2e996dd..c4c1b0a 120000
--- a/include/media/OMXFenceParcelable.h
+++ b/include/media/OMXFenceParcelable.h
@@ -1 +1 @@
-../../media/libmedia/include/OMXFenceParcelable.h
\ No newline at end of file
+../../media/libmedia/include/media/OMXFenceParcelable.h
\ No newline at end of file
diff --git a/include/media/PluginLoader.h b/include/media/PluginLoader.h
index f67f2c4..9101735 120000
--- a/include/media/PluginLoader.h
+++ b/include/media/PluginLoader.h
@@ -1 +1 @@
-../../media/libmedia/include/PluginLoader.h
\ No newline at end of file
+../../media/libmedia/include/media/PluginLoader.h
\ No newline at end of file
diff --git a/include/media/RecordBufferConverter.h b/include/media/RecordBufferConverter.h
index b9ee8df..2d7bc0c 120000
--- a/include/media/RecordBufferConverter.h
+++ b/include/media/RecordBufferConverter.h
@@ -1 +1 @@
-../../media/libmedia/include/RecordBufferConverter.h
\ No newline at end of file
+../../media/libmedia/include/media/RecordBufferConverter.h
\ No newline at end of file
diff --git a/include/media/RingBuffer.h b/include/media/RingBuffer.h
index 84f4943..9af28d5 120000
--- a/include/media/RingBuffer.h
+++ b/include/media/RingBuffer.h
@@ -1 +1 @@
-../../media/libmedia/include/RingBuffer.h
\ No newline at end of file
+../../media/libmedia/include/media/RingBuffer.h
\ No newline at end of file
diff --git a/include/media/SharedLibrary.h b/include/media/SharedLibrary.h
index a2a040f..9f8f5a4 120000
--- a/include/media/SharedLibrary.h
+++ b/include/media/SharedLibrary.h
@@ -1 +1 @@
-../../media/libmedia/include/SharedLibrary.h
\ No newline at end of file
+../../media/libmedia/include/media/SharedLibrary.h
\ No newline at end of file
diff --git a/include/media/SingleStateQueue.h b/include/media/SingleStateQueue.h
index 7dda0d8..619f6ee 120000
--- a/include/media/SingleStateQueue.h
+++ b/include/media/SingleStateQueue.h
@@ -1 +1 @@
-../../media/libmedia/include/SingleStateQueue.h
\ No newline at end of file
+../../media/libmedia/include/media/SingleStateQueue.h
\ No newline at end of file
diff --git a/include/media/StringArray.h b/include/media/StringArray.h
index 5061652..616ce6c 120000
--- a/include/media/StringArray.h
+++ b/include/media/StringArray.h
@@ -1 +1 @@
-../../media/libmedia/include/StringArray.h
\ No newline at end of file
+../../media/libmedia/include/media/StringArray.h
\ No newline at end of file
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index f00ee2d..33df0e3 120000
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -1 +1 @@
-../../media/libaudioclient/include/ToneGenerator.h
\ No newline at end of file
+../../media/libaudioclient/include/media/ToneGenerator.h
\ No newline at end of file
diff --git a/include/media/TypeConverter.h b/include/media/TypeConverter.h
index 9109aaa..837af44 120000
--- a/include/media/TypeConverter.h
+++ b/include/media/TypeConverter.h
@@ -1 +1 @@
-../../media/libmedia/include/TypeConverter.h
\ No newline at end of file
+../../media/libmedia/include/media/TypeConverter.h
\ No newline at end of file
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index fca8b86..ed2ec15 120000
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -1 +1 @@
-../../media/libmedia/include/Visualizer.h
\ No newline at end of file
+../../media/libmedia/include/media/Visualizer.h
\ No newline at end of file
diff --git a/include/media/convert.h b/include/media/convert.h
index 3e09482..cb0d00d 120000
--- a/include/media/convert.h
+++ b/include/media/convert.h
@@ -1 +1 @@
-../../media/libmedia/include/convert.h
\ No newline at end of file
+../../media/libmedia/include/media/convert.h
\ No newline at end of file
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
index 1992b05..b401bab 120000
--- a/include/media/mediametadataretriever.h
+++ b/include/media/mediametadataretriever.h
@@ -1 +1 @@
-../../media/libmedia/include/mediametadataretriever.h
\ No newline at end of file
+../../media/libmedia/include/media/mediametadataretriever.h
\ No newline at end of file
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 2b1d298..06d537b 120000
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -1 +1 @@
-../../media/libmedia/include/mediaplayer.h
\ No newline at end of file
+../../media/libmedia/include/media/mediaplayer.h
\ No newline at end of file
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 08c826f..a24deb3 120000
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -1 +1 @@
-../../media/libmedia/include/mediarecorder.h
\ No newline at end of file
+../../media/libmedia/include/media/mediarecorder.h
\ No newline at end of file
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
index 42c3507..91479e0 120000
--- a/include/media/mediascanner.h
+++ b/include/media/mediascanner.h
@@ -1 +1 @@
-../../media/libmedia/include/mediascanner.h
\ No newline at end of file
+../../media/libmedia/include/media/mediascanner.h
\ No newline at end of file
diff --git a/include/media/omx/1.0/WGraphicBufferSource.h b/include/media/omx/1.0/WGraphicBufferSource.h
index 0ca5f44..397e576 100644
--- a/include/media/omx/1.0/WGraphicBufferSource.h
+++ b/include/media/omx/1.0/WGraphicBufferSource.h
@@ -67,14 +67,11 @@
struct LWGraphicBufferSource : public BnGraphicBufferSource {
sp<TGraphicBufferSource> mBase;
LWGraphicBufferSource(sp<TGraphicBufferSource> const& base);
- BnStatus configure(
- const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
+ BnStatus configure(const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
BnStatus setSuspend(bool suspend, int64_t timeUs) override;
- BnStatus setRepeatPreviousFrameDelayUs(
- int64_t repeatAfterUs) override;
+ BnStatus setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
BnStatus setMaxFps(float maxFps) override;
- BnStatus setTimeLapseConfig(
- int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
+ BnStatus setTimeLapseConfig(double fps, double captureFps) override;
BnStatus setStartTimeUs(int64_t startTimeUs) override;
BnStatus setStopTimeUs(int64_t stopTimeUs) override;
BnStatus setColorAspects(int32_t aspects) override;
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
index 8cd0f03..5a884e1 100644
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ b/media/libaaudio/examples/write_sine/jni/Android.mk
@@ -18,9 +18,9 @@
$(call include-path-for, audio-utils) \
frameworks/av/media/libaaudio/include
-LOCAL_SRC_FILES:= ../src/write_sine_threaded.cpp
+LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_threaded_ndk
+LOCAL_MODULE := write_sine_callback_ndk
include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
diff --git a/media/libaaudio/examples/write_sine/src/SineGenerator.h b/media/libaaudio/examples/write_sine/src/SineGenerator.h
index 64b772d..f2eb984 100644
--- a/media/libaaudio/examples/write_sine/src/SineGenerator.h
+++ b/media/libaaudio/examples/write_sine/src/SineGenerator.h
@@ -79,7 +79,7 @@
}
}
- double mAmplitude = 0.05; // unitless scaler
+ double mAmplitude = 0.005; // unitless scaler
double mPhase = 0.0;
double mPhaseIncrement = 440 * M_PI * 2 / 48000;
double mFrameRate = 48000;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index d8e5ec1..6525c0a 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -23,11 +23,15 @@
#include "SineGenerator.h"
#define SAMPLE_RATE 48000
-#define NUM_SECONDS 10
+#define NUM_SECONDS 5
#define NANOS_PER_MICROSECOND ((int64_t)1000)
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+#define REQUESTED_FORMAT AAUDIO_FORMAT_PCM_I16
+#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+//#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+
static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
const char *modeText = "unknown";
switch (mode) {
@@ -63,23 +67,21 @@
int actualSamplesPerFrame = 0;
const int requestedSampleRate = SAMPLE_RATE;
int actualSampleRate = 0;
- const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
- aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_I16;
+ aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
- //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
- const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
- int32_t framesPerBurst = 0;
- int32_t framesPerWrite = 0;
- int32_t bufferCapacity = 0;
- int32_t framesToPlay = 0;
- int32_t framesLeft = 0;
- int32_t xRunCount = 0;
- int16_t *data = nullptr;
+ int32_t framesPerBurst = 0;
+ int32_t framesPerWrite = 0;
+ int32_t bufferCapacity = 0;
+ int32_t framesToPlay = 0;
+ int32_t framesLeft = 0;
+ int32_t xRunCount = 0;
+ float *floatData = nullptr;
+ int16_t *shortData = nullptr;
SineGenerator sineOsc1;
SineGenerator sineOsc2;
@@ -88,7 +90,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine wave using AAudio\n", argv[0]);
+ printf("%s - Play a sine wave using AAudio, Z2\n", argv[0]);
// Use an AAudioStreamBuilder to contain requested parameters.
result = AAudio_createStreamBuilder(&aaudioBuilder);
@@ -99,8 +101,8 @@
// Request stream properties.
AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
- AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
- AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+ AAudioStreamBuilder_setFormat(aaudioBuilder, REQUESTED_FORMAT);
+ AAudioStreamBuilder_setSharingMode(aaudioBuilder, REQUESTED_SHARING_MODE);
// Create an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
@@ -124,15 +126,16 @@
actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
printf("SharingMode: requested = %s, actual = %s\n",
- getSharingModeText(requestedSharingMode),
+ getSharingModeText(REQUESTED_SHARING_MODE),
getSharingModeText(actualSharingMode));
// This is the number of frames that are read in one chunk by a DMA controller
// or a DSP or a mixer.
framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
- printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+ printf("Buffer: framesPerBurst = %d\n",framesPerBurst);
+ printf("Buffer: bufferSize = %d\n", AAudioStream_getBufferSizeInFrames(aaudioStream));
bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
- printf("DataFormat: bufferCapacity = %d, remainder = %d\n",
+ printf("Buffer: bufferCapacity = %d, remainder = %d\n",
bufferCapacity, bufferCapacity % framesPerBurst);
// Some DMA might use very short bursts of 16 frames. We don't need to write such small
@@ -144,14 +147,16 @@
printf("DataFormat: framesPerWrite = %d\n",framesPerWrite);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+ printf("DataFormat: requested = %d, actual = %d\n", REQUESTED_FORMAT, actualDataFormat);
// TODO handle other data formats
// Allocate a buffer for the audio data.
- data = new int16_t[framesPerWrite * actualSamplesPerFrame];
- if (data == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = AAUDIO_ERROR_NO_MEMORY;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ floatData = new float[framesPerWrite * actualSamplesPerFrame];
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ shortData = new int16_t[framesPerWrite * actualSamplesPerFrame];
+ } else {
+ printf("ERROR Unsupported data format!\n");
goto finish;
}
@@ -170,26 +175,41 @@
framesToPlay = actualSampleRate * NUM_SECONDS;
framesLeft = framesToPlay;
while (framesLeft > 0) {
- // Render sine waves to left and right channels.
- sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerWrite);
- if (actualSamplesPerFrame > 1) {
- sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerWrite);
+
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ // Render sine waves to left and right channels.
+ sineOsc1.render(&floatData[0], actualSamplesPerFrame, framesPerWrite);
+ if (actualSamplesPerFrame > 1) {
+ sineOsc2.render(&floatData[1], actualSamplesPerFrame, framesPerWrite);
+ }
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ // Render sine waves to left and right channels.
+ sineOsc1.render(&shortData[0], actualSamplesPerFrame, framesPerWrite);
+ if (actualSamplesPerFrame > 1) {
+ sineOsc2.render(&shortData[1], actualSamplesPerFrame, framesPerWrite);
+ }
}
// Write audio data to the stream.
- int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
- int minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
- int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
+ int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
+ int32_t minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
+ int32_t actual = 0;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ actual = AAudioStream_write(aaudioStream, floatData, minFrames, timeoutNanos);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ actual = AAudioStream_write(aaudioStream, shortData, minFrames, timeoutNanos);
+ }
if (actual < 0) {
- fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", actual);
+ fprintf(stderr, "ERROR - AAudioStream_write() returned %d\n", actual);
goto finish;
} else if (actual == 0) {
- fprintf(stderr, "WARNING - AAudioStream_write() returned %zd\n", actual);
+ fprintf(stderr, "WARNING - AAudioStream_write() returned %d\n", actual);
goto finish;
}
framesLeft -= actual;
// Use timestamp to estimate latency.
+ /*
{
int64_t presentationFrame;
int64_t presentationTime;
@@ -208,13 +228,15 @@
printf("estimatedLatencyMillis %d\n", (int)estimatedLatencyMillis);
}
}
+ */
}
xRunCount = AAudioStream_getXRunCount(aaudioStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
finish:
- delete[] data;
+ delete[] floatData;
+ delete[] shortData;
AAudioStream_close(aaudioStream);
AAudioStreamBuilder_delete(aaudioBuilder);
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 9414236..8c1072d 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -31,8 +31,6 @@
//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
-#define CALLBACK_SIZE_FRAMES 128
-
// TODO refactor common code into a single SimpleAAudio class
/**
* Simple wrapper for AAudio that opens a default stream and then calls
@@ -87,8 +85,8 @@
AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
- AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
- // AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, CALLBACK_SIZE_FRAMES * 4);
+ // AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+ AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, 48 * 8);
// Open an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
@@ -136,7 +134,7 @@
aaudio_result_t start() {
aaudio_result_t result = AAudioStream_requestStart(mStream);
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+ printf("ERROR - AAudioStream_requestStart() returned %d %s\n",
result, AAudio_convertResultToText(result));
}
return result;
@@ -146,7 +144,7 @@
aaudio_result_t stop() {
aaudio_result_t result = AAudioStream_requestStop(mStream);
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+ printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
result, AAudio_convertResultToText(result));
}
int32_t xRunCount = AAudioStream_getXRunCount(mStream);
@@ -169,9 +167,6 @@
typedef struct SineThreadedData_s {
SineGenerator sineOsc1;
SineGenerator sineOsc2;
- // Remove these variables used for testing.
- int32_t numFrameCounts;
- int32_t frameCounts[MAX_FRAME_COUNT_RECORDS];
int scheduler;
bool schedulerChecked;
} SineThreadedData_t;
@@ -186,10 +181,6 @@
SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
- if (sineData->numFrameCounts < MAX_FRAME_COUNT_RECORDS) {
- sineData->frameCounts[sineData->numFrameCounts++] = numFrames;
- }
-
if (!sineData->schedulerChecked) {
sineData->scheduler = sched_getscheduler(gettid());
sineData->schedulerChecked = true;
@@ -236,11 +227,10 @@
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
+ printf("%s - Play a sine sweep using an AAudio callback, Z1\n", argv[0]);
player.setSharingMode(SHARING_MODE);
- myData.numFrameCounts = 0;
myData.schedulerChecked = false;
result = player.open(MyDataCallbackProc, &myData);
@@ -291,19 +281,17 @@
}
printf("Woke up now.\n");
+ printf("call stop()\n");
result = player.stop();
if (result != AAUDIO_OK) {
goto error;
}
+ printf("call close()\n");
result = player.close();
if (result != AAUDIO_OK) {
goto error;
}
- // Report data gathered in the callback.
- for (int i = 0; i < myData.numFrameCounts; i++) {
- printf("numFrames[%4d] = %4d\n", i, myData.frameCounts[i]);
- }
if (myData.schedulerChecked) {
printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
myData.scheduler,
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
deleted file mode 100644
index 9bc5886..0000000
--- a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Play sine waves using an AAudio background thread.
-
-//#include <assert.h>
-#include <atomic>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-#include <time.h>
-#include <aaudio/AAudio.h>
-#include "SineGenerator.h"
-
-#define NUM_SECONDS 5
-#define NANOS_PER_MICROSECOND ((int64_t)1000)
-#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
-#define MILLIS_PER_SECOND 1000
-#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
-
-#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
-//#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
-
-// Prototype for a callback.
-typedef int audio_callback_proc_t(float *outputBuffer,
- int32_t numFrames,
- void *userContext);
-
-static void *SimpleAAudioPlayerThreadProc(void *arg);
-
-// TODO merge into common code
-static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
- struct timespec time;
- int result = clock_gettime(clockId, &time);
- if (result < 0) {
- return -errno; // TODO standardize return value
- }
- return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
-}
-
-/**
- * Simple wrapper for AAudio that opens a default stream and then calls
- * a callback function to fill the output buffers.
- */
-class SimpleAAudioPlayer {
-public:
- SimpleAAudioPlayer() {}
- ~SimpleAAudioPlayer() {
- close();
- };
-
- void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
- mRequestedSharingMode = requestedSharingMode;
- }
-
- /** Also known as "sample rate"
- */
- int32_t getFramesPerSecond() {
- return mFramesPerSecond;
- }
-
- int32_t getSamplesPerFrame() {
- return mSamplesPerFrame;
- }
-
- /**
- * Open a stream
- */
- aaudio_result_t open(audio_callback_proc_t *proc, void *userContext) {
- mCallbackProc = proc;
- mUserContext = userContext;
- aaudio_result_t result = AAUDIO_OK;
-
- // Use an AAudioStreamBuilder to contain requested parameters.
- result = AAudio_createStreamBuilder(&mBuilder);
- if (result != AAUDIO_OK) return result;
-
- AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
- AAudioStreamBuilder_setSampleRate(mBuilder, 48000);
-
- // Open an AAudioStream using the Builder.
- result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
- if (result != AAUDIO_OK) goto error;
-
- printf("Requested sharing mode = %d\n", mRequestedSharingMode);
- printf("Actual sharing mode = %d\n", AAudioStream_getSharingMode(mStream));
-
- // Check to see what kind of stream we actually got.
- mFramesPerSecond = AAudioStream_getSampleRate(mStream);
- printf("Actual framesPerSecond = %d\n", mFramesPerSecond);
-
- mSamplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
- printf("Actual samplesPerFrame = %d\n", mSamplesPerFrame);
-
- {
- int32_t bufferCapacity = AAudioStream_getBufferCapacityInFrames(mStream);
- printf("Actual bufferCapacity = %d\n", bufferCapacity);
- }
-
- // This is the number of frames that are read in one chunk by a DMA controller
- // or a DSP or a mixer.
- mFramesPerBurst = AAudioStream_getFramesPerBurst(mStream);
- // Some DMA might use very short bursts. We don't need to write such small
- // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
- while (mFramesPerBurst < 48) {
- mFramesPerBurst *= 2;
- }
- printf("Actual framesPerBurst = %d\n",mFramesPerBurst);
-
- mDataFormat = AAudioStream_getFormat(mStream);
- printf("Actual dataFormat = %d\n", mDataFormat);
-
- // Allocate a buffer for the audio data.
- mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
- if (mOutputBuffer == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = AAUDIO_ERROR_NO_MEMORY;
- }
-
- // If needed allocate a buffer for converting float to int16_t.
- if (mDataFormat == AAUDIO_FORMAT_PCM_I16) {
- printf("Allocate data conversion buffer for float=>pcm16\n");
- mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
- if (mConversionBuffer == nullptr) {
- fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
- result = AAUDIO_ERROR_NO_MEMORY;
- }
- }
- return result;
-
- error:
- AAudioStreamBuilder_delete(mBuilder);
- mBuilder = nullptr;
- return result;
- }
-
- aaudio_result_t close() {
- if (mStream != nullptr) {
- stop();
- printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
- AAudioStream_close(mStream);
- mStream = nullptr;
- AAudioStreamBuilder_delete(mBuilder);
- mBuilder = nullptr;
- delete mOutputBuffer;
- mOutputBuffer = nullptr;
- delete mConversionBuffer;
- mConversionBuffer = nullptr;
- }
- return AAUDIO_OK;
- }
-
- // Start a thread that will call the callback proc.
- aaudio_result_t start() {
- mEnabled.store(true);
- int64_t nanosPerBurst = mFramesPerBurst * NANOS_PER_SECOND
- / mFramesPerSecond;
- return AAudioStream_createThread(mStream, nanosPerBurst,
- SimpleAAudioPlayerThreadProc,
- this);
- }
-
- // Tell the thread to stop.
- aaudio_result_t stop() {
- mEnabled.store(false);
- return AAudioStream_joinThread(mStream, nullptr, 2 * NANOS_PER_SECOND);
- }
-
- bool isEnabled() const {
- return mEnabled.load();
- }
-
- aaudio_result_t callbackLoop() {
- aaudio_result_t result = 0;
- int64_t framesWritten = 0;
- int32_t xRunCount = 0;
- bool started = false;
- int64_t framesInBuffer =
- AAudioStream_getFramesWritten(mStream) -
- AAudioStream_getFramesRead(mStream);
- int64_t framesAvailable =
- AAudioStream_getBufferSizeInFrames(mStream) - framesInBuffer;
-
- int64_t startTime = 0;
- int64_t startPosition = 0;
- int32_t loopCount = 0;
-
- // Give up after several burst periods have passed.
- const int burstsPerTimeout = 8;
- int64_t nanosPerTimeout = 0;
- int64_t runningNanosPerTimeout = 500 * NANOS_PER_MILLISECOND;
-
- while (isEnabled() && result >= 0) {
- // Call application's callback function to fill the buffer.
- if (mCallbackProc(mOutputBuffer, mFramesPerBurst, mUserContext)) {
- mEnabled.store(false);
- }
-
- // if needed, convert from float to int16_t PCM
- //printf("app callbackLoop writing %d frames, state = %s\n", mFramesPerBurst,
- // AAudio_convertStreamStateToText(AAudioStream_getState(mStream)));
- if (mConversionBuffer != nullptr) {
- int32_t numSamples = mFramesPerBurst * mSamplesPerFrame;
- for (int i = 0; i < numSamples; i++) {
- mConversionBuffer[i] = (int16_t)(32767.0 * mOutputBuffer[i]);
- }
- // Write the application data to stream.
- result = AAudioStream_write(mStream, mConversionBuffer,
- mFramesPerBurst, nanosPerTimeout);
- } else {
- // Write the application data to stream.
- result = AAudioStream_write(mStream, mOutputBuffer,
- mFramesPerBurst, nanosPerTimeout);
- }
-
- if (result < 0) {
- fprintf(stderr, "ERROR - AAudioStream_write() returned %d %s\n", result,
- AAudio_convertResultToText(result));
- break;
- } else if (started && result != mFramesPerBurst) {
- fprintf(stderr, "ERROR - AAudioStream_write() timed out! %d\n", result);
- break;
- } else {
- framesWritten += result;
- }
-
- if (startTime > 0 && ((loopCount & 0x01FF) == 0)) {
- double elapsedFrames = (double)(framesWritten - startPosition);
- int64_t elapsedTime = getNanoseconds() - startTime;
- double measuredRate = elapsedFrames * NANOS_PER_SECOND / elapsedTime;
- printf("app callbackLoop write() measured rate %f\n", measuredRate);
- }
- loopCount++;
-
- if (!started && framesWritten >= framesAvailable) {
- // Start buffer if fully primed.{
- result = AAudioStream_requestStart(mStream);
- printf("app callbackLoop requestStart returned %d\n", result);
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n", result,
- AAudio_convertResultToText(result));
- mEnabled.store(false);
- return result;
- }
- started = true;
- nanosPerTimeout = runningNanosPerTimeout;
- startPosition = framesWritten;
- startTime = getNanoseconds();
- }
-
- {
- int32_t tempXRunCount = AAudioStream_getXRunCount(mStream);
- if (tempXRunCount != xRunCount) {
- xRunCount = tempXRunCount;
- printf("AAudioStream_getXRunCount returns %d at frame %d\n",
- xRunCount, (int) framesWritten);
- }
- }
- }
-
- result = AAudioStream_requestStop(mStream);
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n", result,
- AAudio_convertResultToText(result));
- return result;
- }
-
- return result;
- }
-
-private:
- AAudioStreamBuilder *mBuilder = nullptr;
- AAudioStream *mStream = nullptr;
- float *mOutputBuffer = nullptr;
- int16_t *mConversionBuffer = nullptr;
-
- audio_callback_proc_t *mCallbackProc = nullptr;
- void *mUserContext = nullptr;
- aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
- int32_t mSamplesPerFrame = 0;
- int32_t mFramesPerSecond = 0;
- int32_t mFramesPerBurst = 0;
- aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM_I16;
-
- std::atomic<bool> mEnabled; // used to request that callback exit its loop
-};
-
-static void *SimpleAAudioPlayerThreadProc(void *arg) {
- SimpleAAudioPlayer *player = (SimpleAAudioPlayer *) arg;
- player->callbackLoop();
- return nullptr;
-}
-
-// Application data that gets passed to the callback.
-typedef struct SineThreadedData_s {
- SineGenerator sineOsc1;
- SineGenerator sineOsc2;
- int32_t samplesPerFrame = 0;
-} SineThreadedData_t;
-
-// Callback function that fills the audio output buffer.
-int MyCallbackProc(float *outputBuffer, int32_t numFrames, void *userContext) {
- SineThreadedData_t *data = (SineThreadedData_t *) userContext;
- // Render sine waves to left and right channels.
- data->sineOsc1.render(&outputBuffer[0], data->samplesPerFrame, numFrames);
- if (data->samplesPerFrame > 1) {
- data->sineOsc2.render(&outputBuffer[1], data->samplesPerFrame, numFrames);
- }
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- (void)argc; // unused
- SimpleAAudioPlayer player;
- SineThreadedData_t myData;
- aaudio_result_t result;
-
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine wave using an AAudio Thread\n", argv[0]);
-
- result = player.open(MyCallbackProc, &myData);
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.open() returned %d\n", result);
- goto error;
- }
- printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
- printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
- myData.sineOsc1.setup(440.0, 48000);
- myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
- myData.sineOsc2.setup(660.0, 48000);
- myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
- myData.samplesPerFrame = player.getSamplesPerFrame();
-
- result = player.start();
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.start() returned %d\n", result);
- goto error;
- }
-
- printf("Sleep for %d seconds while audio plays in a background thread.\n", NUM_SECONDS);
- for (int i = 0; i < NUM_SECONDS && player.isEnabled(); i++) {
- // FIXME sleep is not an NDK API
- // sleep(NUM_SECONDS);
- const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
- (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
- }
- printf("Woke up now!\n");
-
- result = player.stop();
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.stop() returned %d\n", result);
- goto error;
- }
-
- printf("Player stopped.\n");
- result = player.close();
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.close() returned %d\n", result);
- goto error;
- }
-
- printf("SUCCESS\n");
- return EXIT_SUCCESS;
-error:
- player.close();
- printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
- return EXIT_FAILURE;
-}
-
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
index c02b91c..e4da6a8 100644
--- a/media/libaaudio/examples/write_sine/static/Android.mk
+++ b/media/libaaudio/examples/write_sine/static/Android.mk
@@ -18,25 +18,6 @@
include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include
-
-LOCAL_SRC_FILES:= ../src/write_sine_threaded.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
- libbinder libcutils libutils \
- libaudioclient liblog
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := write_sine_threaded
-include $(BUILD_EXECUTABLE)
-
-
-
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index d0c7c22..4c1ea55 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -54,9 +54,7 @@
AAUDIO_FORMAT_INVALID = -1,
AAUDIO_FORMAT_UNSPECIFIED = 0,
AAUDIO_FORMAT_PCM_I16,
- AAUDIO_FORMAT_PCM_FLOAT,
- AAUDIO_FORMAT_PCM_I8_24,
- AAUDIO_FORMAT_PCM_I32
+ AAUDIO_FORMAT_PCM_FLOAT
};
typedef int32_t aaudio_format_t;
@@ -584,61 +582,10 @@
int32_t numFrames,
int64_t timeoutNanoseconds);
-
-// ============================================================
-// High priority audio threads
-// ============================================================
-
-/**
- * @deprecated Use AudioStreamBuilder_setCallback()
- */
-typedef void *(*aaudio_audio_thread_proc_t)(void *);
-
-/**
- * @deprecated Use AudioStreamBuilder_setCallback()
- *
- * Create a thread associated with a stream. The thread has special properties for
- * low latency audio performance. This thread can be used to implement a callback API.
- *
- * Only one thread may be associated with a stream.
- *
- * If you are using multiple streams then we recommend that you only do
- * blocking reads or writes on one stream. You can do non-blocking I/O on the
- * other streams by setting the timeout to zero.
- * This thread should be created for the stream that you will block on.
- *
- * Note that this API is in flux.
- *
- * @param stream A stream created using AAudioStreamBuilder_openStream().
- * @param periodNanoseconds the estimated period at which the audio thread will need to wake up
- * @param threadProc your thread entry point
- * @param arg an argument that will be passed to your thread entry point
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream* stream,
- int64_t periodNanoseconds,
- aaudio_audio_thread_proc_t threadProc,
- void *arg);
-
-/**
- * @deprecated Use AudioStreamBuilder_setCallback()
- *
- * Wait until the thread exits or an error occurs.
- *
- * @param stream A stream created using AAudioStreamBuilder_openStream().
- * @param returnArg a pointer to a variable to receive the return value
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return AAUDIO_OK or a negative error.
- */
-AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream* stream,
- void **returnArg,
- int64_t timeoutNanoseconds);
-
// ============================================================
// Stream - queries
// ============================================================
-
/**
* This can be used to adjust the latency of the buffer by changing
* the threshold where blocking will occur.
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index f22fdfe..1024e1f 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -24,8 +24,6 @@
AAudioStream_waitForStateChange;
AAudioStream_read;
AAudioStream_write;
- AAudioStream_createThread;
- AAudioStream_joinThread;
AAudioStream_setBufferSizeInFrames;
AAudioStream_getBufferSizeInFrames;
AAudioStream_getFramesPerDataCallback;
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index 8315c40..3f1bba3 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -75,6 +75,10 @@
return gAAudioService;
}
+static void dropAAudioService() {
+ Mutex::Autolock _l(gServiceLock);
+ gAAudioService.clear(); // force a reconnect
+}
AAudioBinderClient::AAudioBinderClient()
: AAudioServiceInterface() {}
@@ -88,14 +92,26 @@
*/
aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
AAudioStreamConfiguration &configurationOutput) {
+ aaudio_handle_t stream;
+ for (int i = 0; i < 2; i++) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) {
+ return AAUDIO_ERROR_NO_SERVICE;
+ }
- const sp<IAAudioService> &service = getAAudioService();
- if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
- return service->openStream(request, configurationOutput);
+ stream = service->openStream(request, configurationOutput);
+
+ if (stream == AAUDIO_ERROR_NO_SERVICE) {
+ ALOGE("AAudioBinderClient: lost connection to AAudioService.");
+ dropAAudioService(); // force a reconnect
+ } else {
+ break;
+ }
+ }
+ return stream;
}
aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
-
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
return service->closeStream(streamHandle);
@@ -106,37 +122,33 @@
*/
aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
AudioEndpointParcelable &parcelable) {
-
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
return service->getStreamDescription(streamHandle, parcelable);
}
-/**
-* Start the flow of data.
-*/
aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
return service->startStream(streamHandle);
}
-/**
-* Stop the flow of data such that start() can resume without loss of data.
-*/
aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
- return service->startStream(streamHandle);
+ return service->pauseStream(streamHandle);
}
-/**
-* Discard any data held by the underlying HAL or Service.
-*/
+aaudio_result_t AAudioBinderClient::stopStream(aaudio_handle_t streamHandle) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->stopStream(streamHandle);
+}
+
aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
const sp<IAAudioService> &service = getAAudioService();
if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
- return service->startStream(streamHandle);
+ return service->flushStream(streamHandle);
}
/**
@@ -163,5 +175,3 @@
clientProcessId,
clientThreadId);
}
-
-
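Note on the openStream() change above: when the cached binder connection reports AAUDIO_ERROR_NO_SERVICE, the client now clears the cached proxy and retries the call exactly once against a fresh connection. A minimal self-contained sketch of that retry-once pattern follows; RemoteService, getService, dropService and ERROR_NO_SERVICE are illustrative stand-ins, not names from this patch.

    #include <memory>
    #include <mutex>

    constexpr int ERROR_NO_SERVICE = -1;   // stand-in for AAUDIO_ERROR_NO_SERVICE

    struct RemoteService {                 // stand-in for the binder proxy
        int openStream() { return 7; }     // pretend this returns a stream handle
    };

    static std::mutex gLock;
    static std::shared_ptr<RemoteService> gService;

    static std::shared_ptr<RemoteService> getService() {
        std::lock_guard<std::mutex> lock(gLock);
        if (!gService) gService = std::make_shared<RemoteService>();  // (re)connect
        return gService;
    }

    static void dropService() {
        std::lock_guard<std::mutex> lock(gLock);
        gService.reset();                  // next getService() reconnects
    }

    int openStreamWithRetry() {
        int result = ERROR_NO_SERVICE;
        for (int attempt = 0; attempt < 2; attempt++) {
            auto service = getService();
            if (!service) return ERROR_NO_SERVICE;
            result = service->openStream();
            if (result != ERROR_NO_SERVICE) break;   // success or a different error
            dropService();                           // stale connection: retry once
        }
        return result;
    }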
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index 1497177..f7f2808 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -66,6 +66,8 @@
*/
aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;
+ aaudio_result_t stopStream(aaudio_handle_t streamHandle) override;
+
/**
* Discard any data held by the underlying HAL or Service.
* This is asynchronous. When complete, the service will send a FLUSHED event.
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index 0d5bae5..2de560b 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -35,6 +35,7 @@
GET_STREAM_DESCRIPTION,
START_STREAM,
PAUSE_STREAM,
+ STOP_STREAM,
FLUSH_STREAM,
REGISTER_AUDIO_THREAD,
UNREGISTER_AUDIO_THREAD
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 62fd894..b565499 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -63,6 +63,11 @@
virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
/**
+ * Stop the flow of data after data currently in the buffer has played.
+ */
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
* Discard any data held by the underlying HAL or Service.
*/
virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index 19d6d52..d75aa32 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -35,6 +35,7 @@
typedef enum aaudio_service_event_e : uint32_t {
AAUDIO_SERVICE_EVENT_STARTED,
AAUDIO_SERVICE_EVENT_PAUSED,
+ AAUDIO_SERVICE_EVENT_STOPPED,
AAUDIO_SERVICE_EVENT_FLUSHED,
AAUDIO_SERVICE_EVENT_CLOSED,
AAUDIO_SERVICE_EVENT_DISCONNECTED,
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 5adb477..09eaa42 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -43,7 +43,6 @@
status = parcel->writeInt32(mSamplesPerFrame);
if (status != NO_ERROR) goto error;
status = parcel->writeInt32((int32_t) mSharingMode);
- ALOGD("AAudioStreamConfiguration.writeToParcel(): mSharingMode = %d", mSharingMode);
if (status != NO_ERROR) goto error;
status = parcel->writeInt32((int32_t) mAudioFormat);
if (status != NO_ERROR) goto error;
@@ -66,7 +65,6 @@
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mSharingMode = (aaudio_sharing_mode_t) temp;
- ALOGD("AAudioStreamConfiguration.readFromParcel(): mSharingMode = %d", mSharingMode);
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mAudioFormat = (aaudio_audio_format_t) temp;
@@ -93,8 +91,6 @@
switch (mAudioFormat) {
case AAUDIO_FORMAT_PCM_I16:
case AAUDIO_FORMAT_PCM_FLOAT:
- case AAUDIO_FORMAT_PCM_I8_24:
- case AAUDIO_FORMAT_PCM_I32:
break;
default:
ALOGE("AAudioStreamConfiguration.validate() invalid audioFormat = %d", mAudioFormat);
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index ec21f8a..a5c27b9 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -49,6 +49,10 @@
if (status != NO_ERROR) goto error;
status = parcel->writeInt32((int32_t) mDirection);
if (status != NO_ERROR) goto error;
+
+ status = parcel->writeBool(mSharingModeMatchRequired);
+ if (status != NO_ERROR) goto error;
+
status = mConfiguration.writeToParcel(parcel);
if (status != NO_ERROR) goto error;
return NO_ERROR;
@@ -63,12 +67,18 @@
status_t status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mUserId = (uid_t) temp;
+
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mProcessId = (pid_t) temp;
+
status = parcel->readInt32(&temp);
if (status != NO_ERROR) goto error;
mDirection = (aaudio_direction_t) temp;
+
+ status = parcel->readBool(&mSharingModeMatchRequired);
+ if (status != NO_ERROR) goto error;
+
status = mConfiguration.readFromParcel(parcel);
if (status != NO_ERROR) goto error;
return NO_ERROR;
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index 992e978..d4bfbe1 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -60,6 +60,15 @@
mDirection = direction;
}
+ bool isSharingModeMatchRequired() const {
+ return mSharingModeMatchRequired;
+ }
+
+ void setSharingModeMatchRequired(bool required) {
+ mSharingModeMatchRequired = required;
+ }
+
+
const AAudioStreamConfiguration &getConstantConfiguration() const {
return mConfiguration;
}
@@ -81,6 +90,7 @@
uid_t mUserId;
pid_t mProcessId;
aaudio_direction_t mDirection;
+ bool mSharingModeMatchRequired = false;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index 03fc088..b8ef611 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -45,16 +45,25 @@
Parcel data, reply;
// send command
data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- ALOGE("BpAAudioService::client openStream request dump --------------------");
- request.dump();
+ ALOGV("BpAAudioService::client openStream --------------------");
+ // request.dump();
request.writeToParcel(&data);
status_t err = remote()->transact(OPEN_STREAM, data, &reply);
+ ALOGV("BpAAudioService::client openStream returned %d", err);
if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client openStream transact failed %d", err);
return AAudioConvert_androidToAAudioResult(err);
}
// parse reply
aaudio_handle_t stream;
- reply.readInt32(&stream);
+ err = reply.readInt32(&stream);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (stream < 0) {
+ ALOGE("BpAAudioService::client OPEN_STREAM passed stream %d", stream);
+ return stream;
+ }
err = configurationOutput.readFromParcel(&reply);
if (err != NO_ERROR) {
ALOGE("BpAAudioService::client openStream readFromParcel failed %d", err);
@@ -71,6 +80,7 @@
data.writeInt32(streamHandle);
status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client closeStream transact failed %d", err);
return AAudioConvert_androidToAAudioResult(err);
}
// parse reply
@@ -145,6 +155,21 @@
return res;
}
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(STOP_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) override {
Parcel data, reply;
// send command
@@ -226,11 +251,11 @@
case OPEN_STREAM: {
request.readFromParcel(&data);
- ALOGD("BnAAudioService::client openStream request dump --------------------");
- request.dump();
+ //ALOGD("BnAAudioService::client openStream request dump --------------------");
+ //request.dump();
stream = openStream(request, configuration);
- ALOGV("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
+ //ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
reply->writeInt32(stream);
configuration.writeToParcel(reply);
return NO_ERROR;
@@ -238,18 +263,17 @@
case CLOSE_STREAM: {
data.readInt32(&stream);
- ALOGV("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
result = closeStream(stream);
+ //ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X, result = %d",
+ // stream, result);
reply->writeInt32(result);
return NO_ERROR;
} break;
case GET_STREAM_DESCRIPTION: {
data.readInt32(&stream);
- ALOGI("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
aaudio::AudioEndpointParcelable parcelable;
result = getStreamDescription(stream, parcelable);
- ALOGI("BnAAudioService::onTransact getStreamDescription() returns %d", result);
if (result != AAUDIO_OK) {
return AAudioConvert_aaudioToAndroidStatus(result);
}
@@ -277,7 +301,16 @@
data.readInt32(&stream);
result = pauseStream(stream);
ALOGV("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
- stream, result);
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case STOP_STREAM: {
+ data.readInt32(&stream);
+ result = stopStream(stream);
+ ALOGV("BnAAudioService::onTransact STOP_STREAM 0x%08X, result = %d",
+ stream, result);
reply->writeInt32(result);
return NO_ERROR;
} break;
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
index ab7fd1b..2cee651 100644
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -69,6 +69,12 @@
virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
/**
+ * Stop the flow of data such that the data currently in the buffer is played.
+ * This is asynchronous. When complete, the service will send a STOPPED event.
+ */
+ virtual aaudio_result_t stopStream(aaudio::aaudio_handle_t streamHandle) = 0;
+
+ /**
* Discard any data held by the underlying HAL or Service.
* This is asynchronous. When complete, the service will send a FLUSHED event.
*/
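The new stopStream() is asynchronous, like pauseStream() and flushStream(): the call only requests the transition, and the client remains in a STOPPING state until the service posts AAUDIO_SERVICE_EVENT_STOPPED (handled in AudioStreamInternal below). A rough sketch of that two-phase handling, using simplified stand-in enums rather than the real AAudio types:

    #include <cstdint>

    enum State { STATE_STARTED, STATE_STOPPING, STATE_STOPPED };   // stand-ins
    enum Event { EVENT_STARTED, EVENT_STOPPED };

    struct ClientStream {
        State state = STATE_STARTED;

        // Phase 1: record the intent and ask the service to stop; returns at once.
        int requestStop() {
            state = STATE_STOPPING;
            return sendStopToService();
        }

        // Phase 2: the service's STOPPED event completes the transition.
        void onEventFromServer(Event event) {
            if (event == EVENT_STOPPED) state = STATE_STOPPED;
        }

    private:
        int sendStopToService() { return 0; }   // placeholder for the binder call
    };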
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 649c884..0f501dd 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -61,9 +61,8 @@
return status;
}
if (mSizeInBytes > 0) {
-// FIXME mFd = dup(parcel->readFileDescriptor());
- // Why is the ALSA resource not getting freed?!
- mFd = fcntl(parcel->readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
+ int originalFD = parcel->readFileDescriptor();
+ mFd = fcntl(originalFD, F_DUPFD_CLOEXEC, 0);
if (mFd == -1) {
status = -errno;
ALOGE("SharedMemoryParcelable readFileDescriptor fcntl() failed : %d", status);
@@ -101,11 +100,6 @@
return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mResolvedAddress == nullptr) {
- /* TODO remove
- int fd = fcntl(mFd, F_DUPFD_CLOEXEC, 0);
- ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, (%s)",
- mFd, mSizeInBytes, strerror(errno));
- */
mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
MAP_SHARED, mFd, 0);
if (mResolvedAddress == nullptr) {
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index fe049b2..6f87df6 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -59,35 +59,35 @@
ALOGE("AudioEndpoint_validateQueueDescriptor() NULL dataAddress");
return AAUDIO_ERROR_NULL;
}
- ALOGD("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
+ ALOGV("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
type,
descriptor->dataAddress);
- ALOGD("AudioEndpoint_validateQueueDescriptor readCounter at %p, writeCounter at %p",
+ ALOGV("AudioEndpoint_validateQueueDescriptor readCounter at %p, writeCounter at %p",
descriptor->readCounterAddress,
descriptor->writeCounterAddress);
// Try to READ from the data area.
// This code will crash if the mmap failed.
uint8_t value = descriptor->dataAddress[0];
- ALOGD("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
+ ALOGV("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
(int) value);
// Try to WRITE to the data area.
descriptor->dataAddress[0] = value * 3;
- ALOGD("AudioEndpoint_validateQueueDescriptor() wrote successfully");
+ ALOGV("AudioEndpoint_validateQueueDescriptor() wrote successfully");
if (descriptor->readCounterAddress) {
fifo_counter_t counter = *descriptor->readCounterAddress;
- ALOGD("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
+ ALOGV("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
(int) counter);
*descriptor->readCounterAddress = counter;
- ALOGD("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
+ ALOGV("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
}
if (descriptor->writeCounterAddress) {
fifo_counter_t counter = *descriptor->writeCounterAddress;
- ALOGD("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
+ ALOGV("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
(int) counter);
*descriptor->writeCounterAddress = counter;
- ALOGD("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
+ ALOGV("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
}
return AAUDIO_OK;
}
@@ -107,7 +107,7 @@
// TODO maybe remove after debugging
aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
if (result != AAUDIO_OK) {
- ALOGD("AudioEndpoint_validateQueueDescriptor returned %d %s",
+ ALOGE("AudioEndpoint_validateQueueDescriptor returned %d %s",
result, AAudio_convertResultToText(result));
return result;
}
@@ -142,10 +142,10 @@
assert(descriptor->framesPerBurst > 0);
assert(descriptor->framesPerBurst < 8 * 1024); // FIXME just for initial debugging
assert(descriptor->dataAddress != nullptr);
- ALOGD("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
- ALOGD("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
+ ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+ ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
mOutputFreeRunning = descriptor->readCounterAddress == nullptr;
- ALOGD("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
+ ALOGV("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
? &mDataReadCounter
: descriptor->readCounterAddress;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 7304205..af4b93a 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -40,9 +40,6 @@
#define LOG_TIMESTAMPS 0
using android::String16;
-using android::IServiceManager;
-using android::defaultServiceManager;
-using android::interface_cast;
using android::Mutex;
using android::WrappingBuffer;
@@ -53,7 +50,10 @@
// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4
-#define ALOG_CONDITION (mInService == false)
+//static int64_t s_logCounter = 0;
+//#define MYLOG_CONDITION (mInService == true && s_logCounter++ < 500)
+//#define MYLOG_CONDITION (s_logCounter++ < 500000)
+#define MYLOG_CONDITION (1)
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
@@ -62,8 +62,7 @@
, mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
, mFramesPerBurst(16)
, mServiceInterface(serviceInterface)
- , mInService(inService)
-{
+ , mInService(inService) {
}
AudioStreamInternal::~AudioStreamInternal() {
@@ -84,27 +83,26 @@
if (getFormat() == AAUDIO_UNSPECIFIED) {
setFormat(AAUDIO_FORMAT_PCM_FLOAT);
}
+ // Request FLOAT for the shared mixer.
+ request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);
// Build the request to send to the server.
request.setUserId(getuid());
request.setProcessId(getpid());
request.setDirection(getDirection());
+ request.setSharingModeMatchRequired(isSharingModeMatchRequired());
request.getConfiguration().setDeviceId(getDeviceId());
request.getConfiguration().setSampleRate(getSampleRate());
request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
- request.getConfiguration().setAudioFormat(getFormat());
- aaudio_sharing_mode_t sharingMode = getSharingMode();
- ALOGE("AudioStreamInternal.open(): sharingMode %d", sharingMode);
- request.getConfiguration().setSharingMode(sharingMode);
+ request.getConfiguration().setSharingMode(getSharingMode());
+
request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
- (unsigned int)mServiceStreamHandle);
if (mServiceStreamHandle < 0) {
result = mServiceStreamHandle;
- ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
+ ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result);
} else {
result = configuration.validate();
if (result != AAUDIO_OK) {
@@ -120,10 +118,9 @@
mDeviceFormat = configuration.getAudioFormat();
result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.open(): getStreamDescriptor(0x%08X) returns %d",
- mServiceStreamHandle, result);
if (result != AAUDIO_OK) {
- ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
+ ALOGE("AudioStreamInternal.open(): %s getStreamDescriptor returns %d",
+ getLocationName(), result);
mServiceInterface.closeStream(mServiceStreamHandle);
return result;
}
@@ -140,8 +137,19 @@
mAudioEndpoint.configure(&mEndpointDescriptor);
mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
- assert(mFramesPerBurst >= 16);
- assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);
+ int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames;
+
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
+ getLocationName(), mFramesPerBurst, capacity);
+ // Validate result from server.
+ if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
+ ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
+ ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
mClockModel.setSampleRate(getSampleRate());
mClockModel.setFramesPerBurst(mFramesPerBurst);
@@ -149,7 +157,8 @@
if (getDataCallbackProc()) {
mCallbackFrames = builder.getFramesPerDataCallback();
if (mCallbackFrames > getBufferCapacity() / 2) {
- ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
+ ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
+ mCallbackFrames, getBufferCapacity());
mServiceInterface.closeStream(mServiceStreamHandle);
return AAUDIO_ERROR_OUT_OF_RANGE;
@@ -175,7 +184,8 @@
}
aaudio_result_t AudioStreamInternal::close() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
+ mServiceStreamHandle);
if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
@@ -250,7 +260,7 @@
aaudio_result_t AudioStreamInternal::requestStart()
{
int64_t startTime;
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): start()");
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -275,8 +285,10 @@
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
// Wait for at least a second or some number of callbacks to join the thread.
- int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
- / getSampleRate();
+ int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
+ * framesPerOperation
+ * AAUDIO_NANOS_PER_SECOND)
+ / getSampleRate();
if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
timeoutNanoseconds = MIN_TIMEOUT_NANOS;
}
@@ -295,28 +307,34 @@
aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
- ALOGD("AudioStreamInternal(): pause()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
+ mServiceStreamHandle);
return AAUDIO_ERROR_INVALID_STATE;
}
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_PAUSING);
- return mServiceInterface.startStream(mServiceStreamHandle);
+ return mServiceInterface.pauseStream(mServiceStreamHandle);
}
aaudio_result_t AudioStreamInternal::requestPause()
{
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName());
aaudio_result_t result = stopCallback();
if (result != AAUDIO_OK) {
return result;
}
- return requestPauseInternal();
+ result = requestPauseInternal();
+ ALOGD("AudioStreamInternal(): requestPause() returns %d", result);
+ return result;
}
aaudio_result_t AudioStreamInternal::requestFlush() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): flush()");
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
+ mServiceStreamHandle);
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -325,35 +343,45 @@
}
void AudioStreamInternal::onFlushFromServer() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+
// Bump offset so caller does not see the retrograde motion in getFramesRead().
int64_t framesFlushed = writeCounter - readCounter;
mFramesOffsetFromService += framesFlushed;
+
// Flush written frames by forcing writeCounter to readCounter.
// This is because we cannot move the read counter in the hardware.
mAudioEndpoint.setDownDataWriteCounter(readCounter);
}
+aaudio_result_t AudioStreamInternal::requestStopInternal()
+{
+ if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+ ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+ mServiceStreamHandle);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ mClockModel.stop(AudioClock::getNanoseconds());
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+ return mServiceInterface.stopStream(mServiceStreamHandle);
+}
+
aaudio_result_t AudioStreamInternal::requestStop()
{
- // TODO better implementation of requestStop()
- aaudio_result_t result = requestPause();
- if (result == AAUDIO_OK) {
- aaudio_stream_state_t state;
- result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
- &state,
- 500 * AAUDIO_NANOS_PER_MILLISECOND);// TODO temporary code
- if (result == AAUDIO_OK) {
- result = requestFlush();
- }
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName());
+ aaudio_result_t result = stopCallback();
+ if (result != AAUDIO_OK) {
+ return result;
}
+ result = requestStopInternal();
+ ALOGD("AudioStreamInternal(): requestStop() returns %d", result);
return result;
}
aaudio_result_t AudioStreamInternal::registerThread() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): registerThread()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -364,7 +392,6 @@
}
aaudio_result_t AudioStreamInternal::unregisterThread() {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal(): unregisterThread()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -394,16 +421,16 @@
static int64_t oldTime = 0;
int64_t framePosition = command.timestamp.position;
int64_t nanoTime = command.timestamp.timestamp;
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
(long long) framePosition,
(long long) nanoTime);
int64_t nanosDelta = nanoTime - oldTime;
if (nanosDelta > 0 && oldTime > 0) {
int64_t framesDelta = framePosition - oldPosition;
int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
}
oldPosition = framePosition;
oldTime = nanoTime;
@@ -422,23 +449,27 @@
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
aaudio_result_t result = AAUDIO_OK;
- ALOGD_IF(ALOG_CONDITION, "processCommands() got event %d", message->event.event);
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event);
switch (message->event.event) {
case AAUDIO_SERVICE_EVENT_STARTED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
setState(AAUDIO_STREAM_STATE_STARTED);
break;
case AAUDIO_SERVICE_EVENT_PAUSED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
setState(AAUDIO_STREAM_STATE_PAUSED);
break;
+ case AAUDIO_SERVICE_EVENT_STOPPED:
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ break;
case AAUDIO_SERVICE_EVENT_FLUSHED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
setState(AAUDIO_STREAM_STATE_FLUSHED);
onFlushFromServer();
break;
case AAUDIO_SERVICE_EVENT_CLOSED:
- ALOGD_IF(ALOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
setState(AAUDIO_STREAM_STATE_CLOSED);
break;
case AAUDIO_SERVICE_EVENT_DISCONNECTED:
@@ -448,7 +479,7 @@
break;
case AAUDIO_SERVICE_EVENT_VOLUME:
mVolume = message->event.dataDouble;
- ALOGD_IF(ALOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
+ ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
break;
default:
ALOGW("WARNING - processCommands() Unrecognized event = %d",
@@ -463,7 +494,7 @@
aaudio_result_t result = AAUDIO_OK;
while (result == AAUDIO_OK) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
AAudioServiceMessage message;
if (mAudioEndpoint.readUpCommand(&message) != 1) {
break; // no command this time, no problem
@@ -478,7 +509,7 @@
break;
default:
- ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+ ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
(int) message.what);
result = AAUDIO_ERROR_UNEXPECTED_VALUE;
break;
@@ -497,19 +528,13 @@
int64_t currentTimeNanos = AudioClock::getNanoseconds();
int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
int32_t framesLeft = numFrames;
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write(%p, %d) at time %08llu , mState = %s",
- // buffer, numFrames, (unsigned long long) currentTimeNanos,
- // AAudio_convertStreamStateToText(getState()));
// Write until all the data has been written or until a timeout occurs.
while (framesLeft > 0) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesLeft = %d, loopCount = %d =====",
- // framesLeft, loopCount++);
// The call to writeNow() will not block. It will just write as much as it can.
int64_t wakeTimeNanos = 0;
aaudio_result_t framesWritten = writeNow(source, framesLeft,
currentTimeNanos, &wakeTimeNanos);
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() loop: framesWritten = %d", framesWritten);
if (framesWritten < 0) {
ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
result = framesWritten;
@@ -522,7 +547,6 @@
if (timeoutNanoseconds == 0) {
break; // don't block
} else if (framesLeft > 0) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
// clip the wake time to something reasonable
if (wakeTimeNanos < currentTimeNanos) {
wakeTimeNanos = currentTimeNanos;
@@ -534,16 +558,13 @@
break;
}
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
- // (long long) (wakeTimeNanos - currentTimeNanos));
- AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
+ int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
+ AudioClock::sleepForNanos(sleepForNanos);
currentTimeNanos = AudioClock::getNanoseconds();
}
}
// return error or framesWritten
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::write() result = %d, framesLeft = %d, #%d",
- // result, framesLeft, loopCount);
(void) loopCount;
return (result < 0) ? result : numFrames - framesLeft;
}
@@ -552,17 +573,15 @@
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
int64_t currentNanoTime, int64_t *wakeTimePtr) {
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow(%p) - enter", buffer);
{
aaudio_result_t result = processCommands();
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - processCommands() returned %d", result);
if (result != AAUDIO_OK) {
return result;
}
}
if (mAudioEndpoint.isOutputFreeRunning()) {
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
// Update data queue based on the timing model.
int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
@@ -575,9 +594,9 @@
}
// Write some data to the buffer.
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
// numFrames, framesWritten);
// Calculate an ideal time to wake up.
@@ -585,7 +604,7 @@
// By default wake up a few milliseconds from now. // TODO review
int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
aaudio_stream_state_t state = getState();
- //ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
+ //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
// AAudio_convertStreamStateToText(state));
switch (state) {
case AAUDIO_STREAM_STATE_OPEN:
@@ -612,7 +631,7 @@
*wakeTimePtr = wakeTime;
}
-// ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
// (unsigned long long)currentNanoTime,
// (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
@@ -623,9 +642,8 @@
// TODO this function needs a major cleanup.
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
int32_t numFrames) {
- // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
+ // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
WrappingBuffer wrappingBuffer;
- mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);
uint8_t *source = (uint8_t *) buffer;
int32_t framesLeft = numFrames;
@@ -640,18 +658,25 @@
if (framesToWrite > framesAvailable) {
framesToWrite = framesAvailable;
}
- int32_t numBytes = getBytesPerFrame();
+ int32_t numBytes = getBytesPerFrame() * framesToWrite;
// TODO handle volume scaling
if (getFormat() == mDeviceFormat) {
// Copy straight through.
memcpy(wrappingBuffer.data[partIndex], source, numBytes);
} else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
- && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
// Data conversion.
AAudioConvert_floatToPcm16(
(const float *) source,
framesToWrite * getSamplesPerFrame(),
(int16_t *) wrappingBuffer.data[partIndex]);
+ } else if (getFormat() == AAUDIO_FORMAT_PCM_I16
+ && mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ // Data conversion.
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ framesToWrite * getSamplesPerFrame(),
+ (float *) wrappingBuffer.data[partIndex]);
} else {
// TODO handle more conversions
ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
@@ -661,6 +686,8 @@
source += numBytes;
framesLeft -= framesToWrite;
+ } else {
+ break;
}
partIndex++;
}
@@ -670,7 +697,7 @@
if (framesWritten > 0) {
incrementFramesWritten(framesWritten);
}
- // ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
+ // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
return framesWritten;
}
@@ -680,7 +707,15 @@
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
int32_t actualFrames = 0;
+ // Round to the next highest burst size.
+ if (getFramesPerBurst() > 0) {
+ int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
+ requestedFrames = numBursts * getFramesPerBurst();
+ }
+
aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d",
+ getLocationName(), requestedFrames, actualFrames);
if (result < 0) {
return result;
} else {
@@ -714,7 +749,7 @@
} else {
mLastFramesRead = framesRead;
}
- ALOGD_IF(ALOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+ ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
return framesRead;
}
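One detail worth calling out from the setBufferSize() hunk above: the requested size is rounded up to a whole number of bursts before being applied, using the usual round-up-to-multiple arithmetic. A small standalone sketch (roundUpToBurstMultiple is an illustrative helper name, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Round framesRequested up to the next multiple of framesPerBurst.
    static int32_t roundUpToBurstMultiple(int32_t framesRequested, int32_t framesPerBurst) {
        if (framesPerBurst <= 0) return framesRequested;   // nothing to round to
        int32_t numBursts = (framesRequested + framesPerBurst - 1) / framesPerBurst;
        return numBursts * framesPerBurst;
    }

    int main() {
        assert(roundUpToBurstMultiple(100, 64) == 128);    // partial burst rounds up
        assert(roundUpToBurstMultiple(128, 64) == 128);    // exact multiple unchanged
        assert(roundUpToBurstMultiple(1, 48) == 48);
        return 0;
    }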
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 1aa3b0f..8244311 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -94,6 +94,7 @@
aaudio_result_t processCommands();
aaudio_result_t requestPauseInternal();
+ aaudio_result_t requestStopInternal();
aaudio_result_t stopCallback();
@@ -129,6 +130,11 @@
int32_t numFrames);
void processTimestamp(uint64_t position, int64_t time);
+
+ const char *getLocationName() const {
+ return mInService ? "SERVICE" : "CLIENT";
+ }
+
// Adjust timing model based on timestamp from service.
IsochronousClockModel mClockModel; // timing model for chasing the HAL
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index c278c8b..21e3e70 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -101,13 +101,13 @@
// or we may be drifting due to a slow HW clock.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
- ALOGI("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+ ALOGV("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
(int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
} else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
// Later than expected timestamp.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
- ALOGI("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+ ALOGV("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
(int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
(int) (mMaxLatenessInNanos / 1000));
}
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index b17309c..97726e6 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -168,16 +168,15 @@
void *userData)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
streamBuilder->setDataCallbackProc(callback);
streamBuilder->setDataCallbackUserData(userData);
}
+
AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
AAudioStream_errorCallback callback,
void *userData)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
streamBuilder->setErrorCallbackProc(callback);
streamBuilder->setErrorCallbackUserData(userData);
}
@@ -186,10 +185,10 @@
int32_t frames)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- ALOGD("%s: frames = %d", __func__, frames);
streamBuilder->setFramesPerDataCallback(frames);
}
+// TODO merge AAudioInternal_openStream into AAudioStreamBuilder_openStream
static aaudio_result_t AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
AAudioStream** streamPtr)
{
@@ -206,7 +205,7 @@
AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
AAudioStream** streamPtr)
{
- ALOGD("AAudioStreamBuilder_openStream(): builder = %p", builder);
+ ALOGD("AAudioStreamBuilder_openStream() ----------------------------------------------");
AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
return AAudioInternal_openStream(streamBuilder, streamPtr);
}
@@ -228,6 +227,7 @@
if (audioStream != nullptr) {
audioStream->close();
delete audioStream;
+ ALOGD("AAudioStream_close() ----------------------------------------------");
return AAUDIO_OK;
}
return AAUDIO_ERROR_INVALID_HANDLE;
@@ -325,29 +325,6 @@
}
// ============================================================
-// Miscellaneous
-// ============================================================
-
-AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream* stream,
- int64_t periodNanoseconds,
- aaudio_audio_thread_proc_t threadProc, void *arg)
-{
- AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- if (audioStream->getDataCallbackProc() != nullptr) {
- return AAUDIO_ERROR_INCOMPATIBLE;
- }
- return audioStream->createThread(periodNanoseconds, threadProc, arg);
-}
-
-AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream* stream,
- void **returnArg,
- int64_t timeoutNanoseconds)
-{
- AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- return audioStream->joinThread(returnArg, timeoutNanoseconds);
-}
-
-// ============================================================
// Stream - queries
// ============================================================
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 7c0b5ae..9690848 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -38,7 +38,6 @@
aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
{
-
// Copy parameters from the Builder because the Builder may be deleted after this call.
mSamplesPerFrame = builder.getSamplesPerFrame();
mSampleRate = builder.getSampleRate();
@@ -46,6 +45,7 @@
mFormat = builder.getFormat();
mDirection = builder.getDirection();
mSharingMode = builder.getSharingMode();
+ mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
// callbacks
mFramesPerDataCallback = builder.getFramesPerDataCallback();
@@ -53,10 +53,19 @@
mErrorCallbackProc = builder.getErrorCallbackProc();
mDataCallbackUserData = builder.getDataCallbackUserData();
- // TODO validate more parameters.
- if (mErrorCallbackProc != nullptr && mDataCallbackProc == nullptr) {
- ALOGE("AudioStream::open(): disconnect callback cannot be used without a data callback.");
- return AAUDIO_ERROR_UNEXPECTED_VALUE;
+ // This is very helpful for debugging in the future.
+ ALOGI("AudioStream.open(): rate = %d, channels = %d, format = %d, sharing = %d",
+ mSampleRate, mSamplesPerFrame, mFormat, mSharingMode);
+
+ // Check for values that are ridiculously out of range to prevent math overflow exploits.
+ // The service will do a better check.
+ if (mSamplesPerFrame < 0 || mSamplesPerFrame > 128) {
+ ALOGE("AudioStream::open(): samplesPerFrame out of range = %d", mSamplesPerFrame);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (mSampleRate < 0 || mSampleRate > 1000000) {
+ ALOGE("AudioStream::open(): mSampleRate out of range = %d", mSampleRate);
+ return AAUDIO_ERROR_INVALID_RATE;
}
if (mDirection != AAUDIO_DIRECTION_INPUT && mDirection != AAUDIO_DIRECTION_OUTPUT) {
ALOGE("AudioStream::open(): illegal direction %d", mDirection);
@@ -70,27 +79,6 @@
close();
}
-aaudio_result_t AudioStream::waitForStateTransition(aaudio_stream_state_t startingState,
- aaudio_stream_state_t endingState,
- int64_t timeoutNanoseconds)
-{
- aaudio_stream_state_t state = getState();
- aaudio_stream_state_t nextState = state;
- if (state == startingState && state != endingState) {
- aaudio_result_t result = waitForStateChange(state, &nextState, timeoutNanoseconds);
- if (result != AAUDIO_OK) {
- return result;
- }
- }
-// It's OK if the expected transition has already occurred.
-// But if we reach an unexpected state then that is an error.
- if (nextState != endingState) {
- return AAUDIO_ERROR_UNEXPECTED_STATE;
- } else {
- return AAUDIO_OK;
- }
-}
-
aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
aaudio_stream_state_t *nextState,
int64_t timeoutNanoseconds)
@@ -123,16 +111,15 @@
return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
}
-// This registers the app's background audio thread with the server before
+// This registers the callback thread with the server before
// passing control to the app. This gives the server an opportunity to boost
// the thread's performance characteristics.
void* AudioStream::wrapUserThread() {
void* procResult = nullptr;
mThreadRegistrationResult = registerThread();
if (mThreadRegistrationResult == AAUDIO_OK) {
- // Call application procedure. This may take a very long time.
+ // Run callback loop. This may take a very long time.
procResult = mThreadProc(mThreadArg);
- ALOGD("AudioStream::mThreadProc() returned");
mThreadRegistrationResult = unregisterThread();
}
return procResult;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index da71906..916870b 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -27,6 +27,8 @@
namespace aaudio {
+typedef void *(*aaudio_audio_thread_proc_t)(void *);
+
class AudioStreamBuilder;
/**
@@ -152,6 +154,10 @@
return mSharingMode;
}
+ bool isSharingModeMatchRequired() const {
+ return mSharingModeMatchRequired;
+ }
+
aaudio_direction_t getDirection() const {
return mDirection;
}
@@ -225,16 +231,6 @@
}
/**
- * Wait for a transition from one state to another.
- * @return AAUDIO_OK if the endingState was observed, or AAUDIO_ERROR_UNEXPECTED_STATE
- * if any state that was not the startingState or endingState was observed
- * or AAUDIO_ERROR_TIMEOUT
- */
- virtual aaudio_result_t waitForStateTransition(aaudio_stream_state_t startingState,
- aaudio_stream_state_t endingState,
- int64_t timeoutNanoseconds);
-
- /**
* This should not be called after the open() call.
*/
void setSampleRate(int32_t sampleRate) {
@@ -292,6 +288,7 @@
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ bool mSharingModeMatchRequired = false; // must match sharing mode requested
aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index a4d1970..4e0b8c6 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -30,10 +30,11 @@
#include "legacy/AudioStreamRecord.h"
#include "legacy/AudioStreamTrack.h"
-// Enable a mixer in AAudio service that will mix stream to an ALSA MMAP buffer.
+// Enable a mixer in AAudio service that will mix streams to an ALSA MMAP buffer.
#define MMAP_SHARED_ENABLED 0
-// Enable AAUDIO_SHARING_MODE_EXCLUSIVE that uses an ALSA MMAP buffer.
-#define MMAP_EXCLUSIVE_ENABLED 1
+
+// Enable AAUDIO_SHARING_MODE_EXCLUSIVE that uses an ALSA MMAP buffer directly.
+#define MMAP_EXCLUSIVE_ENABLED 0
using namespace aaudio;
@@ -50,7 +51,7 @@
AudioStream* audioStream = nullptr;
AAudioBinderClient *aaudioClient = nullptr;
const aaudio_sharing_mode_t sharingMode = getSharingMode();
- ALOGD("AudioStreamBuilder.build() sharingMode = %d", sharingMode);
+
switch (getDirection()) {
case AAUDIO_DIRECTION_INPUT:
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index c0ee6fe..25baf4c 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -82,6 +82,15 @@
return this;
}
+ bool isSharingModeMatchRequired() const {
+ return mSharingModeMatchRequired;
+ }
+
+ AudioStreamBuilder* setSharingModeMatchRequired(bool required) {
+ mSharingModeMatchRequired = required;
+ return this;
+ }
+
int32_t getBufferCapacity() const {
return mBufferCapacity;
}
@@ -109,7 +118,6 @@
return this;
}
-
void *getDataCallbackUserData() const {
return mDataCallbackUserData;
}
@@ -153,6 +161,7 @@
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ bool mSharingModeMatchRequired = false; // must match sharing mode requested
aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
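
A minimal sketch of how the sharing-mode-match fields added above might be exercised; the fluent setter pair is taken from this diff, but the surrounding class is a stand-in, not the real AAudio builder, and AAUDIO_SHARING_MODE_* here is a placeholder enum rather than the actual typedef:

    #include <iostream>

    enum aaudio_sharing_mode_t { AAUDIO_SHARING_MODE_SHARED, AAUDIO_SHARING_MODE_EXCLUSIVE };

    class AudioStreamBuilder {
    public:
        AudioStreamBuilder* setSharingMode(aaudio_sharing_mode_t mode) {
            mSharingMode = mode;
            return this;
        }
        // New in this change: ask that the requested sharing mode be honored exactly
        // instead of silently falling back to SHARED.
        AudioStreamBuilder* setSharingModeMatchRequired(bool required) {
            mSharingModeMatchRequired = required;
            return this;
        }
        bool isSharingModeMatchRequired() const { return mSharingModeMatchRequired; }
        aaudio_sharing_mode_t getSharingMode() const { return mSharingMode; }

    private:
        aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
        bool mSharingModeMatchRequired = false;   // must match sharing mode requested
    };

    int main() {
        AudioStreamBuilder builder;
        builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE)
               ->setSharingModeMatchRequired(true);
        // A stream factory could then fail open() instead of downgrading to SHARED.
        std::cout << "match required: " << builder.isSharingModeMatchRequired() << "\n";
        return 0;
    }
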
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index 857780c..6b4a772 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -60,14 +60,11 @@
, mFramesUnderrunCount(0)
, mUnderrunCount(0)
{
- // TODO Handle possible failures to allocate. Move out of constructor?
mFifo = new FifoControllerIndirect(capacityInFrames,
capacityInFrames,
readIndexAddress,
writeIndexAddress);
mStorageOwned = false;
- ALOGD("FifoProcessor: capacityInFrames = %d, bytesPerFrame = %d",
- capacityInFrames, bytesPerFrame);
}
FifoBuffer::~FifoBuffer() {
@@ -132,8 +129,6 @@
while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
fifo_frames_t framesToRead = framesLeft;
fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- //ALOGD("FifoProcessor::read() framesAvailable = %d, partIndex = %d",
- // framesAvailable, partIndex);
if (framesAvailable > 0) {
if (framesToRead > framesAvailable) {
framesToRead = framesAvailable;
@@ -143,6 +138,8 @@
destination += numBytes;
framesLeft -= framesToRead;
+ } else {
+ break;
}
partIndex++;
}
@@ -172,6 +169,8 @@
source += numBytes;
framesLeft -= framesToWrite;
+ } else {
+ break;
}
partIndex++;
}
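
The read and write loops above now break as soon as a wrapping-buffer part reports zero frames; since the two parts of the ring view are filled in order, an empty first part means there is nothing left to copy. A standalone sketch of that loop shape, with plain C++ types standing in for FifoBuffer/WrappingBuffer:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    struct WrappingView {
        static constexpr int SIZE = 2;        // a ring exposes at most two contiguous runs
        const uint8_t* data[SIZE];
        int32_t numFrames[SIZE];
    };

    int32_t readFrames(const WrappingView& view, int32_t bytesPerFrame,
                       uint8_t* destination, int32_t framesWanted) {
        int32_t framesLeft = framesWanted;
        for (int part = 0; part < WrappingView::SIZE && framesLeft > 0; ++part) {
            int32_t available = view.numFrames[part];
            if (available <= 0) {
                break;                        // nothing here, so nothing in later parts either
            }
            int32_t framesToRead = std::min(framesLeft, available);
            int32_t numBytes = framesToRead * bytesPerFrame;
            std::memcpy(destination, view.data[part], numBytes);
            destination += numBytes;
            framesLeft -= framesToRead;
        }
        return framesWanted - framesLeft;     // frames actually copied
    }

    int main() {
        const uint8_t ring[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        // Pretend the readable region wraps: 3 frames at the tail, 2 at the head.
        WrappingView view = {{ring + 5, ring}, {3, 2}};
        uint8_t out[5];
        return readFrames(view, 1, out, 5) == 5 ? 0 : 1;
    }
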
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 5637f0d..efbbfc5 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -33,10 +33,6 @@
case AAUDIO_FORMAT_PCM_I16:
size = sizeof(int16_t);
break;
- case AAUDIO_FORMAT_PCM_I32:
- case AAUDIO_FORMAT_PCM_I8_24:
- size = sizeof(int32_t);
- break;
case AAUDIO_FORMAT_PCM_FLOAT:
size = sizeof(float);
break;
@@ -61,7 +57,7 @@
}
}
-void AAudioConvert_pcm16ToFloat(const float *source, int32_t numSamples, int16_t *destination) {
+void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
for (int i = 0; i < numSamples; i++) {
destination[i] = source[i] * (1.0f / 32768.0f);
}
@@ -82,6 +78,8 @@
status = INVALID_OPERATION;
break;
case AAUDIO_ERROR_UNEXPECTED_VALUE: // TODO redundant?
+ case AAUDIO_ERROR_INVALID_RATE:
+ case AAUDIO_ERROR_INVALID_FORMAT:
case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
status = BAD_VALUE;
break;
@@ -107,7 +105,7 @@
result = AAUDIO_ERROR_INVALID_HANDLE;
break;
case DEAD_OBJECT:
- result = AAUDIO_ERROR_DISCONNECTED;
+ result = AAUDIO_ERROR_NO_SERVICE;
break;
case INVALID_OPERATION:
result = AAUDIO_ERROR_INVALID_STATE;
@@ -135,12 +133,6 @@
case AAUDIO_FORMAT_PCM_FLOAT:
androidFormat = AUDIO_FORMAT_PCM_FLOAT;
break;
- case AAUDIO_FORMAT_PCM_I8_24:
- androidFormat = AUDIO_FORMAT_PCM_8_24_BIT;
- break;
- case AAUDIO_FORMAT_PCM_I32:
- androidFormat = AUDIO_FORMAT_PCM_32_BIT;
- break;
default:
androidFormat = AUDIO_FORMAT_DEFAULT;
ALOGE("AAudioConvert_aaudioToAndroidDataFormat 0x%08X unrecognized", aaudioFormat);
@@ -158,12 +150,6 @@
case AUDIO_FORMAT_PCM_FLOAT:
aaudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
break;
- case AUDIO_FORMAT_PCM_32_BIT:
- aaudioFormat = AAUDIO_FORMAT_PCM_I32;
- break;
- case AUDIO_FORMAT_PCM_8_24_BIT:
- aaudioFormat = AAUDIO_FORMAT_PCM_I8_24;
- break;
default:
aaudioFormat = AAUDIO_FORMAT_INVALID;
ALOGE("AAudioConvert_androidToAAudioDataFormat 0x%08X unrecognized", androidFormat);
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index ad130e0..166534f 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -1,3 +1,9 @@
+cc_library_headers {
+ name: "libaudioclient_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+}
+
cc_library_shared {
name: "libaudioclient",
srcs: [
@@ -26,17 +32,20 @@
"libaudioutils",
],
export_shared_lib_headers: ["libbinder"],
- local_include_dirs: ["include"],
- export_include_dirs: ["include"],
+
+ local_include_dirs: ["include/media"],
+ header_libs: ["libaudioclient_headers"],
+ export_header_lib_headers: ["libaudioclient_headers"],
+
// for memory heap analysis
static_libs: [
"libc_malloc_debug_backtrace",
"libc_logging",
],
cflags: [
+ "-Wall",
"-Werror",
"-Wno-error=deprecated-declarations",
- "-Wall",
],
sanitize: {
misc_undefined : [
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 4e2a0d5..858b5cc 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,7 +24,7 @@
#include <binder/Parcel.h>
-#include <media/IAudioFlinger.h>
+#include "IAudioFlinger.h"
namespace android {
diff --git a/media/libaudioclient/include/IAudioFlinger.h b/media/libaudioclient/include/IAudioFlinger.h
deleted file mode 100644
index 8c5e61a..0000000
--- a/media/libaudioclient/include/IAudioFlinger.h
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IAUDIOFLINGER_H
-#define ANDROID_IAUDIOFLINGER_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <media/IAudioTrack.h>
-#include <media/IAudioRecord.h>
-#include <media/IAudioFlingerClient.h>
-#include <system/audio.h>
-#include <system/audio_effect.h>
-#include <system/audio_policy.h>
-#include <media/IEffect.h>
-#include <media/IEffectClient.h>
-#include <utils/String8.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class IAudioFlinger : public IInterface
-{
-public:
- DECLARE_META_INTERFACE(AudioFlinger);
-
-
- // invariant on exit for all APIs that return an sp<>:
- // (return value != 0) == (*status == NO_ERROR)
-
- /* create an audio track and registers it with AudioFlinger.
- * return null if the track cannot be created.
- */
- virtual sp<IAudioTrack> createTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t *pFrameCount,
- audio_output_flags_t *flags,
- const sp<IMemory>& sharedBuffer,
- // On successful return, AudioFlinger takes over the handle
- // reference and will release it when the track is destroyed.
- // However on failure, the client is responsible for release.
- audio_io_handle_t output,
- pid_t pid,
- pid_t tid, // -1 means unused, otherwise must be valid non-0
- audio_session_t *sessionId,
- int clientUid,
- status_t *status,
- audio_port_handle_t portId) = 0;
-
- virtual sp<IAudioRecord> openRecord(
- // On successful return, AudioFlinger takes over the handle
- // reference and will release it when the track is destroyed.
- // However on failure, the client is responsible for release.
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& callingPackage,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid, // -1 means unused, otherwise must be valid non-0
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers, // return value 0 means it follows cblk
- status_t *status,
- audio_port_handle_t portId) = 0;
-
- // FIXME Surprisingly, format/latency don't work for input handles
-
- /* query the audio hardware state. This state never changes,
- * and therefore can be cached.
- */
- virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const = 0;
-
- // reserved; formerly channelCount()
-
- virtual audio_format_t format(audio_io_handle_t output) const = 0;
- virtual size_t frameCount(audio_io_handle_t ioHandle) const = 0;
-
- // return estimated latency in milliseconds
- virtual uint32_t latency(audio_io_handle_t output) const = 0;
-
- /* set/get the audio hardware state. This will probably be used by
- * the preference panel, mostly.
- */
- virtual status_t setMasterVolume(float value) = 0;
- virtual status_t setMasterMute(bool muted) = 0;
-
- virtual float masterVolume() const = 0;
- virtual bool masterMute() const = 0;
-
- /* set/get stream type state. This will probably be used by
- * the preference panel, mostly.
- */
- virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
- audio_io_handle_t output) = 0;
- virtual status_t setStreamMute(audio_stream_type_t stream, bool muted) = 0;
-
- virtual float streamVolume(audio_stream_type_t stream,
- audio_io_handle_t output) const = 0;
- virtual bool streamMute(audio_stream_type_t stream) const = 0;
-
- // set audio mode
- virtual status_t setMode(audio_mode_t mode) = 0;
-
- // mic mute/state
- virtual status_t setMicMute(bool state) = 0;
- virtual bool getMicMute() const = 0;
-
- virtual status_t setParameters(audio_io_handle_t ioHandle,
- const String8& keyValuePairs) = 0;
- virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
- const = 0;
-
- // Register an object to receive audio input/output change and track notifications.
- // For a given calling pid, AudioFlinger disregards any registrations after the first.
- // Thus the IAudioFlingerClient must be a singleton per process.
- virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
-
- // retrieve the audio recording buffer size
- // FIXME This API assumes a route, and so should be deprecated.
- virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
- audio_channel_mask_t channelMask) const = 0;
-
- virtual status_t openOutput(audio_module_handle_t module,
- audio_io_handle_t *output,
- audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
- uint32_t *latencyMs,
- audio_output_flags_t flags) = 0;
- virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
- audio_io_handle_t output2) = 0;
- virtual status_t closeOutput(audio_io_handle_t output) = 0;
- virtual status_t suspendOutput(audio_io_handle_t output) = 0;
- virtual status_t restoreOutput(audio_io_handle_t output) = 0;
-
- virtual status_t openInput(audio_module_handle_t module,
- audio_io_handle_t *input,
- audio_config_t *config,
- audio_devices_t *device,
- const String8& address,
- audio_source_t source,
- audio_input_flags_t flags) = 0;
- virtual status_t closeInput(audio_io_handle_t input) = 0;
-
- virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
-
- virtual status_t setVoiceVolume(float volume) = 0;
-
- virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
- audio_io_handle_t output) const = 0;
-
- virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
-
- virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
-
- virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
- virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
-
- virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
-
- virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
-
- virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID,
- effect_descriptor_t *pDescriptor) const = 0;
-
- virtual sp<IEffect> createEffect(
- effect_descriptor_t *pDesc,
- const sp<IEffectClient>& client,
- int32_t priority,
- // AudioFlinger doesn't take over handle reference from client
- audio_io_handle_t output,
- audio_session_t sessionId,
- const String16& callingPackage,
- pid_t pid,
- status_t *status,
- int *id,
- int *enabled) = 0;
-
- virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
- audio_io_handle_t dstOutput) = 0;
-
- virtual audio_module_handle_t loadHwModule(const char *name) = 0;
-
- // helpers for android.media.AudioManager.getProperty(), see description there for meaning
- // FIXME move these APIs to AudioPolicy to permit a more accurate implementation
- // that looks on primary device for a stream with fast flag, primary flag, or first one.
- virtual uint32_t getPrimaryOutputSamplingRate() = 0;
- virtual size_t getPrimaryOutputFrameCount() = 0;
-
- // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
- // and should be called at most once. For a definition of what "low RAM" means, see
- // android.app.ActivityManager.isLowRamDevice().
- virtual status_t setLowRamDevice(bool isLowRamDevice) = 0;
-
- /* List available audio ports and their attributes */
- virtual status_t listAudioPorts(unsigned int *num_ports,
- struct audio_port *ports) = 0;
-
- /* Get attributes for a given audio port */
- virtual status_t getAudioPort(struct audio_port *port) = 0;
-
- /* Create an audio patch between several source and sink ports */
- virtual status_t createAudioPatch(const struct audio_patch *patch,
- audio_patch_handle_t *handle) = 0;
-
- /* Release an audio patch */
- virtual status_t releaseAudioPatch(audio_patch_handle_t handle) = 0;
-
- /* List existing audio patches */
- virtual status_t listAudioPatches(unsigned int *num_patches,
- struct audio_patch *patches) = 0;
- /* Set audio port configuration */
- virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
-
- /* Get the HW synchronization source used for an audio session */
- virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) = 0;
-
- /* Indicate JAVA services are ready (scheduling, power management ...) */
- virtual status_t systemReady() = 0;
-
- // Returns the number of frames per audio HAL buffer.
- virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-
-class BnAudioFlinger : public BnInterface<IAudioFlinger>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_IAUDIOFLINGER_H
diff --git a/media/libaudioclient/include/AudioBufferProvider.h b/media/libaudioclient/include/media/AudioBufferProvider.h
similarity index 100%
rename from media/libaudioclient/include/AudioBufferProvider.h
rename to media/libaudioclient/include/media/AudioBufferProvider.h
diff --git a/media/libaudioclient/include/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
similarity index 100%
rename from media/libaudioclient/include/AudioEffect.h
rename to media/libaudioclient/include/media/AudioEffect.h
diff --git a/media/libaudioclient/include/AudioIoDescriptor.h b/media/libaudioclient/include/media/AudioIoDescriptor.h
similarity index 100%
rename from media/libaudioclient/include/AudioIoDescriptor.h
rename to media/libaudioclient/include/media/AudioIoDescriptor.h
diff --git a/media/libaudioclient/include/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
similarity index 100%
rename from media/libaudioclient/include/AudioMixer.h
rename to media/libaudioclient/include/media/AudioMixer.h
diff --git a/media/libaudioclient/include/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
similarity index 100%
rename from media/libaudioclient/include/AudioParameter.h
rename to media/libaudioclient/include/media/AudioParameter.h
diff --git a/media/libaudioclient/include/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
similarity index 100%
rename from media/libaudioclient/include/AudioPolicy.h
rename to media/libaudioclient/include/media/AudioPolicy.h
diff --git a/media/libaudioclient/include/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
similarity index 96%
rename from media/libaudioclient/include/AudioPolicyHelper.h
rename to media/libaudioclient/include/media/AudioPolicyHelper.h
index 04f6a20..854057d 100644
--- a/media/libaudioclient/include/AudioPolicyHelper.h
+++ b/media/libaudioclient/include/media/AudioPolicyHelper.h
@@ -18,6 +18,8 @@
#include <system/audio.h>
+// TODO: fix this among dependencies
+__attribute__((unused))
static audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
{
// flags to stream type mapping
@@ -63,6 +65,8 @@
}
}
+// TODO: fix this among dependencies
+__attribute__((unused))
static void stream_type_to_audio_attributes(audio_stream_type_t streamType,
audio_attributes_t *attr) {
memset(attr, 0, sizeof(audio_attributes_t));
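
The __attribute__((unused)) annotations above exist because these helpers are static functions defined in a header: any translation unit that includes the header without calling one of them would trip -Wunused-function, which is fatal under -Wall -Werror. A minimal illustration (not AOSP code) of the warning being suppressed:

    // A static function at file scope (as it would be after including the header)
    // that is never called. With -Wall -Werror this breaks the build unless it is
    // tagged unused, which is what the diff does.
    __attribute__((unused))
    static int helper_never_called(int x) {
        return x + 1;
    }

    int main() {
        return 0;   // helper_never_called intentionally not used
    }
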
diff --git a/media/libaudioclient/include/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
similarity index 100%
rename from media/libaudioclient/include/AudioRecord.h
rename to media/libaudioclient/include/media/AudioRecord.h
diff --git a/media/libaudioclient/include/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
similarity index 100%
rename from media/libaudioclient/include/AudioSystem.h
rename to media/libaudioclient/include/media/AudioSystem.h
diff --git a/media/libaudioclient/include/AudioTimestamp.h b/media/libaudioclient/include/media/AudioTimestamp.h
similarity index 100%
rename from media/libaudioclient/include/AudioTimestamp.h
rename to media/libaudioclient/include/media/AudioTimestamp.h
diff --git a/media/libaudioclient/include/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
similarity index 100%
rename from media/libaudioclient/include/AudioTrack.h
rename to media/libaudioclient/include/media/AudioTrack.h
diff --git a/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
similarity index 100%
rename from include/media/IAudioFlinger.h
rename to media/libaudioclient/include/media/IAudioFlinger.h
diff --git a/media/libaudioclient/include/IAudioFlingerClient.h b/media/libaudioclient/include/media/IAudioFlingerClient.h
similarity index 100%
rename from media/libaudioclient/include/IAudioFlingerClient.h
rename to media/libaudioclient/include/media/IAudioFlingerClient.h
diff --git a/media/libaudioclient/include/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
similarity index 100%
rename from media/libaudioclient/include/IAudioPolicyService.h
rename to media/libaudioclient/include/media/IAudioPolicyService.h
diff --git a/media/libaudioclient/include/IAudioPolicyServiceClient.h b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
similarity index 100%
rename from media/libaudioclient/include/IAudioPolicyServiceClient.h
rename to media/libaudioclient/include/media/IAudioPolicyServiceClient.h
diff --git a/media/libaudioclient/include/IAudioRecord.h b/media/libaudioclient/include/media/IAudioRecord.h
similarity index 100%
rename from media/libaudioclient/include/IAudioRecord.h
rename to media/libaudioclient/include/media/IAudioRecord.h
diff --git a/media/libaudioclient/include/IAudioTrack.h b/media/libaudioclient/include/media/IAudioTrack.h
similarity index 100%
rename from media/libaudioclient/include/IAudioTrack.h
rename to media/libaudioclient/include/media/IAudioTrack.h
diff --git a/media/libaudioclient/include/IEffect.h b/media/libaudioclient/include/media/IEffect.h
similarity index 100%
rename from media/libaudioclient/include/IEffect.h
rename to media/libaudioclient/include/media/IEffect.h
diff --git a/media/libaudioclient/include/IEffectClient.h b/media/libaudioclient/include/media/IEffectClient.h
similarity index 100%
rename from media/libaudioclient/include/IEffectClient.h
rename to media/libaudioclient/include/media/IEffectClient.h
diff --git a/media/libaudioclient/include/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
similarity index 100%
rename from media/libaudioclient/include/ToneGenerator.h
rename to media/libaudioclient/include/media/ToneGenerator.h
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
index 032b3e9..68a1f7b 100644
--- a/media/libaudiohal/Android.mk
+++ b/media/libaudiohal/Android.mk
@@ -4,8 +4,6 @@
LOCAL_SHARED_LIBRARIES := \
libcutils \
- libeffects \
- libhardware \
liblog \
libutils
@@ -22,6 +20,10 @@
EffectsFactoryHalLocal.cpp \
StreamHalLocal.cpp
+LOCAL_SHARED_LIBRARIES += \
+ libeffects \
+ libhardware
+
else # if !USE_LEGACY_LOCAL_AUDIO_HAL
LOCAL_SRC_FILES := \
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.cpp b/media/libaudiohal/EffectsFactoryHalHidl.cpp
index 605c059..a8081b7 100644
--- a/media/libaudiohal/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/EffectsFactoryHalHidl.cpp
@@ -18,7 +18,6 @@
//#define LOG_NDEBUG 0
#include <cutils/native_handle.h>
-#include <media/EffectsFactoryApi.h>
#include "ConversionHelperHidl.h"
#include "EffectHalHidl.h"
@@ -39,7 +38,7 @@
// static
bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
- return EffectIsNullUuid(pEffectUuid);
+ return memcmp(pEffectUuid, EFFECT_UUID_NULL, sizeof(effect_uuid_t)) == 0;
}
EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
diff --git a/media/libeffects/factory/Android.bp b/media/libeffects/factory/Android.bp
index e0e0d13..16680bd 100644
--- a/media/libeffects/factory/Android.bp
+++ b/media/libeffects/factory/Android.bp
@@ -1,6 +1,15 @@
+cc_library_headers {
+ name: "libeffects_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+ header_libs: ["libhardware_headers"],
+ export_header_lib_headers: ["libhardware_headers"],
+}
+
// Effect factory library
cc_library_shared {
name: "libeffects",
+ vendor: true,
srcs: ["EffectsFactory.c"],
shared_libs: [
@@ -11,7 +20,8 @@
include_dirs: ["system/media/audio_effects/include"],
- local_include_dirs:["include"],
+ local_include_dirs:["include/media"],
- export_include_dirs: ["include"],
+ header_libs: ["libeffects_headers"],
+ export_header_lib_headers: ["libeffects_headers"],
}
diff --git a/media/libeffects/factory/include/EffectsFactoryApi.h b/media/libeffects/factory/include/media/EffectsFactoryApi.h
similarity index 100%
rename from media/libeffects/factory/include/EffectsFactoryApi.h
rename to media/libeffects/factory/include/media/EffectsFactoryApi.h
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 4b14543..11a498d 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -1,5 +1,12 @@
+cc_library_headers {
+ name: "libmedia_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+}
+
cc_library {
name: "libmedia_helper",
+ vendor_available: true,
srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
cflags: [
"-Werror",
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index b0bd22e..e2d48a2 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -86,6 +86,8 @@
android.hardware.media.omx@1.0 \
android.hidl.memory@1.0 \
+LOCAL_HEADER_LIBRARIES := libmedia_headers
+
# for memory heap analysis
LOCAL_STATIC_LIBRARIES := libc_malloc_debug_backtrace libc_logging
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index fdbc869..724b3a0 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -389,7 +389,7 @@
}
}
if (transferBuf != nullptr) { // Using shared buffers.
- if (!transferBuf->isObserved()) {
+ if (!transferBuf->isObserved() && transferBuf != buf) {
// Transfer buffer must be part of a MediaBufferGroup.
ALOGV("adding shared memory buffer %p to local group", transferBuf);
mGroup->add_buffer(transferBuf);
diff --git a/media/libmedia/aidl/android/IGraphicBufferSource.aidl b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
index 325c631..f3c7abc 100644
--- a/media/libmedia/aidl/android/IGraphicBufferSource.aidl
+++ b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
@@ -28,10 +28,10 @@
void setSuspend(boolean suspend, long suspendTimeUs);
void setRepeatPreviousFrameDelayUs(long repeatAfterUs);
void setMaxFps(float maxFps);
- void setTimeLapseConfig(long timePerFrameUs, long timePerCaptureUs);
+ void setTimeLapseConfig(double fps, double captureFps);
void setStartTimeUs(long startTimeUs);
void setStopTimeUs(long stopTimeUs);
void setColorAspects(int aspects);
void setTimeOffsetUs(long timeOffsetsUs);
void signalEndOfInputStream();
-}
\ No newline at end of file
+}
diff --git a/media/libmedia/include/IMediaLogService.h b/media/libmedia/include/IMediaLogService.h
deleted file mode 100644
index 1f5777e..0000000
--- a/media/libmedia/include/IMediaLogService.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIALOGSERVICE_H
-#define ANDROID_IMEDIALOGSERVICE_H
-
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
-#include <binder/Parcel.h>
-
-namespace android {
-
-class IMediaLogService: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaLogService);
-
- virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name) = 0;
- virtual void unregisterWriter(const sp<IMemory>& shared) = 0;
-
-};
-
-class BnMediaLogService: public BnInterface<IMediaLogService>
-{
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags = 0);
-};
-
-} // namespace android
-
-#endif // ANDROID_IMEDIALOGSERVICE_H
diff --git a/media/libmedia/include/AVSyncSettings.h b/media/libmedia/include/media/AVSyncSettings.h
similarity index 100%
rename from media/libmedia/include/AVSyncSettings.h
rename to media/libmedia/include/media/AVSyncSettings.h
diff --git a/media/libmedia/include/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
similarity index 100%
rename from media/libmedia/include/BufferProviders.h
rename to media/libmedia/include/media/BufferProviders.h
diff --git a/media/libmedia/include/BufferingSettings.h b/media/libmedia/include/media/BufferingSettings.h
similarity index 100%
rename from media/libmedia/include/BufferingSettings.h
rename to media/libmedia/include/media/BufferingSettings.h
diff --git a/media/libmedia/include/CharacterEncodingDetector.h b/media/libmedia/include/media/CharacterEncodingDetector.h
similarity index 100%
rename from media/libmedia/include/CharacterEncodingDetector.h
rename to media/libmedia/include/media/CharacterEncodingDetector.h
diff --git a/media/libmedia/include/Crypto.h b/media/libmedia/include/media/Crypto.h
similarity index 100%
rename from media/libmedia/include/Crypto.h
rename to media/libmedia/include/media/Crypto.h
diff --git a/media/libmedia/include/CryptoHal.h b/media/libmedia/include/media/CryptoHal.h
similarity index 100%
rename from media/libmedia/include/CryptoHal.h
rename to media/libmedia/include/media/CryptoHal.h
diff --git a/media/libmedia/include/Drm.h b/media/libmedia/include/media/Drm.h
similarity index 100%
rename from media/libmedia/include/Drm.h
rename to media/libmedia/include/media/Drm.h
diff --git a/media/libmedia/include/DrmHal.h b/media/libmedia/include/media/DrmHal.h
similarity index 100%
rename from media/libmedia/include/DrmHal.h
rename to media/libmedia/include/media/DrmHal.h
diff --git a/media/libmedia/include/DrmPluginPath.h b/media/libmedia/include/media/DrmPluginPath.h
similarity index 100%
rename from media/libmedia/include/DrmPluginPath.h
rename to media/libmedia/include/media/DrmPluginPath.h
diff --git a/media/libmedia/include/DrmSessionClientInterface.h b/media/libmedia/include/media/DrmSessionClientInterface.h
similarity index 100%
rename from media/libmedia/include/DrmSessionClientInterface.h
rename to media/libmedia/include/media/DrmSessionClientInterface.h
diff --git a/media/libmedia/include/DrmSessionManager.h b/media/libmedia/include/media/DrmSessionManager.h
similarity index 100%
rename from media/libmedia/include/DrmSessionManager.h
rename to media/libmedia/include/media/DrmSessionManager.h
diff --git a/media/libmedia/include/ExtendedAudioBufferProvider.h b/media/libmedia/include/media/ExtendedAudioBufferProvider.h
similarity index 100%
rename from media/libmedia/include/ExtendedAudioBufferProvider.h
rename to media/libmedia/include/media/ExtendedAudioBufferProvider.h
diff --git a/media/libmedia/include/ICrypto.h b/media/libmedia/include/media/ICrypto.h
similarity index 100%
rename from media/libmedia/include/ICrypto.h
rename to media/libmedia/include/media/ICrypto.h
diff --git a/media/libmedia/include/IDataSource.h b/media/libmedia/include/media/IDataSource.h
similarity index 100%
rename from media/libmedia/include/IDataSource.h
rename to media/libmedia/include/media/IDataSource.h
diff --git a/media/libmedia/include/IDrm.h b/media/libmedia/include/media/IDrm.h
similarity index 100%
rename from media/libmedia/include/IDrm.h
rename to media/libmedia/include/media/IDrm.h
diff --git a/media/libmedia/include/IDrmClient.h b/media/libmedia/include/media/IDrmClient.h
similarity index 100%
rename from media/libmedia/include/IDrmClient.h
rename to media/libmedia/include/media/IDrmClient.h
diff --git a/media/libmedia/include/IHDCP.h b/media/libmedia/include/media/IHDCP.h
similarity index 100%
rename from media/libmedia/include/IHDCP.h
rename to media/libmedia/include/media/IHDCP.h
diff --git a/media/libmedia/include/IMediaCodecList.h b/media/libmedia/include/media/IMediaCodecList.h
similarity index 100%
rename from media/libmedia/include/IMediaCodecList.h
rename to media/libmedia/include/media/IMediaCodecList.h
diff --git a/media/libmedia/include/IMediaCodecService.h b/media/libmedia/include/media/IMediaCodecService.h
similarity index 100%
rename from media/libmedia/include/IMediaCodecService.h
rename to media/libmedia/include/media/IMediaCodecService.h
diff --git a/media/libmedia/include/IMediaDeathNotifier.h b/media/libmedia/include/media/IMediaDeathNotifier.h
similarity index 100%
rename from media/libmedia/include/IMediaDeathNotifier.h
rename to media/libmedia/include/media/IMediaDeathNotifier.h
diff --git a/media/libmedia/include/IMediaDrmService.h b/media/libmedia/include/media/IMediaDrmService.h
similarity index 100%
rename from media/libmedia/include/IMediaDrmService.h
rename to media/libmedia/include/media/IMediaDrmService.h
diff --git a/media/libmedia/include/IMediaExtractor.h b/media/libmedia/include/media/IMediaExtractor.h
similarity index 100%
rename from media/libmedia/include/IMediaExtractor.h
rename to media/libmedia/include/media/IMediaExtractor.h
diff --git a/media/libmedia/include/IMediaExtractorService.h b/media/libmedia/include/media/IMediaExtractorService.h
similarity index 100%
rename from media/libmedia/include/IMediaExtractorService.h
rename to media/libmedia/include/media/IMediaExtractorService.h
diff --git a/media/libmedia/include/IMediaHTTPConnection.h b/media/libmedia/include/media/IMediaHTTPConnection.h
similarity index 100%
rename from media/libmedia/include/IMediaHTTPConnection.h
rename to media/libmedia/include/media/IMediaHTTPConnection.h
diff --git a/media/libmedia/include/IMediaHTTPService.h b/media/libmedia/include/media/IMediaHTTPService.h
similarity index 100%
rename from media/libmedia/include/IMediaHTTPService.h
rename to media/libmedia/include/media/IMediaHTTPService.h
diff --git a/include/media/IMediaLogService.h b/media/libmedia/include/media/IMediaLogService.h
similarity index 99%
rename from include/media/IMediaLogService.h
rename to media/libmedia/include/media/IMediaLogService.h
index 0f09e0d..1df1907 100644
--- a/include/media/IMediaLogService.h
+++ b/media/libmedia/include/media/IMediaLogService.h
@@ -30,8 +30,8 @@
virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name) = 0;
virtual void unregisterWriter(const sp<IMemory>& shared) = 0;
- virtual void requestMergeWakeup() = 0;
+ virtual void requestMergeWakeup() = 0;
};
class BnMediaLogService: public BnInterface<IMediaLogService>
diff --git a/media/libmedia/include/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
similarity index 100%
rename from media/libmedia/include/IMediaMetadataRetriever.h
rename to media/libmedia/include/media/IMediaMetadataRetriever.h
diff --git a/media/libmedia/include/IMediaPlayer.h b/media/libmedia/include/media/IMediaPlayer.h
similarity index 100%
rename from media/libmedia/include/IMediaPlayer.h
rename to media/libmedia/include/media/IMediaPlayer.h
diff --git a/media/libmedia/include/IMediaPlayerClient.h b/media/libmedia/include/media/IMediaPlayerClient.h
similarity index 100%
rename from media/libmedia/include/IMediaPlayerClient.h
rename to media/libmedia/include/media/IMediaPlayerClient.h
diff --git a/media/libmedia/include/IMediaPlayerService.h b/media/libmedia/include/media/IMediaPlayerService.h
similarity index 100%
rename from media/libmedia/include/IMediaPlayerService.h
rename to media/libmedia/include/media/IMediaPlayerService.h
diff --git a/media/libmedia/include/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
similarity index 100%
rename from media/libmedia/include/IMediaRecorder.h
rename to media/libmedia/include/media/IMediaRecorder.h
diff --git a/media/libmedia/include/IMediaRecorderClient.h b/media/libmedia/include/media/IMediaRecorderClient.h
similarity index 100%
rename from media/libmedia/include/IMediaRecorderClient.h
rename to media/libmedia/include/media/IMediaRecorderClient.h
diff --git a/media/libmedia/include/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
similarity index 100%
rename from media/libmedia/include/IMediaSource.h
rename to media/libmedia/include/media/IMediaSource.h
diff --git a/media/libmedia/include/IOMX.h b/media/libmedia/include/media/IOMX.h
similarity index 100%
rename from media/libmedia/include/IOMX.h
rename to media/libmedia/include/media/IOMX.h
diff --git a/media/libmedia/include/IRemoteDisplay.h b/media/libmedia/include/media/IRemoteDisplay.h
similarity index 100%
rename from media/libmedia/include/IRemoteDisplay.h
rename to media/libmedia/include/media/IRemoteDisplay.h
diff --git a/media/libmedia/include/IRemoteDisplayClient.h b/media/libmedia/include/media/IRemoteDisplayClient.h
similarity index 100%
rename from media/libmedia/include/IRemoteDisplayClient.h
rename to media/libmedia/include/media/IRemoteDisplayClient.h
diff --git a/media/libmedia/include/IResourceManagerClient.h b/media/libmedia/include/media/IResourceManagerClient.h
similarity index 100%
rename from media/libmedia/include/IResourceManagerClient.h
rename to media/libmedia/include/media/IResourceManagerClient.h
diff --git a/media/libmedia/include/IResourceManagerService.h b/media/libmedia/include/media/IResourceManagerService.h
similarity index 100%
rename from media/libmedia/include/IResourceManagerService.h
rename to media/libmedia/include/media/IResourceManagerService.h
diff --git a/media/libmedia/include/IStreamSource.h b/media/libmedia/include/media/IStreamSource.h
similarity index 100%
rename from media/libmedia/include/IStreamSource.h
rename to media/libmedia/include/media/IStreamSource.h
diff --git a/media/libmedia/include/JetPlayer.h b/media/libmedia/include/media/JetPlayer.h
similarity index 100%
rename from media/libmedia/include/JetPlayer.h
rename to media/libmedia/include/media/JetPlayer.h
diff --git a/media/libmedia/include/LinearMap.h b/media/libmedia/include/media/LinearMap.h
similarity index 100%
rename from media/libmedia/include/LinearMap.h
rename to media/libmedia/include/media/LinearMap.h
diff --git a/media/libmedia/include/MediaCodecBuffer.h b/media/libmedia/include/media/MediaCodecBuffer.h
similarity index 100%
rename from media/libmedia/include/MediaCodecBuffer.h
rename to media/libmedia/include/media/MediaCodecBuffer.h
diff --git a/media/libmedia/include/MediaCodecInfo.h b/media/libmedia/include/media/MediaCodecInfo.h
similarity index 100%
rename from media/libmedia/include/MediaCodecInfo.h
rename to media/libmedia/include/media/MediaCodecInfo.h
diff --git a/media/libmedia/include/MediaDefs.h b/media/libmedia/include/media/MediaDefs.h
similarity index 100%
rename from media/libmedia/include/MediaDefs.h
rename to media/libmedia/include/media/MediaDefs.h
diff --git a/media/libmedia/include/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
similarity index 100%
rename from media/libmedia/include/MediaMetadataRetrieverInterface.h
rename to media/libmedia/include/media/MediaMetadataRetrieverInterface.h
diff --git a/media/libmedia/include/MediaProfiles.h b/media/libmedia/include/media/MediaProfiles.h
similarity index 100%
rename from media/libmedia/include/MediaProfiles.h
rename to media/libmedia/include/media/MediaProfiles.h
diff --git a/media/libmedia/include/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
similarity index 100%
rename from media/libmedia/include/MediaRecorderBase.h
rename to media/libmedia/include/media/MediaRecorderBase.h
diff --git a/media/libmedia/include/MediaResource.h b/media/libmedia/include/media/MediaResource.h
similarity index 100%
rename from media/libmedia/include/MediaResource.h
rename to media/libmedia/include/media/MediaResource.h
diff --git a/media/libmedia/include/MediaResourcePolicy.h b/media/libmedia/include/media/MediaResourcePolicy.h
similarity index 100%
rename from media/libmedia/include/MediaResourcePolicy.h
rename to media/libmedia/include/media/MediaResourcePolicy.h
diff --git a/media/libmedia/include/MemoryLeakTrackUtil.h b/media/libmedia/include/media/MemoryLeakTrackUtil.h
similarity index 100%
rename from media/libmedia/include/MemoryLeakTrackUtil.h
rename to media/libmedia/include/media/MemoryLeakTrackUtil.h
diff --git a/media/libmedia/include/Metadata.h b/media/libmedia/include/media/Metadata.h
similarity index 100%
rename from media/libmedia/include/Metadata.h
rename to media/libmedia/include/media/Metadata.h
diff --git a/media/libmedia/include/MidiDeviceInfo.h b/media/libmedia/include/media/MidiDeviceInfo.h
similarity index 100%
rename from media/libmedia/include/MidiDeviceInfo.h
rename to media/libmedia/include/media/MidiDeviceInfo.h
diff --git a/media/libmedia/include/MidiIoWrapper.h b/media/libmedia/include/media/MidiIoWrapper.h
similarity index 100%
rename from media/libmedia/include/MidiIoWrapper.h
rename to media/libmedia/include/media/MidiIoWrapper.h
diff --git a/media/libmedia/include/Modulo.h b/media/libmedia/include/media/Modulo.h
similarity index 100%
rename from media/libmedia/include/Modulo.h
rename to media/libmedia/include/media/Modulo.h
diff --git a/media/libmedia/include/OMXBuffer.h b/media/libmedia/include/media/OMXBuffer.h
similarity index 100%
rename from media/libmedia/include/OMXBuffer.h
rename to media/libmedia/include/media/OMXBuffer.h
diff --git a/media/libmedia/include/OMXFenceParcelable.h b/media/libmedia/include/media/OMXFenceParcelable.h
similarity index 100%
rename from media/libmedia/include/OMXFenceParcelable.h
rename to media/libmedia/include/media/OMXFenceParcelable.h
diff --git a/media/libmedia/include/PluginLoader.h b/media/libmedia/include/media/PluginLoader.h
similarity index 100%
rename from media/libmedia/include/PluginLoader.h
rename to media/libmedia/include/media/PluginLoader.h
diff --git a/media/libmedia/include/RecordBufferConverter.h b/media/libmedia/include/media/RecordBufferConverter.h
similarity index 100%
rename from media/libmedia/include/RecordBufferConverter.h
rename to media/libmedia/include/media/RecordBufferConverter.h
diff --git a/media/libmedia/include/RingBuffer.h b/media/libmedia/include/media/RingBuffer.h
similarity index 100%
rename from media/libmedia/include/RingBuffer.h
rename to media/libmedia/include/media/RingBuffer.h
diff --git a/media/libmedia/include/SharedLibrary.h b/media/libmedia/include/media/SharedLibrary.h
similarity index 100%
rename from media/libmedia/include/SharedLibrary.h
rename to media/libmedia/include/media/SharedLibrary.h
diff --git a/media/libmedia/include/SingleStateQueue.h b/media/libmedia/include/media/SingleStateQueue.h
similarity index 100%
rename from media/libmedia/include/SingleStateQueue.h
rename to media/libmedia/include/media/SingleStateQueue.h
diff --git a/media/libmedia/include/StringArray.h b/media/libmedia/include/media/StringArray.h
similarity index 100%
rename from media/libmedia/include/StringArray.h
rename to media/libmedia/include/media/StringArray.h
diff --git a/media/libmedia/include/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
similarity index 99%
rename from media/libmedia/include/TypeConverter.h
rename to media/libmedia/include/media/TypeConverter.h
index e262eef..cb8a307 100644
--- a/media/libmedia/include/TypeConverter.h
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -25,8 +25,8 @@
#include <utils/Vector.h>
#include <utils/SortedVector.h>
+#include <media/AudioParameter.h>
#include "convert.h"
-#include "AudioParameter.h"
namespace android {
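
With the headers moved under include/media and exported through header libraries, consumers keep the library's include directory as the root and spell includes as <media/...>, exactly as the TypeConverter.h hunk above does. A hypothetical consumer sketch (assumes the relevant library, e.g. libmedia_helper, is linked):

    #include <media/AudioParameter.h>   // was: #include "AudioParameter.h"

    int main() {
        android::AudioParameter params;
        params.add(android::String8("k"), android::String8("v"));
        return 0;
    }
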
diff --git a/media/libmedia/include/Visualizer.h b/media/libmedia/include/media/Visualizer.h
similarity index 100%
rename from media/libmedia/include/Visualizer.h
rename to media/libmedia/include/media/Visualizer.h
diff --git a/media/libmedia/include/convert.h b/media/libmedia/include/media/convert.h
similarity index 100%
rename from media/libmedia/include/convert.h
rename to media/libmedia/include/media/convert.h
diff --git a/media/libmedia/include/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
similarity index 100%
rename from media/libmedia/include/mediametadataretriever.h
rename to media/libmedia/include/media/mediametadataretriever.h
diff --git a/media/libmedia/include/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
similarity index 100%
rename from media/libmedia/include/mediaplayer.h
rename to media/libmedia/include/media/mediaplayer.h
diff --git a/media/libmedia/include/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
similarity index 100%
rename from media/libmedia/include/mediarecorder.h
rename to media/libmedia/include/media/mediarecorder.h
diff --git a/media/libmedia/include/mediascanner.h b/media/libmedia/include/media/mediascanner.h
similarity index 100%
rename from media/libmedia/include/mediascanner.h
rename to media/libmedia/include/media/mediascanner.h
diff --git a/media/libmedia/omx/1.0/WGraphicBufferSource.cpp b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
index b4e2975..4c543fa 100644
--- a/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
@@ -53,9 +53,8 @@
}
BnStatus LWGraphicBufferSource::setTimeLapseConfig(
- int64_t timePerFrameUs, int64_t timePerCaptureUs) {
- return toBinderStatus(mBase->setTimeLapseConfig(
- timePerFrameUs, timePerCaptureUs));
+ double fps, double captureFps) {
+ return toBinderStatus(mBase->setTimeLapseConfig(fps, captureFps));
}
BnStatus LWGraphicBufferSource::setStartTimeUs(
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 95f378f..e1d762f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -163,7 +163,7 @@
// TBD mTrackEveryTimeDurationUs = 0;
mAnalyticsItem->setInt32(kRecorderCaptureFpsEnable, mCaptureFpsEnable);
mAnalyticsItem->setDouble(kRecorderCaptureFps, mCaptureFps);
- // TBD mTimeBetweenCaptureUs = -1;
+ // TBD mCaptureFps = -1.0;
// TBD mCameraSourceTimeLapse = NULL;
// TBD mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
// TBD mEncoderProfiles = MediaProfiles::getInstance();
@@ -709,26 +709,11 @@
status_t StagefrightRecorder::setParamCaptureFps(double fps) {
ALOGV("setParamCaptureFps: %.2f", fps);
- constexpr int64_t k1E12 = 1000000000000ll;
- int64_t fpsx1e12 = k1E12 * fps;
- if (fpsx1e12 == 0) {
- ALOGE("FPS is zero or too small");
+ if (!(fps >= 1.0 / 86400)) {
+ ALOGE("FPS is too small");
return BAD_VALUE;
}
-
- // This does not overflow since 10^6 * 10^12 < 2^63
- int64_t timeUs = 1000000ll * k1E12 / fpsx1e12;
-
- // Not allowing time more than a day and a millisecond for error margin.
- // Note: 1e12 / 86400 = 11574074.(074) and 1e18 / 11574074 = 86400000553;
- // therefore 1 ms of margin should be sufficient.
- if (timeUs <= 0 || timeUs > 86400001000ll) {
- ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", (long long)timeUs);
- return BAD_VALUE;
- }
-
mCaptureFps = fps;
- mTimeBetweenCaptureUs = timeUs;
return OK;
}
@@ -1582,16 +1567,15 @@
videoSize.width = mVideoWidth;
videoSize.height = mVideoHeight;
if (mCaptureFpsEnable) {
- if (mTimeBetweenCaptureUs < 0) {
- ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
- (long long)mTimeBetweenCaptureUs);
+ if (!(mCaptureFps > 0.)) {
+ ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps);
return BAD_VALUE;
}
mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid, mClientPid,
videoSize, mFrameRate, mPreviewSurface,
- mTimeBetweenCaptureUs);
+ std::llround(1e6 / mCaptureFps));
*cameraSource = mCameraSourceTimeLapse;
} else {
*cameraSource = CameraSource::CreateFromCamera(
@@ -1687,12 +1671,11 @@
// set up time lapse/slow motion for surface source
if (mCaptureFpsEnable) {
- if (mTimeBetweenCaptureUs <= 0) {
- ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
- (long long)mTimeBetweenCaptureUs);
+ if (!(mCaptureFps > 0.)) {
+ ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps);
return BAD_VALUE;
}
- format->setInt64("time-lapse", mTimeBetweenCaptureUs);
+ format->setDouble("time-lapse-fps", mCaptureFps);
}
}
@@ -2083,8 +2066,7 @@
mMaxFileSizeBytes = 0;
mTrackEveryTimeDurationUs = 0;
mCaptureFpsEnable = false;
- mCaptureFps = 0.0;
- mTimeBetweenCaptureUs = -1;
+ mCaptureFps = -1.0;
mCameraSourceTimeLapse = NULL;
mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
mEncoderProfiles = MediaProfiles::getInstance();
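
The recorder now stores the capture rate as a double and only converts to a per-frame period at the point of use; the new validation accepts anything at or above one frame per day, and the !(fps >= ...) form also rejects NaN. A small sketch of both steps, with constants mirroring the diff:

    #include <cmath>
    #include <cstdio>

    static bool setCaptureFps(double fps, double* storedFps) {
        if (!(fps >= 1.0 / 86400)) {       // also rejects NaN, zero and negatives
            std::fprintf(stderr, "FPS is too small\n");
            return false;
        }
        *storedFps = fps;
        return true;
    }

    int main() {
        double fps = -1.0;                              // "unset", as in reset()
        if (setCaptureFps(0.1, &fps)) {                 // one frame every 10 seconds
            long long timeBetweenCaptureUs = std::llround(1e6 / fps);
            std::printf("capture period: %lld us\n", timeBetweenCaptureUs);  // 10000000
        }
        return 0;
    }
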
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index be5c0c1..2486b76 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -26,7 +26,7 @@
PipeReader::PipeReader(Pipe& pipe) :
NBAIO_Source(pipe.mFormat),
- mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/),
+ mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, true /*flush*/),
mFramesOverrun(0),
mOverruns(0)
{
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 63b9571..8b91541 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -546,8 +546,8 @@
mRepeatFrameDelayUs(-1ll),
mMaxPtsGapUs(-1ll),
mMaxFps(-1),
- mTimePerFrameUs(-1ll),
- mTimePerCaptureUs(-1ll),
+ mFps(-1.0),
+ mCaptureFps(-1.0),
mCreateInputBuffersSuspended(false),
mLatency(0),
mTunneled(false),
@@ -1802,8 +1802,8 @@
mMaxFps = -1;
}
- if (!msg->findInt64("time-lapse", &mTimePerCaptureUs)) {
- mTimePerCaptureUs = -1ll;
+ if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
+ mCaptureFps = -1.0;
}
if (!msg->findInt32(
@@ -3739,17 +3739,18 @@
def.nBufferSize = (video_def->nStride * video_def->nSliceHeight * 3) / 2;
- float frameRate;
- if (!msg->findFloat("frame-rate", &frameRate)) {
+ float framerate;
+ if (!msg->findFloat("frame-rate", &framerate)) {
int32_t tmp;
if (!msg->findInt32("frame-rate", &tmp)) {
return INVALID_OPERATION;
}
- frameRate = (float)tmp;
- mTimePerFrameUs = (int64_t) (1000000.0f / frameRate);
+ mFps = (double)tmp;
+ } else {
+ mFps = (double)framerate;
}
- video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
+ video_def->xFramerate = (OMX_U32)(mFps * 65536);
video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
// this is redundant as it was already set up in setVideoPortFormatType
// FIXME for now skip this only for flexible YUV formats
@@ -6597,11 +6598,10 @@
}
}
- if (mCodec->mTimePerCaptureUs > 0ll
- && mCodec->mTimePerFrameUs > 0ll) {
+ if (mCodec->mCaptureFps > 0. && mCodec->mFps > 0.) {
err = statusFromBinderStatus(
mCodec->mGraphicBufferSource->setTimeLapseConfig(
- mCodec->mTimePerFrameUs, mCodec->mTimePerCaptureUs));
+ mCodec->mFps, mCodec->mCaptureFps));
if (err != OK) {
ALOGE("[%s] Unable to configure time lapse (err %d)",
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 61b8f9d..372b11a 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -78,6 +78,7 @@
libaudioutils \
libbinder \
libcamera_client \
+ libcrypto \
libcutils \
libdl \
libdrmframework \
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
index 8e4d064..cb62d92 100644
--- a/media/libstagefright/foundation/MediaBufferGroup.cpp
+++ b/media/libstagefright/foundation/MediaBufferGroup.cpp
@@ -199,6 +199,7 @@
}
void MediaBufferGroup::signalBufferReturned(MediaBuffer *) {
+ Mutex::Autolock autoLock(mLock);
mCondition.signal();
}
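
The one-line change above takes mLock before signaling, closing the window in which a waiter checks its predicate, decides to sleep, and then misses a signal delivered before it actually blocks. A generic sketch of that pattern with the C++ standard library standing in for android::Mutex/Condition:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex gLock;
    std::condition_variable gCondition;
    int gFreeBuffers = 0;

    void signalBufferReturned() {
        std::lock_guard<std::mutex> autoLock(gLock);  // held while signaling, as in the diff
        ++gFreeBuffers;
        gCondition.notify_one();
    }

    void acquireBuffer() {
        std::unique_lock<std::mutex> autoLock(gLock);
        gCondition.wait(autoLock, [] { return gFreeBuffers > 0; });
        --gFreeBuffers;
    }

    int main() {
        std::thread consumer(acquireBuffer);
        signalBufferReturned();
        consumer.join();
        std::cout << "buffer handed over\n";
        return 0;
    }
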
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index bbcea51..00cf142 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -26,6 +26,7 @@
#include "include/avc_utils.h"
#include "include/ID3.h"
#include "mpeg2ts/AnotherPacketSource.h"
+#include "mpeg2ts/HlsSampleDecryptor.h"
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -36,7 +37,6 @@
#include <ctype.h>
#include <inttypes.h>
-#include <openssl/aes.h>
#define FLOGV(fmt, ...) ALOGV("[fetcher-%d] " fmt, mFetcherID, ##__VA_ARGS__)
#define FSLOGV(stream, fmt, ...) ALOGV("[fetcher-%d] [%s] " fmt, mFetcherID, \
@@ -167,11 +167,15 @@
mFirstPTSValid(false),
mFirstTimeUs(-1ll),
mVideoBuffer(new AnotherPacketSource(NULL)),
+ mSampleAesKeyItemChanged(false),
mThresholdRatio(-1.0f),
mDownloadState(new DownloadState()),
mHasMetadata(false) {
memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
mHTTPDownloader = mSession->getHTTPDownloader();
+
+ memset(mKeyData, 0, sizeof(mKeyData));
+ memset(mAESInitVec, 0, sizeof(mAESInitVec));
}
PlaylistFetcher::~PlaylistFetcher() {
@@ -306,6 +310,15 @@
}
}
+ // TODO: Revise this when we add support for KEYFORMAT
+ // If method has changed (e.g., -> NONE); sufficient to check at the segment boundary
+ if (mSampleAesKeyItem != NULL && first && found && method != "SAMPLE-AES") {
+ ALOGI("decryptBuffer: resetting mSampleAesKeyItem(%p) with method %s",
+ mSampleAesKeyItem.get(), method.c_str());
+ mSampleAesKeyItem = NULL;
+ mSampleAesKeyItemChanged = true;
+ }
+
if (!found) {
method = "NONE";
}
@@ -313,6 +326,8 @@
if (method == "NONE") {
return OK;
+ } else if (method == "SAMPLE-AES") {
+ ALOGV("decryptBuffer: Non-Widevine SAMPLE-AES is supported now.");
} else if (!(method == "AES-128")) {
ALOGE("Unsupported cipher method '%s'", method.c_str());
return ERROR_UNSUPPORTED;
@@ -345,6 +360,79 @@
mAESKeyForURI.add(keyURI, key);
}
+ if (first) {
+ // If decrypting the first block in a file, read the iv from the manifest
+ // or derive the iv from the file's sequence number.
+
+ unsigned char AESInitVec[AES_BLOCK_SIZE];
+ AString iv;
+ if (itemMeta->findString("cipher-iv", &iv)) {
+ if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
+ || iv.size() > 16 * 2 + 2) {
+ ALOGE("malformed cipher IV '%s'.", iv.c_str());
+ return ERROR_MALFORMED;
+ }
+
+ while (iv.size() < 16 * 2 + 2) {
+ iv.insert("0", 1, 2);
+ }
+
+ memset(AESInitVec, 0, sizeof(AESInitVec));
+ for (size_t i = 0; i < 16; ++i) {
+ char c1 = tolower(iv.c_str()[2 + 2 * i]);
+ char c2 = tolower(iv.c_str()[3 + 2 * i]);
+ if (!isxdigit(c1) || !isxdigit(c2)) {
+ ALOGE("malformed cipher IV '%s'.", iv.c_str());
+ return ERROR_MALFORMED;
+ }
+ uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
+ uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
+
+ AESInitVec[i] = nibble1 << 4 | nibble2;
+ }
+ } else {
+ memset(AESInitVec, 0, sizeof(AESInitVec));
+ AESInitVec[15] = mSeqNumber & 0xff;
+ AESInitVec[14] = (mSeqNumber >> 8) & 0xff;
+ AESInitVec[13] = (mSeqNumber >> 16) & 0xff;
+ AESInitVec[12] = (mSeqNumber >> 24) & 0xff;
+ }
+
+ bool newKey = memcmp(mKeyData, key->data(), AES_BLOCK_SIZE) != 0;
+ bool newInitVec = memcmp(mAESInitVec, AESInitVec, AES_BLOCK_SIZE) != 0;
+ bool newSampleAesKeyItem = newKey || newInitVec;
+ ALOGV("decryptBuffer: SAMPLE-AES newKeyItem %d/%d (Key %d initVec %d)",
+ mSampleAesKeyItemChanged, newSampleAesKeyItem, newKey, newInitVec);
+
+ if (newSampleAesKeyItem) {
+ memcpy(mKeyData, key->data(), AES_BLOCK_SIZE);
+ memcpy(mAESInitVec, AESInitVec, AES_BLOCK_SIZE);
+
+ if (method == "SAMPLE-AES") {
+ mSampleAesKeyItemChanged = true;
+
+ sp<ABuffer> keyDataBuffer = ABuffer::CreateAsCopy(mKeyData, sizeof(mKeyData));
+ sp<ABuffer> initVecBuffer = ABuffer::CreateAsCopy(mAESInitVec, sizeof(mAESInitVec));
+
+ // always allocating a new one rather than updating the old message
+ // lower layer might still have a reference to the old message
+ mSampleAesKeyItem = new AMessage();
+ mSampleAesKeyItem->setBuffer("keyData", keyDataBuffer);
+ mSampleAesKeyItem->setBuffer("initVec", initVecBuffer);
+
+ ALOGV("decryptBuffer: New SampleAesKeyItem: Key: %s IV: %s",
+ HlsSampleDecryptor::aesBlockToStr(mKeyData).c_str(),
+ HlsSampleDecryptor::aesBlockToStr(mAESInitVec).c_str());
+ } // SAMPLE-AES
+ } // newSampleAesKeyItem
+ } // first
+
+ if (method == "SAMPLE-AES") {
+ ALOGV("decryptBuffer: skipping full-seg decrypt for SAMPLE-AES");
+ return OK;
+ }
+
+
AES_KEY aes_key;
if (AES_set_decrypt_key(key->data(), 128, &aes_key) != 0) {
ALOGE("failed to set AES decryption key.");
@@ -361,44 +449,6 @@
return ERROR_MALFORMED;
}
- if (first) {
- // If decrypting the first block in a file, read the iv from the manifest
- // or derive the iv from the file's sequence number.
-
- AString iv;
- if (itemMeta->findString("cipher-iv", &iv)) {
- if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
- || iv.size() > 16 * 2 + 2) {
- ALOGE("malformed cipher IV '%s'.", iv.c_str());
- return ERROR_MALFORMED;
- }
-
- while (iv.size() < 16 * 2 + 2) {
- iv.insert("0", 1, 2);
- }
-
- memset(mAESInitVec, 0, sizeof(mAESInitVec));
- for (size_t i = 0; i < 16; ++i) {
- char c1 = tolower(iv.c_str()[2 + 2 * i]);
- char c2 = tolower(iv.c_str()[3 + 2 * i]);
- if (!isxdigit(c1) || !isxdigit(c2)) {
- ALOGE("malformed cipher IV '%s'.", iv.c_str());
- return ERROR_MALFORMED;
- }
- uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
- uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
-
- mAESInitVec[i] = nibble1 << 4 | nibble2;
- }
- } else {
- memset(mAESInitVec, 0, sizeof(mAESInitVec));
- mAESInitVec[15] = mSeqNumber & 0xff;
- mAESInitVec[14] = (mSeqNumber >> 8) & 0xff;
- mAESInitVec[13] = (mSeqNumber >> 16) & 0xff;
- mAESInitVec[12] = (mSeqNumber >> 24) & 0xff;
- }
- }
-
AES_cbc_encrypt(
buffer->data(), buffer->data(), buffer->size(),
&aes_key, mAESInitVec, AES_DECRYPT);
@@ -409,7 +459,7 @@
status_t PlaylistFetcher::checkDecryptPadding(const sp<ABuffer> &buffer) {
AString method;
CHECK(buffer->meta()->findString("cipher-method", &method));
- if (method == "NONE") {
+ if (method == "NONE" || method == "SAMPLE-AES") {
return OK;
}
@@ -1656,6 +1706,11 @@
mNextPTSTimeUs = -1ll;
}
+ if (mSampleAesKeyItemChanged) {
+ mTSParser->signalNewSampleAesKey(mSampleAesKeyItem);
+ mSampleAesKeyItemChanged = false;
+ }
+
size_t offset = 0;
while (offset + 188 <= buffer->size()) {
status_t err = mTSParser->feedTSPacket(buffer->data() + offset, 188);
@@ -2038,10 +2093,24 @@
}
}
+ sp<HlsSampleDecryptor> sampleDecryptor = NULL;
+ if (mSampleAesKeyItem != NULL) {
+ ALOGV("extractAndQueueAccessUnits[%d] SampleAesKeyItem: Key: %s IV: %s",
+ mSeqNumber,
+ HlsSampleDecryptor::aesBlockToStr(mKeyData).c_str(),
+ HlsSampleDecryptor::aesBlockToStr(mAESInitVec).c_str());
+
+ sampleDecryptor = new HlsSampleDecryptor(mSampleAesKeyItem);
+ }
+
+ int frameId = 0;
+
size_t offset = 0;
while (offset < buffer->size()) {
const uint8_t *adtsHeader = buffer->data() + offset;
CHECK_LT(offset + 5, buffer->size());
+ // non-const pointer for decryption if needed
+ uint8_t *adtsFrame = buffer->data() + offset;
unsigned aac_frame_length =
((adtsHeader[3] & 3) << 11)
@@ -2099,6 +2168,18 @@
}
}
+ if (sampleDecryptor != NULL) {
+ bool protection_absent = (adtsHeader[1] & 0x1);
+ size_t headerSize = protection_absent ? 7 : 9;
+ if (frameId == 0) {
+ ALOGV("extractAndQueueAAC[%d] protection_absent %d (%02x) headerSize %zu",
+ mSeqNumber, protection_absent, adtsHeader[1], headerSize);
+ }
+
+ sampleDecryptor->processAAC(headerSize, adtsFrame, aac_frame_length);
+ }
+ frameId++;
+
sp<ABuffer> unit = new ABuffer(aac_frame_length);
memcpy(unit->data(), adtsHeader, aac_frame_length);
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index ee7d3a1..d7db54a 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -19,6 +19,7 @@
#define PLAYLIST_FETCHER_H_
#include <media/stagefright/foundation/AHandler.h>
+#include <openssl/aes.h>
#include "mpeg2ts/ATSParser.h"
#include "LiveSession.h"
@@ -175,7 +176,10 @@
// Stores the initialization vector to decrypt the next block of cipher text, which can
// either be derived from the sequence number, read from the manifest, or copied from
// the last block of cipher text (cipher-block chaining).
- unsigned char mAESInitVec[16];
+ unsigned char mAESInitVec[AES_BLOCK_SIZE];
+ unsigned char mKeyData[AES_BLOCK_SIZE];
+ bool mSampleAesKeyItemChanged;
+ sp<AMessage> mSampleAesKeyItem;
Mutex mThresholdLock;
float mThresholdRatio;
diff --git a/media/libstagefright/include/ACodec.h b/media/libstagefright/include/ACodec.h
index 6c1a5c6..06ee0e8 100644
--- a/media/libstagefright/include/ACodec.h
+++ b/media/libstagefright/include/ACodec.h
@@ -293,8 +293,8 @@
int64_t mRepeatFrameDelayUs;
int64_t mMaxPtsGapUs;
float mMaxFps;
- int64_t mTimePerFrameUs;
- int64_t mTimePerCaptureUs;
+ double mFps;
+ double mCaptureFps;
bool mCreateInputBuffersSuspended;
uint32_t mLatency;
diff --git a/media/libstagefright/include/foundation/ADebug.h b/media/libstagefright/include/foundation/ADebug.h
index 564b3f7..9ad45f3 100644
--- a/media/libstagefright/include/foundation/ADebug.h
+++ b/media/libstagefright/include/foundation/ADebug.h
@@ -99,10 +99,30 @@
#define CHECK_GE(x,y) CHECK_OP(x,y,GE,>=)
#define CHECK_GT(x,y) CHECK_OP(x,y,GT,>)
-#define TRESPASS() \
+#define TRESPASS(...) \
LOG_ALWAYS_FATAL( \
__FILE__ ":" LITERAL_TO_STRING(__LINE__) \
- " Should not be here.");
+ " Should not be here. " __VA_ARGS__);
+
+#ifdef NDEBUG
+#define CHECK_DBG CHECK
+#define CHECK_EQ_DBG CHECK_EQ
+#define CHECK_NE_DBG CHECK_NE
+#define CHECK_LE_DBG CHECK_LE
+#define CHECK_LT_DBG CHECK_LT
+#define CHECK_GE_DBG CHECK_GE
+#define CHECK_GT_DBG CHECK_GT
+#define TRESPASS_DBG TRESPASS
+#else
+#define CHECK_DBG(condition)
+#define CHECK_EQ_DBG(x,y)
+#define CHECK_NE_DBG(x,y)
+#define CHECK_LE_DBG(x,y)
+#define CHECK_LT_DBG(x,y)
+#define CHECK_GE_DBG(x,y)
+#define CHECK_GT_DBG(x,y)
+#define TRESPASS_DBG(...)
+#endif
struct ADebug {
enum Level {
diff --git a/media/libstagefright/include/foundation/FileDescriptor.h b/media/libstagefright/include/foundation/FileDescriptor.h
new file mode 100644
index 0000000..7acf4b8
--- /dev/null
+++ b/media/libstagefright/include/foundation/FileDescriptor.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_FILE_DESCRIPTOR_H_
+#define STAGEFRIGHT_FOUNDATION_FILE_DESCRIPTOR_H_
+
+#include <memory>
+
+#include <unistd.h>
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+/**
+ * FileDescriptor is a utility class for managing file descriptors in a scoped way.
+ *
+ * usage:
+ *
+ * status_t function(int fd) {
+ * FileDescriptor::Autoclose managedFd(fd);
+ * if (error_condition)
+ * return ERROR;
+ * next_function(managedFd.release());
+ * }
+ */
+struct FileDescriptor {
+ // created this class with minimal methods. more methods can be added here to manage
+ // a shared file descriptor object.
+
+ /**
+ * A locally scoped managed file descriptor object. This object is not shareable/copyable and
+ * is not thread safe.
+ */
+ struct Autoclose {
+ // created this class with minimal methods
+ /**
+ * Creates a locally scoped file descriptor holder object taking ownership of the passed in
+ * file descriptor.
+ */
+ Autoclose(int fd)
+ : mFd(fd) {
+
+ }
+
+ ~Autoclose() {
+ if (isValid()) {
+ ::close(mFd);
+ mFd = kInvalidFileDescriptor;
+ }
+ }
+
+ /**
+ * Releases the managed file descriptor from the holder. This invalidates the (remaining)
+ * file descriptor in this object.
+ */
+ int release() {
+ int managedFd = mFd;
+ mFd = kInvalidFileDescriptor;
+ return managedFd;
+ }
+
+ /**
+ * Checks whether the managed file descriptor is valid
+ */
+ bool isValid() const {
+ return mFd >= 0;
+ }
+
+ private:
+ // not yet needed
+
+ /**
+ * Returns the managed file descriptor from this object without releasing the ownership.
+ * The returned file descriptor has the same lifecycle as the managed file descriptor
+ * in this object. Therefore, care must be taken that it is not closed, and that this
+ * object keeps managing the returned file descriptor for the duration of its use.
+ */
+ int get() const {
+ return mFd;
+ }
+
+ private:
+ int mFd;
+
+ enum {
+ kInvalidFileDescriptor = -1,
+ };
+
+ DISALLOW_EVIL_CONSTRUCTORS(Autoclose);
+ };
+
+private:
+ std::shared_ptr<Autoclose> mSharedFd;
+};
+
+} // namespace android
+
+#endif // STAGEFRIGHT_FOUNDATION_FILE_DESCRIPTOR_H_
+
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 8099edb..31edb21 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -105,6 +105,8 @@
void updateCasSessions();
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
private:
struct StreamInfo {
unsigned mType;
@@ -119,6 +121,7 @@
bool mFirstPTSValid;
uint64_t mFirstPTS;
int64_t mLastRecoveredPTS;
+ sp<AMessage> mSampleAesKeyItem;
status_t parseProgramMap(ABitReader *br);
int64_t recoverPTS(uint64_t PTS_33bit);
@@ -168,6 +171,8 @@
bool isVideo() const;
bool isMeta() const;
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
protected:
virtual ~Stream();
@@ -194,6 +199,8 @@
ElementaryStreamQueue *mQueue;
bool mScrambled;
+ bool mSampleEncrypted;
+ sp<AMessage> mSampleAesKeyItem;
sp<IMemory> mMem;
sp<MemoryDealer> mDealer;
sp<ABuffer> mDescrambledBuffer;
@@ -586,6 +593,10 @@
sp<Stream> stream = new Stream(
this, info.mPID, info.mType, PCR_PID, info.mCASystemId);
+ if (mSampleAesKeyItem != NULL) {
+ stream->signalNewSampleAesKey(mSampleAesKeyItem);
+ }
+
isAddingScrambledStream |= info.mCASystemId >= 0;
mStreams.add(info.mPID, stream);
}
@@ -710,22 +721,32 @@
mPrevPTS(0),
mQueue(NULL),
mScrambled(CA_system_ID >= 0) {
- ALOGV("new stream PID 0x%02x, type 0x%02x, scrambled %d",
- elementaryPID, streamType, mScrambled);
- uint32_t flags = (isVideo() && mScrambled) ?
- ElementaryStreamQueue::kFlag_ScrambledData : 0;
+ mSampleEncrypted =
+ mStreamType == STREAMTYPE_H264_ENCRYPTED ||
+ mStreamType == STREAMTYPE_AAC_ENCRYPTED ||
+ mStreamType == STREAMTYPE_AC3_ENCRYPTED;
+
+ ALOGV("new stream PID 0x%02x, type 0x%02x, scrambled %d, SampleEncrypted: %d",
+ elementaryPID, streamType, mScrambled, mSampleEncrypted);
+
+ uint32_t flags =
+ (isVideo() && mScrambled) ? ElementaryStreamQueue::kFlag_ScrambledData :
+ (mSampleEncrypted) ? ElementaryStreamQueue::kFlag_SampleEncryptedData :
+ 0;
ElementaryStreamQueue::Mode mode = ElementaryStreamQueue::INVALID;
switch (mStreamType) {
case STREAMTYPE_H264:
+ case STREAMTYPE_H264_ENCRYPTED:
mode = ElementaryStreamQueue::H264;
flags |= (mProgram->parserFlags() & ALIGNED_VIDEO_DATA) ?
ElementaryStreamQueue::kFlag_AlignedData : 0;
break;
case STREAMTYPE_MPEG2_AUDIO_ADTS:
+ case STREAMTYPE_AAC_ENCRYPTED:
mode = ElementaryStreamQueue::AAC;
break;
@@ -745,6 +766,7 @@
case STREAMTYPE_LPCM_AC3:
case STREAMTYPE_AC3:
+ case STREAMTYPE_AC3_ENCRYPTED:
mode = ElementaryStreamQueue::AC3;
break;
@@ -761,6 +783,10 @@
mQueue = new ElementaryStreamQueue(mode, flags);
if (mQueue != NULL) {
+ if (mSampleAesKeyItem != NULL) {
+ mQueue->signalNewSampleAesKey(mSampleAesKeyItem);
+ }
+
ensureBufferCapacity(kInitialStreamBufferSize);
if (mScrambled && (isAudio() || isVideo())) {
@@ -913,6 +939,7 @@
bool ATSParser::Stream::isVideo() const {
switch (mStreamType) {
case STREAMTYPE_H264:
+ case STREAMTYPE_H264_ENCRYPTED:
case STREAMTYPE_MPEG1_VIDEO:
case STREAMTYPE_MPEG2_VIDEO:
case STREAMTYPE_MPEG4_VIDEO:
@@ -930,6 +957,8 @@
case STREAMTYPE_MPEG2_AUDIO_ADTS:
case STREAMTYPE_LPCM_AC3:
case STREAMTYPE_AC3:
+ case STREAMTYPE_AAC_ENCRYPTED:
+ case STREAMTYPE_AC3_ENCRYPTED:
return true;
default:
@@ -1454,7 +1483,7 @@
mPrevPTS = PTS;
#endif
- ALOGV("onPayloadData mStreamType=0x%02x", mStreamType);
+ ALOGV("onPayloadData mStreamType=0x%02x size: %zu", mStreamType, size);
int64_t timeUs = 0ll; // no presentation timestamp available.
if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
@@ -1492,6 +1521,8 @@
}
mSource = new AnotherPacketSource(meta);
mSource->queueAccessUnit(accessUnit);
+ ALOGV("onPayloadData: created AnotherPacketSource PID 0x%08x of type 0x%02x",
+ mElementaryPID, mStreamType);
}
} else if (mQueue->getFormat() != NULL) {
// After a discontinuity we invalidate the queue's format
@@ -1730,6 +1761,9 @@
if (!found) {
mPrograms.push(
new Program(this, program_number, programMapPID, mLastRecoveredPTS));
+ if (mSampleAesKeyItem != NULL) {
+ mPrograms.top()->signalNewSampleAesKey(mSampleAesKeyItem);
+ }
}
if (mPSISections.indexOfKey(programMapPID) < 0) {
@@ -2228,4 +2262,40 @@
ALOGV("crc: %08x\n", crc);
return (crc == 0);
}
+
+// SAMPLE_AES key handling
+// TODO: Merge these into their respective classes after Widevine-HLS
+void ATSParser::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ ALOGD("signalNewSampleAesKey: %p", keyItem.get());
+
+ mSampleAesKeyItem = keyItem;
+
+ // a NULL key item will propagate to existing ElementaryStreamQueues
+ for (size_t i = 0; i < mPrograms.size(); ++i) {
+ mPrograms[i]->signalNewSampleAesKey(keyItem);
+ }
+}
+
+void ATSParser::Program::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ ALOGD("Program::signalNewSampleAesKey: %p", keyItem.get());
+
+ mSampleAesKeyItem = keyItem;
+
+ // a NULL key item will propagate to existing ElementaryStreamQueues
+ for (size_t i = 0; i < mStreams.size(); ++i) {
+ mStreams[i]->signalNewSampleAesKey(keyItem);
+ }
+}
+
+void ATSParser::Stream::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ ALOGD("Stream::signalNewSampleAesKey: 0x%04x size = %zu keyItem: %p",
+ mElementaryPID, mBuffer->size(), keyItem.get());
+
+ // a NULL key item will propagate to existing ElementaryStreamQueues
+ mSampleAesKeyItem = keyItem;
+
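+ // drain data buffered under the previous key before handing the new key to the queue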
+ flush(NULL);
+ mQueue->signalNewSampleAesKey(keyItem);
+}
+
} // namespace android
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 4a88713..374e011 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -131,6 +131,8 @@
int64_t getFirstPTSTimeUs();
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
enum {
// From ISO/IEC 13818-1: 2000 (E), Table 2-29
STREAMTYPE_RESERVED = 0x00,
@@ -149,6 +151,11 @@
// Stream type 0x83 is non-standard,
// it could be LPCM or TrueHD AC3
STREAMTYPE_LPCM_AC3 = 0x83,
+
+ // Sample-encrypted stream types
+ STREAMTYPE_H264_ENCRYPTED = 0xDB,
+ STREAMTYPE_AAC_ENCRYPTED = 0xCF,
+ STREAMTYPE_AC3_ENCRYPTED = 0xC1,
};
protected:
@@ -181,6 +188,8 @@
size_t mNumTSPacketsParsed;
+ sp<AMessage> mSampleAesKeyItem;
+
void parseProgramAssociationTable(ABitReader *br);
void parseProgramMap(ABitReader *br);
// Parse PES packet where br is pointing to. If the PES contains a sync
diff --git a/media/libstagefright/mpeg2ts/Android.mk b/media/libstagefright/mpeg2ts/Android.mk
index 5140e66..20acfe7 100644
--- a/media/libstagefright/mpeg2ts/Android.mk
+++ b/media/libstagefright/mpeg2ts/Android.mk
@@ -7,6 +7,7 @@
ATSParser.cpp \
CasManager.cpp \
ESQueue.cpp \
+ HlsSampleDecryptor.cpp \
MPEG2PSExtractor.cpp \
MPEG2TSExtractor.cpp \
@@ -18,7 +19,9 @@
LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
LOCAL_SANITIZE_DIAG := cfi
-LOCAL_SHARED_LIBRARIES := libmedia
+LOCAL_SHARED_LIBRARIES := \
+ libcrypto \
+ libmedia \
LOCAL_MODULE:= libstagefright_mpeg2ts
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index ae7ec77..f1b44ae 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -42,7 +42,15 @@
: mMode(mode),
mFlags(flags),
mEOSReached(false),
- mCASystemId(0) {
+ mCASystemId(0),
+ mAUIndex(0) {
+
+ ALOGV("ElementaryStreamQueue(%p) mode %x flags %x isScrambled %d isSampleEncrypted %d",
+ this, mode, flags, isScrambled(), isSampleEncrypted());
+
+ // Create the decryptor anyway since we don't know the use-case unless key is provided
+ // Won't decrypt if key info not available (e.g., scanner/extractor just parsing ts files)
+ mSampleDecryptor = isSampleEncrypted() ? new HlsSampleDecryptor : NULL;
}
sp<MetaData> ElementaryStreamQueue::getFormat() {
@@ -659,6 +667,9 @@
unsigned syncStartPos = 0; // in bytes
unsigned payloadSize = 0;
sp<MetaData> format = new MetaData;
+
+ ALOGV("dequeueAccessUnit_AC3[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+
while (true) {
if (syncStartPos + 2 >= mBuffer->size()) {
return NULL;
@@ -671,6 +682,10 @@
if (payloadSize > 0) {
break;
}
+
+ ALOGV("dequeueAccessUnit_AC3[%d]: syncStartPos %u payloadSize %u",
+ mAUIndex, syncStartPos, payloadSize);
+
++syncStartPos;
}
@@ -683,14 +698,22 @@
mFormat = format;
}
- sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
- memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
if (timeUs < 0ll) {
ALOGE("negative timeUs");
return NULL;
}
+
+ // Not decrypting if key info not available (e.g., scanner/extractor parsing ts files)
+ if (mSampleDecryptor != NULL) {
+ mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+ }
+ mAUIndex++;
+
+ sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+ memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
accessUnit->meta()->setInt64("timeUs", timeUs);
accessUnit->meta()->setInt32("isSync", 1);
@@ -791,6 +814,17 @@
return NULL;
}
+ ALOGV("dequeueAccessUnit_AAC[%d]: mBuffer %zu info.mLength %zu",
+ mAUIndex, mBuffer->size(), info.mLength);
+
+ struct ADTSPosition {
+ size_t offset;
+ size_t headerSize;
+ size_t length;
+ };
+
+ Vector<ADTSPosition> frames;
+
// The idea here is consume all AAC frames starting at offsets before
// info.mLength so we can assign a meaningful timestamp without
// having to interpolate.
@@ -811,7 +845,7 @@
return NULL;
}
bits.skipBits(3); // ID, layer
- bool protection_absent __unused = bits.getBits(1) != 0;
+ bool protection_absent = bits.getBits(1) != 0;
if (mFormat == NULL) {
unsigned profile = bits.getBits(2);
@@ -873,11 +907,36 @@
return NULL;
}
- size_t headerSize __unused = protection_absent ? 7 : 9;
+ size_t headerSize = protection_absent ? 7 : 9;
+
+ // Track the frame positions first; decrypt later only if an accessUnit is actually generated
+ if (mSampleDecryptor != NULL) {
+ ADTSPosition frame = {
+ .offset = offset,
+ .headerSize = headerSize,
+ .length = aac_frame_length
+ };
+
+ frames.push(frame);
+ }
offset += aac_frame_length;
}
+ // Decrypting only if the loop didn't exit early and an accessUnit is about to be generated
+ // Not decrypting if key info not available (e.g., scanner/extractor parsing ts files)
+ if (mSampleDecryptor != NULL) {
+ for (size_t frameId = 0; frameId < frames.size(); frameId++) {
+ const ADTSPosition &frame = frames.itemAt(frameId);
+
+ mSampleDecryptor->processAAC(frame.headerSize,
+ mBuffer->data() + frame.offset, frame.length);
+// ALOGV("dequeueAccessUnitAAC[%zu]: while offset %zu headerSize %zu frame_len %zu",
+// frameId, frame.offset, frame.headerSize, frame.length);
+ }
+ }
+ mAUIndex++;
+
int64_t timeUs = fetchTimestamp(offset);
sp<ABuffer> accessUnit = new ABuffer(offset);
@@ -970,6 +1029,9 @@
size_t nalSize;
bool foundSlice = false;
bool foundIDR = false;
+
+ ALOGV("dequeueAccessUnit_H264[%d] %p/%zu", mAUIndex, data, size);
+
while ((err = getNextNALUnit(&data, &size, &nalStart, &nalSize)) == OK) {
if (nalSize == 0) continue;
@@ -981,6 +1043,7 @@
foundIDR = true;
}
if (foundSlice) {
+ //TODO: Shouldn't this have been called with nalSize-1?
ABitReader br(nalStart + 1, nalSize);
unsigned first_mb_in_slice = parseUE(&br);
@@ -1021,6 +1084,7 @@
size_t dstOffset = 0;
size_t seiIndex = 0;
+ size_t shrunkBytes = 0;
for (size_t i = 0; i < nals.size(); ++i) {
const NALPosition &pos = nals.itemAt(i);
@@ -1047,11 +1111,30 @@
memcpy(accessUnit->data() + dstOffset, "\x00\x00\x00\x01", 4);
- memcpy(accessUnit->data() + dstOffset + 4,
- mBuffer->data() + pos.nalOffset,
- pos.nalSize);
+ if (mSampleDecryptor != NULL && (nalType == 1 || nalType == 5)) {
+ uint8_t *nalData = mBuffer->data() + pos.nalOffset;
+ size_t newSize = mSampleDecryptor->processNal(nalData, pos.nalSize);
+ // Note: the data can shrink due to unescaping
+ memcpy(accessUnit->data() + dstOffset + 4,
+ nalData,
+ newSize);
+ dstOffset += newSize + 4;
- dstOffset += pos.nalSize + 4;
+ size_t thisShrunkBytes = pos.nalSize - newSize;
+ //ALOGV("dequeueAccessUnitH264[%d]: nalType: %d -> %zu (%zu)",
+ // nalType, (int)pos.nalSize, newSize, thisShrunkBytes);
+
+ shrunkBytes += thisShrunkBytes;
+ }
+ else {
+ memcpy(accessUnit->data() + dstOffset + 4,
+ mBuffer->data() + pos.nalOffset,
+ pos.nalSize);
+
+ dstOffset += pos.nalSize + 4;
+ //ALOGV("dequeueAccessUnitH264 [%d] %d @%d",
+ // nalType, (int)pos.nalSize, (int)pos.nalOffset);
+ }
}
#if !LOG_NDEBUG
@@ -1082,6 +1165,18 @@
mFormat = MakeAVCCodecSpecificData(accessUnit);
}
+ if (mSampleDecryptor != NULL && shrunkBytes > 0) {
+ size_t adjustedSize = accessUnit->size() - shrunkBytes;
+ ALOGV("dequeueAccessUnitH264[%d]: AU size adjusted %zu -> %zu",
+ mAUIndex, accessUnit->size(), adjustedSize);
+ accessUnit->setRange(0, adjustedSize);
+ }
+
+ ALOGV("dequeueAccessUnitH264[%d]: AU %p(%zu) dstOffset:%zu, nals:%zu, totalSize:%zu ",
+ mAUIndex, accessUnit->data(), accessUnit->size(),
+ dstOffset, nals.size(), totalSize);
+ mAUIndex++;
+
return accessUnit;
}
@@ -1612,4 +1707,15 @@
return accessUnit;
}
+void ElementaryStreamQueue::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ if (mSampleDecryptor == NULL) {
+ ALOGE("signalNewSampleAesKey: Stream %x is not encrypted; keyItem: %p",
+ mMode, keyItem.get());
+ return;
+ }
+
+ mSampleDecryptor->signalNewSampleAesKey(keyItem);
+}
+
+
} // namespace android
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index 11e1af7..ffcb502 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -19,11 +19,14 @@
#define ES_QUEUE_H_
#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AMessage.h>
#include <utils/Errors.h>
#include <utils/List.h>
#include <utils/RefBase.h>
#include <vector>
+#include "HlsSampleDecryptor.h"
+
namespace android {
struct ABuffer;
@@ -46,6 +49,7 @@
// Data appended to the queue is always at access unit boundaries.
kFlag_AlignedData = 1,
kFlag_ScrambledData = 2,
+ kFlag_SampleEncryptedData = 4,
};
explicit ElementaryStreamQueue(Mode mode, uint32_t flags = 0);
@@ -69,6 +73,8 @@
void setCasInfo(int32_t systemId, const std::vector<uint8_t> &sessionId);
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
private:
struct RangeInfo {
int64_t mTimestampUs;
@@ -100,6 +106,13 @@
sp<MetaData> mFormat;
+ sp<HlsSampleDecryptor> mSampleDecryptor;
+ int mAUIndex;
+
+ bool isSampleEncrypted() const {
+ return (mFlags & kFlag_SampleEncryptedData) != 0;
+ }
+
sp<ABuffer> dequeueAccessUnitH264();
sp<ABuffer> dequeueAccessUnitAAC();
sp<ABuffer> dequeueAccessUnitAC3();
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
new file mode 100644
index 0000000..e32f676
--- /dev/null
+++ b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "HlsSampleDecryptor"
+
+#include "HlsSampleDecryptor.h"
+
+#include <algorithm>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/Utils.h>
+
+
+namespace android {
+
+HlsSampleDecryptor::HlsSampleDecryptor()
+ : mValidKeyInfo(false) {
+}
+
+HlsSampleDecryptor::HlsSampleDecryptor(const sp<AMessage> &sampleAesKeyItem)
+ : mValidKeyInfo(false) {
+
+ signalNewSampleAesKey(sampleAesKeyItem);
+}
+
+void HlsSampleDecryptor::signalNewSampleAesKey(const sp<AMessage> &sampleAesKeyItem) {
+
+ if (sampleAesKeyItem == NULL) {
+ mValidKeyInfo = false;
+ ALOGW("signalNewSampleAesKey: sampleAesKeyItem is NULL");
+ return;
+ }
+
+ sp<ABuffer> keyDataBuffer, initVecBuffer;
+ sampleAesKeyItem->findBuffer("keyData", &keyDataBuffer);
+ sampleAesKeyItem->findBuffer("initVec", &initVecBuffer);
+
+ if (keyDataBuffer != NULL && keyDataBuffer->size() == AES_BLOCK_SIZE &&
+ initVecBuffer != NULL && initVecBuffer->size() == AES_BLOCK_SIZE) {
+
+ ALOGV("signalNewSampleAesKey: Key: %s IV: %s",
+ aesBlockToStr(keyDataBuffer->data()).c_str(),
+ aesBlockToStr(initVecBuffer->data()).c_str());
+
+ uint8_t KeyData[AES_BLOCK_SIZE];
+ memcpy(KeyData, keyDataBuffer->data(), AES_BLOCK_SIZE);
+ memcpy(mAESInitVec, initVecBuffer->data(), AES_BLOCK_SIZE);
+
+ mValidKeyInfo = (AES_set_decrypt_key(KeyData, 8*AES_BLOCK_SIZE/*128*/, &mAesKey) == 0);
+ if (!mValidKeyInfo) {
+ ALOGE("signalNewSampleAesKey: failed to set AES decryption key.");
+ }
+
+ } else {
+ // A media scanner might try to extract/parse the TS files without knowing the key.
+ // Otherwise, we shouldn't get here (unless an invalid playlist has swapped SAMPLE-AES for the
+ // NONE method while a sample-encrypted stream is still being parsed).
+
+ mValidKeyInfo = false;
+ ALOGE("signalNewSampleAesKey Can't decrypt; keyDataBuffer: %p(%zu) initVecBuffer: %p(%zu)",
+ keyDataBuffer.get(), (keyDataBuffer.get() == NULL)? -1 : keyDataBuffer->size(),
+ initVecBuffer.get(), (initVecBuffer.get() == NULL)? -1 : initVecBuffer->size());
+ }
+}
+
+size_t HlsSampleDecryptor::processNal(uint8_t *nalData, size_t nalSize) {
+
+ unsigned nalType = nalData[0] & 0x1f;
+ if (!mValidKeyInfo) {
+ ALOGV("processNal[%d]: (%p)/%zu Skipping due to invalid key", nalType, nalData, nalSize);
+ return nalSize;
+ }
+
+ bool isEncrypted = (nalSize > VIDEO_CLEAR_LEAD + AES_BLOCK_SIZE);
+ ALOGV("processNal[%d]: (%p)/%zu isEncrypted: %d", nalType, nalData, nalSize, isEncrypted);
+
+ if (isEncrypted) {
+ // Encrypted NALUs carry extra start-code emulation prevention bytes that must be
+ // stripped out before we can decrypt them.
+ size_t newSize = unescapeStream(nalData, nalSize);
+
+ ALOGV("processNal:unescapeStream[%d]: %zu -> %zu", nalType, nalSize, newSize);
+ nalSize = newSize;
+
+ //Encrypted_nal_unit () {
+ // nal_unit_type_byte // 1 byte
+ // unencrypted_leader // 31 bytes
+ // while (bytes_remaining() > 0) {
+ // if (bytes_remaining() > 16) {
+ // encrypted_block // 16 bytes
+ // }
+ // unencrypted_block // MIN(144, bytes_remaining()) bytes
+ // }
+ //}
+
+ size_t offset = VIDEO_CLEAR_LEAD;
+ size_t remainingBytes = nalSize - VIDEO_CLEAR_LEAD;
+
+ // a copy of initVec as decryptBlock updates it
+ unsigned char AESInitVec[AES_BLOCK_SIZE];
+ memcpy(AESInitVec, mAESInitVec, AES_BLOCK_SIZE);
+
+ while (remainingBytes > 0) {
+ // encrypted_block: protected block uses 10% skip encryption
+ if (remainingBytes > AES_BLOCK_SIZE) {
+ uint8_t *encrypted = nalData + offset;
+ status_t ret = decryptBlock(encrypted, AES_BLOCK_SIZE, AESInitVec);
+ if (ret != OK) {
+ ALOGE("processNal failed with %d", ret);
+ return nalSize; // revisit this
+ }
+
+ offset += AES_BLOCK_SIZE;
+ remainingBytes -= AES_BLOCK_SIZE;
+ }
+
+ // unencrypted_block
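+ // up to 9 * AES_BLOCK_SIZE (144) clear bytes follow each 16-byte encrypted block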
+ size_t clearBytes = std::min(remainingBytes, (size_t)(9 * AES_BLOCK_SIZE));
+
+ offset += clearBytes;
+ remainingBytes -= clearBytes;
+ } // while
+
+ } else { // isEncrypted == false
+ ALOGV("processNal[%d]: Unencrypted NALU (%p)/%zu", nalType, nalData, nalSize);
+ }
+
+ return nalSize;
+}
+
+void HlsSampleDecryptor::processAAC(size_t adtsHdrSize, uint8_t *data, size_t size) {
+
+ if (!mValidKeyInfo) {
+ ALOGV("processAAC: (%p)/%zu Skipping due to invalid key", data, size);
+ return;
+ }
+
+ // ADTS header is included in the size
+ size_t offset = adtsHdrSize;
+ size_t remainingBytes = size - adtsHdrSize;
+
+ bool isEncrypted = (remainingBytes >= AUDIO_CLEAR_LEAD + AES_BLOCK_SIZE);
+ ALOGV("processAAC: header: %zu data: %p(%zu) isEncrypted: %d",
+ adtsHdrSize, data, size, isEncrypted);
+
+ //Encrypted_AAC_Frame () {
+ // ADTS_Header // 7 or 9 bytes
+ // unencrypted_leader // 16 bytes
+ // while (bytes_remaining() >= 16) {
+ // encrypted_block // 16 bytes
+ // }
+ // unencrypted_trailer // 0-15 bytes
+ //}
+
+ // with lead bytes
+ if (remainingBytes >= AUDIO_CLEAR_LEAD) {
+ offset += AUDIO_CLEAR_LEAD;
+ remainingBytes -= AUDIO_CLEAR_LEAD;
+
+ // encrypted_block
+ if (remainingBytes >= AES_BLOCK_SIZE) {
+
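+ // round down to a whole number of AES blocks; the 0-15 byte remainder stays clear (unencrypted_trailer)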
+ size_t encryptedBytes = (remainingBytes / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
+ unsigned char AESInitVec[AES_BLOCK_SIZE];
+ memcpy(AESInitVec, mAESInitVec, AES_BLOCK_SIZE);
+
+ // decrypting all blocks at once
+ uint8_t *encrypted = data + offset;
+ status_t ret = decryptBlock(encrypted, encryptedBytes, AESInitVec);
+ if (ret != OK) {
+ ALOGE("processAAC: decryptBlock failed with %d", ret);
+ return;
+ }
+
+ offset += encryptedBytes;
+ remainingBytes -= encryptedBytes;
+ } // encrypted
+
+ // unencrypted_trailer
+ size_t clearBytes = remainingBytes;
+ if (clearBytes > 0) {
+ CHECK(clearBytes < AES_BLOCK_SIZE);
+ }
+
+ } else { // without lead bytes
+ ALOGV("processAAC: Unencrypted frame (without lead bytes) size %zu = %zu (hdr) + %zu (rem)",
+ size, adtsHdrSize, remainingBytes);
+ }
+
+}
+
+void HlsSampleDecryptor::processAC3(uint8_t *data, size_t size) {
+
+ if (!mValidKeyInfo) {
+ ALOGV("processAC3: (%p)/%zu Skipping due to invalid key", data, size);
+ return;
+ }
+
+ bool isEncrypted = (size >= AUDIO_CLEAR_LEAD + AES_BLOCK_SIZE);
+ ALOGV("processAC3 %p(%zu) isEncrypted: %d", data, size, isEncrypted);
+
+ //Encrypted_AC3_Frame () {
+ // unencrypted_leader // 16 bytes
+ // while (bytes_remaining() >= 16) {
+ // encrypted_block // 16 bytes
+ // }
+ // unencrypted_trailer // 0-15 bytes
+ //}
+
+ if (size >= AUDIO_CLEAR_LEAD) {
+ // unencrypted_leader
+ size_t offset = AUDIO_CLEAR_LEAD;
+ size_t remainingBytes = size - AUDIO_CLEAR_LEAD;
+
+ if (remainingBytes >= AES_BLOCK_SIZE) {
+
+ size_t encryptedBytes = (remainingBytes / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
+
+ // encrypted_block
+ unsigned char AESInitVec[AES_BLOCK_SIZE];
+ memcpy(AESInitVec, mAESInitVec, AES_BLOCK_SIZE);
+
+ // decrypting all blocks at once
+ uint8_t *encrypted = data + offset;
+ status_t ret = decryptBlock(encrypted, encryptedBytes, AESInitVec);
+ if (ret != OK) {
+ ALOGE("processAC3: decryptBlock failed with %d", ret);
+ return;
+ }
+
+ offset += encryptedBytes;
+ remainingBytes -= encryptedBytes;
+ } // encrypted
+
+ // unencrypted_trailer
+ size_t clearBytes = remainingBytes;
+ if (clearBytes > 0) {
+ CHECK(clearBytes < AES_BLOCK_SIZE);
+ }
+
+ } else {
+ ALOGV("processAC3: Unencrypted frame (without lead bytes) size %zu", size);
+ }
+}
+
+// Unescapes data replacing occurrences of [0, 0, 3] with [0, 0] and returns the new size
+size_t HlsSampleDecryptor::unescapeStream(uint8_t *data, size_t limit) const {
+ Vector<size_t> scratchEscapePositions;
+ size_t position = 0;
+
+ while (position < limit) {
+ position = findNextUnescapeIndex(data, position, limit);
+ if (position < limit) {
+ scratchEscapePositions.add(position);
+ position += 3;
+ }
+ }
+
+ size_t scratchEscapeCount = scratchEscapePositions.size();
+ size_t escapedPosition = 0; // The position being read from.
+ size_t unescapedPosition = 0; // The position being written to.
+ for (size_t i = 0; i < scratchEscapeCount; i++) {
+ size_t nextEscapePosition = scratchEscapePositions[i];
+ //TODO: add 2 and get rid of the later = 0 assignments
+ size_t copyLength = nextEscapePosition - escapedPosition;
+ memmove(data+unescapedPosition, data+escapedPosition, copyLength);
+ unescapedPosition += copyLength;
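+ // write back the two zero bytes and skip over the 0x03 emulation prevention byte in the source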
+ data[unescapedPosition++] = 0;
+ data[unescapedPosition++] = 0;
+ escapedPosition += copyLength + 3;
+ }
+
+ size_t unescapedLength = limit - scratchEscapeCount;
+ size_t remainingLength = unescapedLength - unescapedPosition;
+ memmove(data+unescapedPosition, data+escapedPosition, remainingLength);
+
+ return unescapedLength;
+}
+
+size_t HlsSampleDecryptor::findNextUnescapeIndex(uint8_t *data, size_t offset, size_t limit) const {
+ for (size_t i = offset; i < limit - 2; i++) {
+ //TODO: speed
+ if (data[i] == 0x00 && data[i + 1] == 0x00 && data[i + 2] == 0x03) {
+ return i;
+ }
+ }
+ return limit;
+}
+
+status_t HlsSampleDecryptor::decryptBlock(uint8_t *buffer, size_t size,
+ uint8_t AESInitVec[AES_BLOCK_SIZE]) {
+ if (size == 0) {
+ return OK;
+ }
+
+ if ((size % AES_BLOCK_SIZE) != 0) {
+ ALOGE("decryptBlock: size (%zu) not a multiple of block size", size);
+ return ERROR_MALFORMED;
+ }
+
+ ALOGV("decryptBlock: %p (%zu)", buffer, size);
+
+ AES_cbc_encrypt(buffer, buffer, size, &mAesKey, AESInitVec, AES_DECRYPT);
+
+ return OK;
+}
+
+AString HlsSampleDecryptor::aesBlockToStr(uint8_t block[AES_BLOCK_SIZE]) {
+ AString result;
+
+ if (block == NULL) {
+ result = AString("null");
+ } else {
+ result = AStringPrintf("0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X",
+ block[0], block[1], block[2], block[3], block[4], block[5], block[6], block[7],
+ block[8], block[9], block[10], block[11], block[12], block[13], block[14], block[15]);
+ }
+
+ return result;
+}
+
+
+} // namespace android
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h
new file mode 100644
index 0000000..2c76620
--- /dev/null
+++ b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SAMPLE_AES_PROCESSOR_H_
+
+#define SAMPLE_AES_PROCESSOR_H_
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <openssl/aes.h>
+
+#include <utils/Errors.h>
+#include <utils/List.h>
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+struct HlsSampleDecryptor : RefBase {
+
+ HlsSampleDecryptor();
+ explicit HlsSampleDecryptor(const sp<AMessage> &sampleAesKeyItem);
+
+ void signalNewSampleAesKey(const sp<AMessage> &sampleAesKeyItem);
+
+ size_t processNal(uint8_t *nalData, size_t nalSize);
+ void processAAC(size_t adtsHdrSize, uint8_t *data, size_t size);
+ void processAC3(uint8_t *data, size_t size);
+
+ static AString aesBlockToStr(uint8_t block[AES_BLOCK_SIZE]);
+
+private:
+ size_t unescapeStream(uint8_t *data, size_t limit) const;
+ size_t findNextUnescapeIndex(uint8_t *data, size_t offset, size_t limit) const;
+ status_t decryptBlock(uint8_t *buffer, size_t size, uint8_t AESInitVec[AES_BLOCK_SIZE]);
+
+ static const int VIDEO_CLEAR_LEAD = 32;
+ static const int AUDIO_CLEAR_LEAD = 16;
+
+ AES_KEY mAesKey;
+ uint8_t mAESInitVec[AES_BLOCK_SIZE];
+ bool mValidKeyInfo;
+
+ DISALLOW_EVIL_CONSTRUCTORS(HlsSampleDecryptor);
+};
+
+} // namespace android
+
+#endif // SAMPLE_AES_PROCESSOR_H_
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
index 3c2face..e876306 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
@@ -192,8 +192,8 @@
}
Return<Status> TWGraphicBufferSource::setTimeLapseConfig(
- int64_t timePerFrameUs, int64_t timePerCaptureUs) {
- return toStatus(mBase->setTimeLapseConfig(timePerFrameUs, timePerCaptureUs));
+ double fps, double captureFps) {
+ return toStatus(mBase->setTimeLapseConfig(fps, captureFps));
}
Return<Status> TWGraphicBufferSource::setStartTimeUs(int64_t startTimeUs) {
@@ -204,6 +204,13 @@
return toStatus(mBase->setStopTimeUs(stopTimeUs));
}
+Return<void> TWGraphicBufferSource::getStopTimeOffsetUs(
+ getStopTimeOffsetUs_cb _hidl_cb) {
+ // TODO: Implement this when needed.
+ _hidl_cb(Status::OK, 0);
+ return Void();
+}
+
Return<Status> TWGraphicBufferSource::setColorAspects(
const ColorAspects& aspects) {
return toStatus(mBase->setColorAspects(toCompactColorAspects(aspects)));
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/1.0/WGraphicBufferSource.h
index 73b86b8..4549c97 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.h
@@ -78,10 +78,10 @@
Return<Status> setSuspend(bool suspend, int64_t timeUs) override;
Return<Status> setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
Return<Status> setMaxFps(float maxFps) override;
- Return<Status> setTimeLapseConfig(
- int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
+ Return<Status> setTimeLapseConfig(double fps, double captureFps) override;
Return<Status> setStartTimeUs(int64_t startTimeUs) override;
Return<Status> setStopTimeUs(int64_t stopTimeUs) override;
+ Return<void> getStopTimeOffsetUs(getStopTimeOffsetUs_cb _hidl_cb) override;
Return<Status> setColorAspects(const ColorAspects& aspects) override;
Return<Status> setTimeOffsetUs(int64_t timeOffsetUs) override;
Return<Status> signalEndOfInputStream() override;
diff --git a/media/libstagefright/omx/BWGraphicBufferSource.cpp b/media/libstagefright/omx/BWGraphicBufferSource.cpp
index 4e0f6dd..f2a454f 100644
--- a/media/libstagefright/omx/BWGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/BWGraphicBufferSource.cpp
@@ -145,9 +145,9 @@
}
::android::binder::Status BWGraphicBufferSource::setTimeLapseConfig(
- int64_t timePerFrameUs, int64_t timePerCaptureUs) {
+ double fps, double captureFps) {
return Status::fromStatusT(mBase->setTimeLapseConfig(
- timePerFrameUs, timePerCaptureUs));
+ fps, captureFps));
}
::android::binder::Status BWGraphicBufferSource::setStartTimeUs(
diff --git a/media/libstagefright/omx/BWGraphicBufferSource.h b/media/libstagefright/omx/BWGraphicBufferSource.h
index f1ce2af..43763c2 100644
--- a/media/libstagefright/omx/BWGraphicBufferSource.h
+++ b/media/libstagefright/omx/BWGraphicBufferSource.h
@@ -50,7 +50,7 @@
int64_t repeatAfterUs) override;
Status setMaxFps(float maxFps) override;
Status setTimeLapseConfig(
- int64_t timePerFrameUs, int64_t timePerCaptureUs) override;
+ double fps, double captureFps) override;
Status setStartTimeUs(int64_t startTimeUs) override;
Status setStopTimeUs(int64_t stopTimeUs) override;
Status setColorAspects(int32_t aspects) override;
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index afbde6a..0521460 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -26,6 +26,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/FileDescriptor.h>
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
@@ -39,31 +40,242 @@
#include <inttypes.h>
#include "FrameDropper.h"
+#include <functional>
+#include <memory>
+#include <cmath>
+
namespace android {
+/**
+ * A copyable object managing a buffer in the buffer cache managed by the producer. This object
+ * holds a reference to the buffer, maintains which buffer slot it belongs to (if any) and
+ * whether it is still in that slot, and tracks whether there are any outstanding acquire
+ * references to it (by buffers acquired from the slot), mainly so that we can keep a debug
+ * count of how many buffers we still need to release back to the producer.
+ */
+struct GraphicBufferSource::CachedBuffer {
+ /**
+ * Token that is used to track acquire counts (as opposed to all references to this object).
+ */
+ struct Acquirable { };
+
+ /**
+ * Create using a buffer cached in a slot.
+ */
+ CachedBuffer(slot_id slot, const sp<GraphicBuffer> &graphicBuffer)
+ : mIsCached(true),
+ mSlot(slot),
+ mGraphicBuffer(graphicBuffer),
+ mAcquirable(std::make_shared<Acquirable>()) {
+ }
+
+ /**
+ * Returns the cache slot that this buffer is cached in, or -1 if it is no longer cached.
+ *
+ * This assumes that a slot id of -1 is invalid; even if it were valid, the collision would be
+ * benign and only affect debugging, since this object explicitly manages whether it is still cached.
+ */
+ slot_id getSlot() const {
+ return mIsCached ? mSlot : -1;
+ }
+
+ /**
+ * Returns the cached buffer.
+ */
+ sp<GraphicBuffer> getGraphicBuffer() const {
+ return mGraphicBuffer;
+ }
+
+ /**
+ * Checks whether this buffer is still in the buffer cache.
+ */
+ bool isCached() const {
+ return mIsCached;
+ }
+
+ /**
+ * Checks whether this buffer has an acquired reference.
+ */
+ bool isAcquired() const {
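+ // this object itself holds one reference to the Acquirable token; more than one means an AcquiredBuffer is still outstanding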
+ return mAcquirable.use_count() > 1;
+ }
+
+ /**
+ * Gets and returns a shared acquired reference.
+ */
+ std::shared_ptr<Acquirable> getAcquirable() {
+ return mAcquirable;
+ }
+
+private:
+ friend void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t);
+
+ /**
+ * This method is to be called when the buffer is no longer in the buffer cache.
+ * Called from discardBufferAtSlotIndex_l.
+ */
+ void onDroppedFromCache() {
+ CHECK_DBG(mIsCached);
+ mIsCached = false;
+ }
+
+ bool mIsCached;
+ slot_id mSlot;
+ sp<GraphicBuffer> mGraphicBuffer;
+ std::shared_ptr<Acquirable> mAcquirable;
+};
+
+/**
+ * A copyable object managing a buffer acquired from the producer. This must always be a cached
+ * buffer. This object also manages its acquire fence and any release fences that may be returned
+ * by the encoder for this buffer (this buffer may be queued to the encoder multiple times).
+ * If no release fences are added by the encoder, the acquire fence is returned as the release
+ * fence for this buffer - as it is assumed that no one waited for the acquire fence. Otherwise, it is
+ * assumed that the encoder has waited for the acquire fence (or returned it as the release
+ * fence).
+ */
+struct GraphicBufferSource::AcquiredBuffer {
+ AcquiredBuffer(
+ const std::shared_ptr<CachedBuffer> &buffer,
+ std::function<void(AcquiredBuffer *)> onReleased,
+ const sp<Fence> &acquireFence)
+ : mBuffer(buffer),
+ mAcquirable(buffer->getAcquirable()),
+ mAcquireFence(acquireFence),
+ mGotReleaseFences(false),
+ mOnReleased(onReleased) {
+ }
+
+ /**
+ * Adds a release fence returned by the encoder to this object. If this is called with a
+ * valid file descriptor, it is added to the list of release fences. These are returned to the
+ * producer on release() as a merged fence. Regardless of the validity of the file descriptor,
+ * we take note that a release fence was attempted to be added and the acquire fence can now be
+ * assumed as acquired.
+ */
+ void addReleaseFenceFd(int fenceFd) {
+ // save all release fences - these will be propagated to the producer if this buffer is
+ // ever released to it
+ if (fenceFd >= 0) {
+ mReleaseFenceFds.push_back(fenceFd);
+ }
+ mGotReleaseFences = true;
+ }
+
+ /**
+ * Returns the acquire fence file descriptor associated with this object.
+ */
+ int getAcquireFenceFd() {
+ if (mAcquireFence == nullptr || !mAcquireFence->isValid()) {
+ return -1;
+ }
+ return mAcquireFence->dup();
+ }
+
+ /**
+ * Returns whether the buffer is still in the buffer cache.
+ */
+ bool isCached() const {
+ return mBuffer->isCached();
+ }
+
+ /**
+ * Returns the acquired buffer.
+ */
+ sp<GraphicBuffer> getGraphicBuffer() const {
+ return mBuffer->getGraphicBuffer();
+ }
+
+ /**
+ * Returns the slot that this buffer is cached at, or -1 otherwise.
+ *
+ * This assumes that a slot id of -1 is invalid; even if it were valid, the collision would be
+ * benign and only affect debugging, since this object explicitly manages whether it is still cached.
+ */
+ slot_id getSlot() const {
+ return mBuffer->getSlot();
+ }
+
+ /**
+ * Creates and returns a release fence object from the acquire fence and/or any release fences
+ * added. If no release fences were added (even if invalid), returns the acquire fence.
+ * Otherwise, it returns a merged fence from all the valid release fences added.
+ */
+ sp<Fence> getReleaseFence() {
+ // If we did not receive release fences, we assume this buffer was not consumed (it was
+ // discarded or dropped). In this case release the acquire fence as the release fence.
+ // We do this here to avoid a dup, close and recreation of the Fence object.
+ if (!mGotReleaseFences) {
+ return mAcquireFence;
+ }
+ sp<Fence> ret = getReleaseFence(0, mReleaseFenceFds.size());
+ // clear fds as fence took ownership of them
+ mReleaseFenceFds.clear();
+ return ret;
+ }
+
+ // this video buffer is no longer referenced by the codec (or kept for later encoding)
+ // it is now safe to release to the producer
+ ~AcquiredBuffer() {
+ //mAcquirable.clear();
+ mOnReleased(this);
+ // the mOnReleased callback should have called getReleaseFence(), which takes ownership of all fds; just in case
+ ALOGW_IF(!mReleaseFenceFds.empty(), "release fences were not obtained, closing fds");
+ for (int fildes : mReleaseFenceFds) {
+ ::close(fildes);
+ TRESPASS_DBG();
+ }
+ }
+
+private:
+ std::shared_ptr<GraphicBufferSource::CachedBuffer> mBuffer;
+ std::shared_ptr<GraphicBufferSource::CachedBuffer::Acquirable> mAcquirable;
+ sp<Fence> mAcquireFence;
+ Vector<int> mReleaseFenceFds;
+ bool mGotReleaseFences;
+ std::function<void(AcquiredBuffer *)> mOnReleased;
+
+ /**
+ * Creates and returns a release fence from 0 or more release fence file descriptors from
+ * the specified range in the array.
+ *
+ * @param start start index
+ * @param num number of release fds to merge
+ */
+ sp<Fence> getReleaseFence(size_t start, size_t num) const {
+ if (num == 0) {
+ return Fence::NO_FENCE;
+ } else if (num == 1) {
+ return new Fence(mReleaseFenceFds[start]);
+ } else {
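+ // recursively merge the two halves of the range into a single fence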
+ return Fence::merge("GBS::AB",
+ getReleaseFence(start, num >> 1),
+ getReleaseFence(start + (num >> 1), num - (num >> 1)));
+ }
+ }
+};
+
GraphicBufferSource::GraphicBufferSource() :
mInitCheck(UNKNOWN_ERROR),
+ mNumAvailableUnacquiredBuffers(0),
+ mNumOutstandingAcquires(0),
+ mEndOfStream(false),
+ mEndOfStreamSent(false),
+ mLastDataspace(HAL_DATASPACE_UNKNOWN),
mExecuting(false),
mSuspended(false),
mStopTimeUs(-1),
- mLastDataSpace(HAL_DATASPACE_UNKNOWN),
- mNumFramesAvailable(0),
- mNumBufferAcquired(0),
- mEndOfStream(false),
- mEndOfStreamSent(false),
mLastActionTimeUs(-1ll),
- mPrevOriginalTimeUs(-1ll),
mSkipFramesBeforeNs(-1ll),
- mRepeatAfterUs(-1ll),
+ mFrameRepeatIntervalUs(-1ll),
mRepeatLastFrameGeneration(0),
- mRepeatLastFrameTimestamp(-1ll),
- mRepeatLastFrameCount(0),
- mLatestBufferId(-1),
- mLatestBufferFrameNum(0),
- mLatestBufferFence(Fence::NO_FENCE),
- mRepeatBufferDeferred(false),
- mTimePerCaptureUs(-1ll),
- mTimePerFrameUs(-1ll),
+ mOutstandingFrameRepeatCount(0),
+ mFrameRepeatBlockedOnCodecBuffer(false),
+ mFps(-1.0),
+ mCaptureFps(-1.0),
+ mBaseCaptureUs(-1ll),
+ mBaseFrameUs(-1ll),
+ mFrameCount(0),
mPrevCaptureUs(-1ll),
mPrevFrameUs(-1ll),
mInputBufferTimeOffsetUs(0ll) {
@@ -90,18 +302,25 @@
return;
}
- memset(&mColorAspectsPacked, 0, sizeof(mColorAspectsPacked));
+ memset(&mDefaultColorAspectsPacked, 0, sizeof(mDefaultColorAspectsPacked));
CHECK(mInitCheck == NO_ERROR);
}
GraphicBufferSource::~GraphicBufferSource() {
ALOGV("~GraphicBufferSource");
- if (mLatestBufferId >= 0) {
- releaseBuffer(mLatestBufferId, mLatestBufferFrameNum, mLatestBufferFence);
+ {
+ // all acquired buffers must be freed with the mutex locked otherwise our debug assertion
+ // may trigger
+ Mutex::Autolock autoLock(mMutex);
+ mAvailableBuffers.clear();
+ mSubmittedCodecBuffers.clear();
+ mLatestBuffer.mBuffer.reset();
}
- if (mNumBufferAcquired != 0) {
- ALOGW("potential buffer leak (acquired %d)", mNumBufferAcquired);
+
+ if (mNumOutstandingAcquires != 0) {
+ ALOGW("potential buffer leak: acquired=%d", mNumOutstandingAcquires);
+ TRESPASS_DBG();
}
if (mConsumer != NULL) {
status_t err = mConsumer->consumerDisconnect();
@@ -113,11 +332,11 @@
Status GraphicBufferSource::onOmxExecuting() {
Mutex::Autolock autoLock(mMutex);
- ALOGV("--> executing; avail=%zu, codec vec size=%zd",
- mNumFramesAvailable, mCodecBuffers.size());
+ ALOGV("--> executing; available=%zu, submittable=%zd",
+ mAvailableBuffers.size(), mFreeCodecBuffers.size());
CHECK(!mExecuting);
mExecuting = true;
- mLastDataSpace = HAL_DATASPACE_UNKNOWN;
+ mLastDataspace = HAL_DATASPACE_UNKNOWN;
ALOGV("clearing last dataSpace");
// Start by loading up as many buffers as possible. We want to do this,
@@ -129,35 +348,32 @@
// one codec buffer simultaneously. (We could instead try to submit
// all BQ buffers whenever any codec buffer is freed, but if we get the
// initial conditions right that will never be useful.)
- while (mNumFramesAvailable) {
+ while (haveAvailableBuffers_l()) {
if (!fillCodecBuffer_l()) {
- ALOGV("stop load with frames available (codecAvail=%d)",
- isCodecBufferAvailable_l());
+ ALOGV("stop load with available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
break;
}
}
- ALOGV("done loading initial frames, avail=%zu", mNumFramesAvailable);
+ ALOGV("done loading initial frames, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
// If EOS has already been signaled, and there are no more frames to
// submit, try to send EOS now as well.
- if (mStopTimeUs == -1 && mEndOfStream && mNumFramesAvailable == 0) {
+ if (mStopTimeUs == -1 && mEndOfStream && !haveAvailableBuffers_l()) {
submitEndOfInputStream_l();
}
- if (mRepeatAfterUs > 0ll && mLooper == NULL) {
+ if (mFrameRepeatIntervalUs > 0ll && mLooper == NULL) {
mReflector = new AHandlerReflector<GraphicBufferSource>(this);
mLooper = new ALooper;
mLooper->registerHandler(mReflector);
mLooper->start();
- if (mLatestBufferId >= 0) {
- sp<AMessage> msg =
- new AMessage(kWhatRepeatLastFrame, mReflector);
-
- msg->setInt32("generation", ++mRepeatLastFrameGeneration);
- msg->post(mRepeatAfterUs);
+ if (mLatestBuffer.mBuffer != nullptr) {
+ queueFrameRepeat_l();
}
}
@@ -179,11 +395,6 @@
Status GraphicBufferSource::onOmxLoaded(){
Mutex::Autolock autoLock(mMutex);
- if (!mExecuting) {
- // This can happen if something failed very early.
- ALOGW("Dropped back down to Loaded without Executing");
- }
-
if (mLooper != NULL) {
mLooper->unregisterHandler(mReflector->id());
mReflector.clear();
@@ -192,37 +403,21 @@
mLooper.clear();
}
- ALOGV("--> loaded; avail=%zu eos=%d eosSent=%d acquired=%d",
- mNumFramesAvailable, mEndOfStream, mEndOfStreamSent, mNumBufferAcquired);
+ ALOGV("--> loaded; available=%zu+%d eos=%d eosSent=%d acquired=%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers,
+ mEndOfStream, mEndOfStreamSent, mNumOutstandingAcquires);
// Codec is no longer executing. Releasing all buffers to bq.
- for (int i = (int)mCodecBuffers.size() - 1; i >= 0; --i) {
- if (mCodecBuffers[i].mGraphicBuffer != NULL) {
- int id = mCodecBuffers[i].mSlot;
- if (id != mLatestBufferId) {
- ALOGV("releasing buffer for codec: slot=%d, useCount=%d, latest=%d",
- id, mBufferUseCount[id], mLatestBufferId);
- sp<Fence> fence = new Fence(-1);
- releaseBuffer(id, mCodecBuffers[i].mFrameNumber, fence);
- mBufferUseCount[id] = 0;
- }
- }
- }
- // Also release the latest buffer
- if (mLatestBufferId >= 0) {
- releaseBuffer(mLatestBufferId, mLatestBufferFrameNum, mLatestBufferFence);
- mBufferUseCount[mLatestBufferId] = 0;
- mLatestBufferId = -1;
- }
-
- mCodecBuffers.clear();
+ mFreeCodecBuffers.clear();
+ mSubmittedCodecBuffers.clear();
+ mLatestBuffer.mBuffer.reset();
mOMXNode.clear();
mExecuting = false;
return Status::ok();
}
-Status GraphicBufferSource::onInputBufferAdded(int32_t bufferID) {
+Status GraphicBufferSource::onInputBufferAdded(codec_buffer_id bufferId) {
Mutex::Autolock autoLock(mMutex);
if (mExecuting) {
@@ -232,145 +427,115 @@
return Status::fromServiceSpecificError(INVALID_OPERATION);
}
- ALOGV("addCodecBuffer: bufferID=%u", bufferID);
+ ALOGV("addCodecBuffer: bufferId=%u", bufferId);
- CodecBuffer codecBuffer;
- codecBuffer.mBufferID = bufferID;
- mCodecBuffers.add(codecBuffer);
+ mFreeCodecBuffers.push_back(bufferId);
return Status::ok();
}
-Status GraphicBufferSource::onInputBufferEmptied(
- int32_t bufferID, int fenceFd) {
+Status GraphicBufferSource::onInputBufferEmptied(codec_buffer_id bufferId, int fenceFd) {
Mutex::Autolock autoLock(mMutex);
- if (!mExecuting) {
- if (fenceFd >= 0) {
- ::close(fenceFd);
- }
- return Status::fromServiceSpecificError(INVALID_OPERATION);
- }
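+ // take ownership of fenceFd so it is closed on every early-return path below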
+ FileDescriptor::Autoclose fence(fenceFd);
- int cbi = findMatchingCodecBuffer_l(bufferID);
+ ssize_t cbi = mSubmittedCodecBuffers.indexOfKey(bufferId);
if (cbi < 0) {
// This should never happen.
- ALOGE("codecBufferEmptied: buffer not recognized (bufferID=%u)", bufferID);
- if (fenceFd >= 0) {
- ::close(fenceFd);
- }
+ ALOGE("onInputBufferEmptied: buffer not recognized (bufferId=%u)", bufferId);
return Status::fromServiceSpecificError(BAD_VALUE);
}
- ALOGV("codecBufferEmptied: bufferID=%u, cbi=%d", bufferID, cbi);
- CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
+ std::shared_ptr<AcquiredBuffer> buffer = mSubmittedCodecBuffers.valueAt(cbi);
+
+ // Move buffer to available buffers
+ mSubmittedCodecBuffers.removeItemsAt(cbi);
+ mFreeCodecBuffers.push_back(bufferId);
// header->nFilledLen may not be the original value, so we can't compare
// that to zero to see of this was the EOS buffer. Instead we just
- // see if the GraphicBuffer reference was null, which should only ever
- // happen for EOS.
- if (codecBuffer.mGraphicBuffer == NULL) {
+ // see if there is a null AcquiredBuffer, which should only ever happen for EOS.
+ if (buffer == nullptr) {
if (!(mEndOfStream && mEndOfStreamSent)) {
- // This can happen when broken code sends us the same buffer
- // twice in a row.
- ALOGE("ERROR: codecBufferEmptied on non-EOS null buffer "
- "(buffer emptied twice?)");
+ // This can happen when broken code sends us the same buffer twice in a row.
+ ALOGE("onInputBufferEmptied: non-EOS null buffer (bufferId=%u)", bufferId);
+ } else {
+ ALOGV("onInputBufferEmptied: EOS null buffer (bufferId=%u@%zd)", bufferId, cbi);
}
- // No GraphicBuffer to deal with, no additional input or output is
- // expected, so just return.
- if (fenceFd >= 0) {
- ::close(fenceFd);
- }
+ // No GraphicBuffer to deal with, no additional input or output is expected, so just return.
return Status::fromServiceSpecificError(BAD_VALUE);
}
- // Find matching entry in our cached copy of the BufferQueue slots.
- // If we find a match, release that slot. If we don't, the BufferQueue
- // has dropped that GraphicBuffer, and there's nothing for us to release.
- int id = codecBuffer.mSlot;
- sp<Fence> fence = new Fence(fenceFd);
- if (mBufferSlot[id] != NULL &&
- mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
- mBufferUseCount[id]--;
-
- if (mBufferUseCount[id] < 0) {
- ALOGW("mBufferUseCount for bq slot %d < 0 (=%d)", id, mBufferUseCount[id]);
- mBufferUseCount[id] = 0;
- }
- if (id != mLatestBufferId && mBufferUseCount[id] == 0) {
- releaseBuffer(id, codecBuffer.mFrameNumber, fence);
- }
- ALOGV("codecBufferEmptied: slot=%d, cbi=%d, useCount=%d, acquired=%d, handle=%p",
- id, cbi, mBufferUseCount[id], mNumBufferAcquired, mBufferSlot[id]->handle);
- } else {
- ALOGV("codecBufferEmptied: no match for emptied buffer, "
- "slot=%d, cbi=%d, useCount=%d, acquired=%d",
- id, cbi, mBufferUseCount[id], mNumBufferAcquired);
- // we will not reuse codec buffer, so there is no need to wait for fence
+ if (!mExecuting) {
+ // this is fine since this could happen when going from Idle to Loaded
+ ALOGV("onInputBufferEmptied: no longer executing (bufferId=%u@%zd)", bufferId, cbi);
+ return Status::fromServiceSpecificError(OK);
}
- // Mark the codec buffer as available by clearing the GraphicBuffer ref.
- codecBuffer.mGraphicBuffer = NULL;
+ ALOGV("onInputBufferEmptied: bufferId=%d@%zd [slot=%d, useCount=%ld, handle=%p] acquired=%d",
+ bufferId, cbi, buffer->getSlot(), buffer.use_count(), buffer->getGraphicBuffer()->handle,
+ mNumOutstandingAcquires);
- if (mNumFramesAvailable) {
+ buffer->addReleaseFenceFd(fence.release());
+ // release the codec reference for the video buffer, in case the remove above did not do it
+ buffer.reset();
+
+ if (haveAvailableBuffers_l()) {
// Fill this codec buffer.
CHECK(!mEndOfStreamSent);
- ALOGV("buffer freed, %zu frames avail (eos=%d)",
- mNumFramesAvailable, mEndOfStream);
+ ALOGV("onInputBufferEmptied: buffer freed, feeding codec (available=%zu+%d, eos=%d)",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
fillCodecBuffer_l();
} else if (mEndOfStream && mStopTimeUs == -1) {
// No frames available, but EOS is pending and no stop time, so use this buffer to
// send that.
- ALOGV("buffer freed, EOS pending");
+ ALOGV("onInputBufferEmptied: buffer freed, submitting EOS");
submitEndOfInputStream_l();
- } else if (mRepeatBufferDeferred) {
+ } else if (mFrameRepeatBlockedOnCodecBuffer) {
bool success = repeatLatestBuffer_l();
- if (success) {
- ALOGV("deferred repeatLatestBuffer_l SUCCESS");
- } else {
- ALOGV("deferred repeatLatestBuffer_l FAILURE");
- }
- mRepeatBufferDeferred = false;
+ ALOGV("onInputBufferEmptied: completing deferred repeatLatestBuffer_l %s",
+ success ? "SUCCESS" : "FAILURE");
+ mFrameRepeatBlockedOnCodecBuffer = false;
}
+ // releaseReleasableBuffers_l();
return Status::ok();
}
-void GraphicBufferSource::onDataSpaceChanged_l(
- android_dataspace dataSpace, android_pixel_format pixelFormat) {
- ALOGD("got buffer with new dataSpace #%x", dataSpace);
- mLastDataSpace = dataSpace;
+void GraphicBufferSource::onDataspaceChanged_l(
+ android_dataspace dataspace, android_pixel_format pixelFormat) {
+ ALOGD("got buffer with new dataSpace #%x", dataspace);
+ mLastDataspace = dataspace;
- if (ColorUtils::convertDataSpaceToV0(dataSpace)) {
- mOMXNode->dispatchDataSpaceChanged(mLastDataSpace, mColorAspectsPacked, pixelFormat);
+ if (ColorUtils::convertDataSpaceToV0(dataspace)) {
+ mOMXNode->dispatchDataSpaceChanged(mLastDataspace, mDefaultColorAspectsPacked, pixelFormat);
}
}
bool GraphicBufferSource::fillCodecBuffer_l() {
- CHECK(mExecuting && mNumFramesAvailable > 0);
+ CHECK(mExecuting && haveAvailableBuffers_l());
- if (mSuspended && mActionQueue.empty()) {
- return false;
- }
-
- int cbi = findAvailableCodecBuffer_l();
- if (cbi < 0) {
+ if (mFreeCodecBuffers.empty()) {
// No buffers available, bail.
- ALOGV("fillCodecBuffer_l: no codec buffers, avail now %zu",
- mNumFramesAvailable);
+ ALOGV("fillCodecBuffer_l: no codec buffers, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
return false;
}
- ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%zu",
- mNumFramesAvailable);
- BufferItem item;
- status_t err = acquireBuffer(&item);
- if (err != OK) {
- ALOGE("fillCodecBuffer_l: acquireBuffer returned err=%d", err);
- return false;
+ VideoBuffer item;
+ if (mAvailableBuffers.empty()) {
+ ALOGV("fillCodecBuffer_l: acquiring available buffer, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ if (acquireBuffer_l(&item) != OK) {
+ ALOGE("fillCodecBuffer_l: failed to acquire available buffer");
+ return false;
+ }
+ } else {
+ ALOGV("fillCodecBuffer_l: getting available buffer, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ item = *mAvailableBuffers.begin();
+ mAvailableBuffers.erase(mAvailableBuffers.begin());
}
- int64_t itemTimeUs = item.mTimestamp / 1000;
-
- mNumFramesAvailable--;
+ int64_t itemTimeUs = item.mTimestampNs / 1000;
// Process ActionItem in the Queue if there is any. If a buffer's timestamp
// is smaller than the first action's timestamp, no action needs to be performed.
@@ -382,7 +547,6 @@
// [pause 1us], [resume 2us], [pause 3us], [resume 4us], [pause 5us].... Upon
// receiving a buffer with timestamp 3.5us, only the action [pause, 3us] needs
// to be handled and [pause, 1us], [resume 2us] will be discarded.
- bool dropped = false;
bool done = false;
bool seeStopAction = false;
if (!mActionQueue.empty()) {
@@ -394,7 +558,6 @@
// All the actions are ahead. No action needs to be performed now.
// Release the buffer if is in suspended state, or process the buffer
// if not in suspended state.
- dropped = mSuspended;
done = true;
}
@@ -402,7 +565,8 @@
// Find the newest action with a timestamp smaller than itemTimeUs. Then
// remove all the actions before and including the newest action.
List<ActionItem>::iterator it = mActionQueue.begin();
- while (it != mActionQueue.end() && it->mActionTimeUs <= itemTimeUs) {
+ while (it != mActionQueue.end() && it->mActionTimeUs <= itemTimeUs
+ && nextAction.mAction != ActionItem::STOP) {
nextAction = *it;
++it;
}
@@ -413,7 +577,6 @@
case ActionItem::PAUSE:
{
mSuspended = true;
- dropped = true;
ALOGV("RUNNING/PAUSE -> PAUSE at buffer %lld us PAUSE Time: %lld us",
(long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
break;
@@ -429,242 +592,214 @@
{
ALOGV("RUNNING/PAUSE -> STOP at buffer %lld us STOP Time: %lld us",
(long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
- dropped = true;
// Clear the whole ActionQueue as recording is done
mActionQueue.clear();
seeStopAction = true;
break;
}
default:
- ALOGE("Unknown action type");
+ TRESPASS_DBG("Unknown action type");
+ // return true here because we did consume an available buffer, so the
+ // loop in onOmxExecuting will eventually terminate even if we hit this.
return false;
}
}
}
- if (dropped) {
- releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
- if (seeStopAction) {
- // Clear all the buffers before setting mEndOfStream and signal EndOfInputStream.
- if (!releaseAllBuffers()) {
- ALOGW("Failed to release all the buffers when handling STOP action");
- }
- mEndOfStream = true;
- submitEndOfInputStream_l();
- }
+ if (seeStopAction) {
+ // Clear all the buffers before setting mEndOfStream and signal EndOfInputStream.
+ releaseAllAvailableBuffers_l();
+ mEndOfStream = true;
+ submitEndOfInputStream_l();
return true;
}
- if (item.mDataSpace != mLastDataSpace) {
- onDataSpaceChanged_l(
- item.mDataSpace, (android_pixel_format)mBufferSlot[item.mSlot]->getPixelFormat());
+ if (mSuspended) {
+ return true;
}
- err = UNKNOWN_ERROR;
+ int err = UNKNOWN_ERROR;
// only submit sample if start time is unspecified, or sample
// is queued after the specified start time
- if (mSkipFramesBeforeNs < 0ll || item.mTimestamp >= mSkipFramesBeforeNs) {
+ if (mSkipFramesBeforeNs < 0ll || item.mTimestampNs >= mSkipFramesBeforeNs) {
// if start time is set, offset time stamp by start time
if (mSkipFramesBeforeNs > 0) {
- item.mTimestamp -= mSkipFramesBeforeNs;
+ item.mTimestampNs -= mSkipFramesBeforeNs;
}
- int64_t timeUs = item.mTimestamp / 1000;
+ int64_t timeUs = item.mTimestampNs / 1000;
if (mFrameDropper != NULL && mFrameDropper->shouldDrop(timeUs)) {
ALOGV("skipping frame (%lld) to meet max framerate", static_cast<long long>(timeUs));
// set err to OK so that the skipped frame can still be saved as the latest frame
err = OK;
- dropped = true;
} else {
- err = submitBuffer_l(item, cbi);
+ err = submitBuffer_l(item); // this takes shared ownership of the acquired buffer on success
}
}
if (err != OK) {
- ALOGV("submitBuffer_l failed, releasing bq slot %d", item.mSlot);
- releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+ ALOGV("submitBuffer_l failed, will release bq slot %d", item.mBuffer->getSlot());
+ return true;
} else {
// Don't set the last buffer id if we're not repeating,
// we'll be holding on to the last buffer for nothing.
- if (mRepeatAfterUs > 0ll) {
+ if (mFrameRepeatIntervalUs > 0ll) {
setLatestBuffer_l(item);
}
- if (!dropped) {
- ++mBufferUseCount[item.mSlot];
- }
- ALOGV("buffer submitted: slot=%d, cbi=%d, useCount=%d, acquired=%d",
- item.mSlot, cbi, mBufferUseCount[item.mSlot], mNumBufferAcquired);
+ ALOGV("buffer submitted [slot=%d, useCount=%ld] acquired=%d",
+ item.mBuffer->getSlot(), item.mBuffer.use_count(), mNumOutstandingAcquires);
}
return true;
}
bool GraphicBufferSource::repeatLatestBuffer_l() {
- CHECK(mExecuting && mNumFramesAvailable == 0);
+ CHECK(mExecuting && !haveAvailableBuffers_l());
- if (mLatestBufferId < 0 || mSuspended) {
- return false;
- }
- if (mBufferSlot[mLatestBufferId] == NULL) {
- // This can happen if the remote side disconnects, causing
- // onBuffersReleased() to NULL out our copy of the slots. The
- // buffer is gone, so we have nothing to show.
- //
- // To be on the safe side we try to release the buffer.
- ALOGD("repeatLatestBuffer_l: slot was NULL");
- mConsumer->releaseBuffer(
- mLatestBufferId,
- mLatestBufferFrameNum,
- EGL_NO_DISPLAY,
- EGL_NO_SYNC_KHR,
- mLatestBufferFence);
- mLatestBufferId = -1;
- mLatestBufferFrameNum = 0;
- mLatestBufferFence = Fence::NO_FENCE;
+ if (mLatestBuffer.mBuffer == nullptr || mSuspended) {
return false;
}
- int cbi = findAvailableCodecBuffer_l();
- if (cbi < 0) {
+ if (mFreeCodecBuffers.empty()) {
// No buffers available, bail.
ALOGV("repeatLatestBuffer_l: no codec buffers.");
return false;
}
- BufferItem item;
- item.mSlot = mLatestBufferId;
- item.mFrameNumber = mLatestBufferFrameNum;
- item.mTimestamp = mRepeatLastFrameTimestamp;
- item.mFence = mLatestBufferFence;
+ if (!mLatestBuffer.mBuffer->isCached()) {
+ ALOGV("repeatLatestBuffer_l: slot was discarded, but repeating our own reference");
+ }
- status_t err = submitBuffer_l(item, cbi);
-
+ // it is ok to update the timestamp of latest buffer as it is only used for submission
+ status_t err = submitBuffer_l(mLatestBuffer);
if (err != OK) {
return false;
}
- ++mBufferUseCount[item.mSlot];
-
/* repeat last frame up to kRepeatLastFrameCount times.
* in case of static scene, a single repeat might not get rid of encoder
* ghosting completely, refresh a couple more times to get better quality
*/
- if (--mRepeatLastFrameCount > 0) {
- mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
-
- if (mReflector != NULL) {
- sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
- msg->setInt32("generation", ++mRepeatLastFrameGeneration);
- msg->post(mRepeatAfterUs);
- }
+ if (--mOutstandingFrameRepeatCount > 0) {
+ // set up timestamp for repeat frame
+ mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
+ queueFrameRepeat_l();
}
return true;
}
-void GraphicBufferSource::setLatestBuffer_l(const BufferItem &item) {
- if (mLatestBufferId >= 0 && mBufferUseCount[mLatestBufferId] == 0) {
- releaseBuffer(mLatestBufferId, mLatestBufferFrameNum, mLatestBufferFence);
- // mLatestBufferFence will be set to new fence just below
- }
+void GraphicBufferSource::setLatestBuffer_l(const VideoBuffer &item) {
+ mLatestBuffer = item;
- mLatestBufferId = item.mSlot;
- mLatestBufferFrameNum = item.mFrameNumber;
- mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
+ ALOGV("setLatestBuffer_l: [slot=%d, useCount=%ld]",
+ mLatestBuffer.mBuffer->getSlot(), mLatestBuffer.mBuffer.use_count());
- ALOGV("setLatestBuffer_l: slot=%d, useCount=%d",
- item.mSlot, mBufferUseCount[item.mSlot]);
+ mOutstandingFrameRepeatCount = kRepeatLastFrameCount;
+ // set up timestamp for repeat frame
+ mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
+ queueFrameRepeat_l();
+}
- mRepeatBufferDeferred = false;
- mRepeatLastFrameCount = kRepeatLastFrameCount;
- mLatestBufferFence = item.mFence;
+void GraphicBufferSource::queueFrameRepeat_l() {
+ mFrameRepeatBlockedOnCodecBuffer = false;
if (mReflector != NULL) {
sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
msg->setInt32("generation", ++mRepeatLastFrameGeneration);
- msg->post(mRepeatAfterUs);
+ msg->post(mFrameRepeatIntervalUs);
}
}
-bool GraphicBufferSource::getTimestamp(
- const BufferItem &item, int64_t *codecTimeUs) {
- int64_t timeUs = item.mTimestamp / 1000;
+bool GraphicBufferSource::calculateCodecTimestamp_l(
+ nsecs_t bufferTimeNs, int64_t *codecTimeUs) {
+ int64_t timeUs = bufferTimeNs / 1000;
timeUs += mInputBufferTimeOffsetUs;
- if (mTimePerCaptureUs > 0ll
- && (mTimePerCaptureUs > 2 * mTimePerFrameUs
- || mTimePerFrameUs > 2 * mTimePerCaptureUs)) {
+ if (mCaptureFps > 0.
+ && (mFps > 2 * mCaptureFps
+ || mCaptureFps > 2 * mFps)) {
// Time lapse or slow motion mode
if (mPrevCaptureUs < 0ll) {
// first capture
- mPrevCaptureUs = timeUs;
+ mPrevCaptureUs = mBaseCaptureUs = timeUs;
// adjust the first sample timestamp.
- mPrevFrameUs = (timeUs * mTimePerFrameUs) / mTimePerCaptureUs;
+ mPrevFrameUs = mBaseFrameUs =
+ std::llround((timeUs * mCaptureFps) / mFps);
+ mFrameCount = 0;
} else {
// snap to nearest capture point
- int64_t nFrames = (timeUs + mTimePerCaptureUs / 2 - mPrevCaptureUs)
- / mTimePerCaptureUs;
+ int64_t nFrames = std::llround(
+ (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000);
if (nFrames <= 0) {
// skip this frame as it's too close to previous capture
ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
return false;
}
- mPrevCaptureUs = mPrevCaptureUs + nFrames * mTimePerCaptureUs;
- mPrevFrameUs += mTimePerFrameUs * nFrames;
+ mFrameCount += nFrames;
+ mPrevCaptureUs = mBaseCaptureUs + std::llround(
+ mFrameCount * 1000000 / mCaptureFps);
+ mPrevFrameUs = mBaseFrameUs + std::llround(
+ mFrameCount * 1000000 / mFps);
}
ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
static_cast<long long>(timeUs),
static_cast<long long>(mPrevCaptureUs),
static_cast<long long>(mPrevFrameUs));
-
- *codecTimeUs = mPrevFrameUs;
- return true;
} else {
- int64_t originalTimeUs = timeUs;
- if (originalTimeUs <= mPrevOriginalTimeUs) {
- // Drop the frame if it's going backward in time. Bad timestamp
- // could disrupt encoder's rate control completely.
+ if (timeUs <= mPrevFrameUs) {
+ // Drop the frame if it's going backward in time. Bad timestamp
+ // could disrupt encoder's rate control completely.
ALOGW("Dropping frame that's going backward in time");
return false;
}
- mPrevOriginalTimeUs = originalTimeUs;
+ mPrevFrameUs = timeUs;
}
- *codecTimeUs = timeUs;
+ *codecTimeUs = mPrevFrameUs;
return true;
}
-status_t GraphicBufferSource::submitBuffer_l(const BufferItem &item, int cbi) {
- ALOGV("submitBuffer_l: slot=%d, cbi=%d", item.mSlot, cbi);
+status_t GraphicBufferSource::submitBuffer_l(const VideoBuffer &item) {
+ CHECK(!mFreeCodecBuffers.empty());
+ IOMX::buffer_id codecBufferId = *mFreeCodecBuffers.begin();
+ mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
+
+ ALOGV("submitBuffer_l [slot=%d, bufferId=%d]", item.mBuffer->getSlot(), codecBufferId);
int64_t codecTimeUs;
- if (!getTimestamp(item, &codecTimeUs)) {
+ if (!calculateCodecTimestamp_l(item.mTimestampNs, &codecTimeUs)) {
return UNKNOWN_ERROR;
}
- CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
- codecBuffer.mGraphicBuffer = mBufferSlot[item.mSlot];
- codecBuffer.mSlot = item.mSlot;
- codecBuffer.mFrameNumber = item.mFrameNumber;
+ if ((android_dataspace)item.mDataspace != mLastDataspace) {
+ onDataspaceChanged_l(
+ item.mDataspace,
+ (android_pixel_format)item.mBuffer->getGraphicBuffer()->format);
+ }
- IOMX::buffer_id bufferID = codecBuffer.mBufferID;
- const sp<GraphicBuffer> &buffer = codecBuffer.mGraphicBuffer;
- int fenceID = item.mFence->isValid() ? item.mFence->dup() : -1;
-
+ std::shared_ptr<AcquiredBuffer> buffer = item.mBuffer;
+ // use a GraphicBuffer for now as OMXNodeInstance is using GraphicBuffers to hold references
+ // and it requires this graphic buffer to be able to hold its reference
+ // and thus we would need to create a new GraphicBuffer from an ANWBuffer separate from the
+ // acquired GraphicBuffer.
+ // TODO: this can be reworked globally to use ANWBuffer references
+ sp<GraphicBuffer> graphicBuffer = buffer->getGraphicBuffer();
status_t err = mOMXNode->emptyBuffer(
- bufferID, OMX_BUFFERFLAG_ENDOFFRAME, buffer, codecTimeUs, fenceID);
+ codecBufferId, OMX_BUFFERFLAG_ENDOFFRAME, graphicBuffer, codecTimeUs,
+ buffer->getAcquireFenceFd());
if (err != OK) {
ALOGW("WARNING: emptyGraphicBuffer failed: 0x%x", err);
- codecBuffer.mGraphicBuffer = NULL;
return err;
}
- ALOGV("emptyGraphicBuffer succeeded, bufferID=%u buf=%p bufhandle=%p",
- bufferID, buffer->getNativeBuffer(), buffer->handle);
+ ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, buffer);
+ ALOGV("emptyGraphicBuffer succeeded, bufferId=%u@%zd bufhandle=%p",
+ codecBufferId, cbix, graphicBuffer->handle);
return OK;
}
@@ -675,119 +810,136 @@
return;
}
- int cbi = findAvailableCodecBuffer_l();
- if (cbi < 0) {
+ if (mFreeCodecBuffers.empty()) {
ALOGV("submitEndOfInputStream_l: no codec buffers available");
return;
}
+ IOMX::buffer_id codecBufferId = *mFreeCodecBuffers.begin();
+ mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
- // We reject any additional incoming graphic buffers, so there's no need
- // to stick a placeholder into codecBuffer.mGraphicBuffer to mark it as
- // in-use.
- CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
- IOMX::buffer_id bufferID = codecBuffer.mBufferID;
-
- status_t err = mOMXNode->emptyBuffer(bufferID,
- OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS);
+ // We reject any additional incoming graphic buffers. There is no acquired buffer used for EOS
+ status_t err = mOMXNode->emptyBuffer(
+ codecBufferId, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS);
if (err != OK) {
ALOGW("emptyDirectBuffer EOS failed: 0x%x", err);
} else {
- ALOGV("submitEndOfInputStream_l: buffer submitted, bufferID=%u cbi=%d",
- bufferID, cbi);
+ ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, nullptr);
+ ALOGV("submitEndOfInputStream_l: buffer submitted, bufferId=%u@%zd", codecBufferId, cbix);
mEndOfStreamSent = true;
}
}
-int GraphicBufferSource::findAvailableCodecBuffer_l() {
- CHECK(mCodecBuffers.size() > 0);
-
- for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) {
- if (mCodecBuffers[i].mGraphicBuffer == NULL) {
- return i;
- }
- }
- return -1;
-}
-
-int GraphicBufferSource::findMatchingCodecBuffer_l(IOMX::buffer_id bufferID) {
- for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) {
- if (mCodecBuffers[i].mBufferID == bufferID) {
- return i;
- }
- }
- return -1;
-}
-
-status_t GraphicBufferSource::acquireBuffer(BufferItem *bi) {
- status_t err = mConsumer->acquireBuffer(bi, 0);
+status_t GraphicBufferSource::acquireBuffer_l(VideoBuffer *ab) {
+ BufferItem bi;
+ status_t err = mConsumer->acquireBuffer(&bi, 0);
if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
// shouldn't happen
- ALOGW("acquireBuffer: frame was not available");
+ ALOGW("acquireBuffer_l: frame was not available");
return err;
} else if (err != OK) {
- ALOGW("acquireBuffer: failed with err=%d", err);
+ ALOGW("acquireBuffer_l: failed with err=%d", err);
return err;
}
- // If this is the first time we're seeing this buffer, add it to our
- // slot table.
- if (bi->mGraphicBuffer != NULL) {
- ALOGV("acquireBuffer: setting mBufferSlot %d", bi->mSlot);
- mBufferSlot[bi->mSlot] = bi->mGraphicBuffer;
- mBufferUseCount[bi->mSlot] = 0;
+ --mNumAvailableUnacquiredBuffers;
+
+ // Manage our buffer cache.
+ std::shared_ptr<CachedBuffer> buffer;
+ ssize_t bsi = mBufferSlots.indexOfKey(bi.mSlot);
+ if (bi.mGraphicBuffer != NULL) {
+ // replace/initialize slot with new buffer
+ ALOGV("acquireBuffer_l: %s buffer slot %d", bsi < 0 ? "setting" : "UPDATING", bi.mSlot);
+ if (bsi >= 0) {
+ discardBufferAtSlotIndex_l(bsi);
+ } else {
+ bsi = mBufferSlots.add(bi.mSlot, nullptr);
+ }
+ buffer = std::make_shared<CachedBuffer>(bi.mSlot, bi.mGraphicBuffer);
+ mBufferSlots.replaceValueAt(bsi, buffer);
+ } else {
+ buffer = mBufferSlots.valueAt(bsi);
}
- mNumBufferAcquired++;
+ int64_t frameNum = bi.mFrameNumber;
+
+ std::shared_ptr<AcquiredBuffer> acquiredBuffer =
+ std::make_shared<AcquiredBuffer>(
+ buffer,
+ [frameNum, this](AcquiredBuffer *buffer){
+ // AcquiredBuffer's destructor should always be called when mMutex is locked.
+ // If we had a reentrant mutex, we could just lock it again to ensure this.
+ if (mMutex.tryLock() == 0) {
+ TRESPASS_DBG();
+ mMutex.unlock();
+ }
+
+ // we can release buffers immediately if not using adapters
+ // alternatively, we could add them to mSlotsToRelease, but we would
+ // somehow need to propagate frame number to that queue
+ if (buffer->isCached()) {
+ --mNumOutstandingAcquires;
+ mConsumer->releaseBuffer(
+ buffer->getSlot(), frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
+ buffer->getReleaseFence());
+ }
+ },
+ bi.mFence);
+ VideoBuffer videoBuffer{acquiredBuffer, bi.mTimestamp, bi.mDataSpace};
+ *ab = videoBuffer;
+ ++mNumOutstandingAcquires;
return OK;
}
-/*
- * Releases an acquired buffer back to the consumer.
- *
- * id: buffer slot to release
- * frameNum: frame number of the frame being released
- * fence: fence of the frame being released
- */
-void GraphicBufferSource::releaseBuffer(
- int id, uint64_t frameNum, const sp<Fence> &fence) {
- ALOGV("releaseBuffer: slot=%d", id);
- mConsumer->releaseBuffer(
- id, frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
- mNumBufferAcquired--;
-}
-
// BufferQueue::ConsumerListener callback
-void GraphicBufferSource::onFrameAvailable(const BufferItem& /*item*/) {
+void GraphicBufferSource::onFrameAvailable(const BufferItem& item __unused) {
Mutex::Autolock autoLock(mMutex);
- ALOGV("onFrameAvailable exec=%d avail=%zu",
- mExecuting, mNumFramesAvailable);
+ ALOGV("onFrameAvailable: executing=%d available=%zu+%d",
+ mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ ++mNumAvailableUnacquiredBuffers;
- if (mOMXNode == NULL || mEndOfStreamSent || (mSuspended && mActionQueue.empty())) {
- if (mEndOfStreamSent) {
- // This should only be possible if a new buffer was queued after
- // EOS was signaled, i.e. the app is misbehaving.
+ // For BufferQueue we cannot acquire a buffer if we cannot immediately feed it to the codec
+ // UNLESS we are discarding this buffer (acquiring and immediately releasing it), which makes
+ // for some ugly logic here.
+ // NOTE: We could also rely on our debug counter but that is meant only as a debug counter.
+ if (!areWeDiscardingAvailableBuffers_l() && mFreeCodecBuffers.empty()) {
+ // we may not be allowed to acquire a possibly encodable buffer, so just note that
+ // it is available
+ ALOGV("onFrameAvailable: cannot acquire buffer right now, do it later");
- ALOGW("onFrameAvailable: EOS is sent, ignoring frame");
- } else {
- ALOGV("onFrameAvailable: suspended, ignoring frame");
- }
-
- BufferItem item;
- status_t err = acquireBuffer(&item);
- if (err == OK) {
- releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
- } else {
- ALOGE("onFrameAvailable: acquireBuffer returned err=%d", err);
- }
- return;
+ ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
+ return;
}
- mNumFramesAvailable++;
+ VideoBuffer buffer;
+ status_t err = acquireBuffer_l(&buffer);
+ if (err != OK) {
+ ALOGE("onFrameAvailable: acquireBuffer returned err=%d", err);
+ } else {
+ onBufferAcquired_l(buffer);
+ }
+}
- mRepeatBufferDeferred = false;
- ++mRepeatLastFrameGeneration;
+bool GraphicBufferSource::areWeDiscardingAvailableBuffers_l() {
+ return mEndOfStreamSent // already sent EOS to codec
+ || mOMXNode == nullptr // there is no codec connected
+ || (mSuspended && mActionQueue.empty()) // we are suspended and not waiting for
+ // any further action
+ || !mExecuting;
+}
- if (mExecuting) {
- fillCodecBuffer_l();
+void GraphicBufferSource::onBufferAcquired_l(const VideoBuffer &buffer) {
+ if (mEndOfStreamSent) {
+ // This should only be possible if a new buffer was queued after
+ // EOS was signaled, i.e. the app is misbehaving.
+ ALOGW("onFrameAvailable: EOS is sent, ignoring frame");
+ } else if (mOMXNode == NULL || (mSuspended && mActionQueue.empty())) {
+ // FIXME: if we are suspended but have a resume queued we will stop repeating the last
+ // frame. Is that the desired behavior?
+ ALOGV("onFrameAvailable: suspended, ignoring frame");
+ } else {
+ ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
+ mAvailableBuffers.push_back(buffer);
+ if (mExecuting) {
+ fillCodecBuffer_l();
+ }
}
}
@@ -805,25 +957,55 @@
for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
if ((slotMask & 0x01) != 0) {
- // Last buffer (if set) is always acquired even if its use count
- // is 0, because we could have skipped that frame but kept it for
- // repeating. Otherwise a buffer is only acquired if use count>0.
- if (mBufferSlot[i] != NULL &&
- (mBufferUseCount[i] > 0 || mLatestBufferId == i)) {
- ALOGV("releasing acquired buffer: slot=%d, useCount=%d, latest=%d",
- i, mBufferUseCount[i], mLatestBufferId);
- mNumBufferAcquired--;
- }
- if (mLatestBufferId == i) {
- mLatestBufferId = -1;
- }
- mBufferSlot[i] = NULL;
- mBufferUseCount[i] = 0;
+ discardBufferInSlot_l(i);
}
slotMask >>= 1;
}
}
+void GraphicBufferSource::discardBufferInSlot_l(GraphicBufferSource::slot_id i) {
+ ssize_t bsi = mBufferSlots.indexOfKey(i);
+ if (bsi < 0) {
+ ALOGW("releasing an unpopulated slot: %d", i);
+ } else {
+ discardBufferAtSlotIndex_l(bsi);
+ mBufferSlots.removeItemsAt(bsi);
+ }
+}
+
+void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t bsi) {
+ const std::shared_ptr<CachedBuffer>& buffer = mBufferSlots.valueAt(bsi);
+ // use -2 if there is no latest buffer, and -1 if it is no longer cached
+ slot_id latestBufferSlot =
+ mLatestBuffer.mBuffer == nullptr ? -2 : mLatestBuffer.mBuffer->getSlot();
+ ALOGV("releasing acquired buffer: [slot=%d, useCount=%ld], latest: [slot=%d]",
+ mBufferSlots.keyAt(bsi), buffer.use_count(), latestBufferSlot);
+ mBufferSlots.valueAt(bsi)->onDroppedFromCache();
+
+ // If the slot of an acquired buffer is discarded, that buffer will not have to be
+ // released to the producer, so account for it here. However, it is possible that the
+ // acquired buffer has already been released, so check whether it is still acquired.
+ if (buffer->isAcquired()) {
+ --mNumOutstandingAcquires;
+ }
+
+ // clear the buffer reference (not technically needed as caller either replaces or deletes
+ // it; done here for safety).
+ mBufferSlots.editValueAt(bsi).reset();
+ CHECK_DBG(buffer == nullptr);
+}
+
+void GraphicBufferSource::releaseAllAvailableBuffers_l() {
+ mAvailableBuffers.clear();
+ while (mNumAvailableUnacquiredBuffers > 0) {
+ VideoBuffer item;
+ if (acquireBuffer_l(&item) != OK) {
+ ALOGW("releaseAllAvailableBuffers: failed to acquire available unacquired buffer");
+ break;
+ }
+ }
+}
+
// BufferQueue::ConsumerListener callback
void GraphicBufferSource::onSidebandStreamChanged() {
ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
@@ -867,28 +1049,27 @@
mConsumer->setConsumerUsageBits(consumerUsage);
// Sets the default buffer data space
- ALOGD("setting dataspace: %#x, acquired=%d", dataSpace, mNumBufferAcquired);
+ ALOGD("setting dataspace: %#x, acquired=%d", dataSpace, mNumOutstandingAcquires);
mConsumer->setDefaultBufferDataSpace((android_dataspace)dataSpace);
- mLastDataSpace = (android_dataspace)dataSpace;
+ mLastDataspace = (android_dataspace)dataSpace;
mExecuting = false;
mSuspended = false;
mEndOfStream = false;
mEndOfStreamSent = false;
- mPrevOriginalTimeUs = -1ll;
mSkipFramesBeforeNs = -1ll;
- mRepeatAfterUs = -1ll;
+ mFrameRepeatIntervalUs = -1ll;
mRepeatLastFrameGeneration = 0;
- mRepeatLastFrameTimestamp = -1ll;
- mRepeatLastFrameCount = 0;
- mLatestBufferId = -1;
- mLatestBufferFrameNum = 0;
- mLatestBufferFence = Fence::NO_FENCE;
- mRepeatBufferDeferred = false;
- mTimePerCaptureUs = -1ll;
- mTimePerFrameUs = -1ll;
+ mOutstandingFrameRepeatCount = 0;
+ mLatestBuffer.mBuffer.reset();
+ mFrameRepeatBlockedOnCodecBuffer = false;
+ mFps = -1.0;
+ mCaptureFps = -1.0;
+ mBaseCaptureUs = -1ll;
+ mBaseFrameUs = -1ll;
mPrevCaptureUs = -1ll;
mPrevFrameUs = -1ll;
+ mFrameCount = 0;
mInputBufferTimeOffsetUs = 0;
mStopTimeUs = -1;
mActionQueue.clear();
@@ -930,20 +1111,15 @@
} else {
if (suspend) {
mSuspended = true;
-
- if (!releaseAllBuffers()) {
- ALOGW("Failed to release all the buffers during suspend");
- }
+ releaseAllAvailableBuffers_l();
return OK;
} else {
-
mSuspended = false;
-
- if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
+ if (mExecuting && !haveAvailableBuffers_l()
+ && mFrameRepeatBlockedOnCodecBuffer) {
if (repeatLatestBuffer_l()) {
ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
-
- mRepeatBufferDeferred = false;
+ mFrameRepeatBlockedOnCodecBuffer = false;
} else {
ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
}
@@ -953,23 +1129,6 @@
return OK;
}
-bool GraphicBufferSource::releaseAllBuffers() {
- while (mNumFramesAvailable > 0) {
- BufferItem item;
- status_t err = acquireBuffer(&item);
-
- if (err != OK) {
- ALOGE("releaseAllBuffers: acquireBuffer fail returned err=%d", err);
- return false;;
- }
-
- --mNumFramesAvailable;
-
- releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
- }
- return true;
-}
-
status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) {
ALOGV("setRepeatPreviousFrameDelayUs: delayUs=%lld", (long long)repeatAfterUs);
@@ -979,7 +1138,7 @@
return INVALID_OPERATION;
}
- mRepeatAfterUs = repeatAfterUs;
+ mFrameRepeatIntervalUs = repeatAfterUs;
return OK;
}
@@ -1055,25 +1214,25 @@
return OK;
}
-status_t GraphicBufferSource::setTimeLapseConfig(int64_t timePerFrameUs, int64_t timePerCaptureUs) {
- ALOGV("setTimeLapseConfig: timePerFrameUs=%lld, timePerCaptureUs=%lld",
- (long long)timePerFrameUs, (long long)timePerCaptureUs);
+status_t GraphicBufferSource::setTimeLapseConfig(double fps, double captureFps) {
+ ALOGV("setTimeLapseConfig: fps=%lg, captureFps=%lg",
+ fps, captureFps);
Mutex::Autolock autoLock(mMutex);
- if (mExecuting || timePerFrameUs <= 0ll || timePerCaptureUs <= 0ll) {
+ if (mExecuting || !(fps > 0) || !(captureFps > 0)) {
return INVALID_OPERATION;
}
- mTimePerFrameUs = timePerFrameUs;
- mTimePerCaptureUs = timePerCaptureUs;
+ mFps = fps;
+ mCaptureFps = captureFps;
return OK;
}
status_t GraphicBufferSource::setColorAspects(int32_t aspectsPacked) {
Mutex::Autolock autoLock(mMutex);
- mColorAspectsPacked = aspectsPacked;
+ mDefaultColorAspectsPacked = aspectsPacked;
ColorAspects colorAspects = ColorUtils::unpackToColorAspects(aspectsPacked);
ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
colorAspects.mRange, asString(colorAspects.mRange),
@@ -1086,8 +1245,8 @@
status_t GraphicBufferSource::signalEndOfInputStream() {
Mutex::Autolock autoLock(mMutex);
- ALOGV("signalEndOfInputStream: exec=%d avail=%zu eos=%d",
- mExecuting, mNumFramesAvailable, mEndOfStream);
+ ALOGV("signalEndOfInputStream: executing=%d available=%zu+%d eos=%d",
+ mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
if (mEndOfStream) {
ALOGE("EOS was already signaled");
@@ -1104,7 +1263,7 @@
// stall since no future events are expected.
mEndOfStream = true;
- if (mStopTimeUs == -1 && mExecuting && mNumFramesAvailable == 0) {
+ if (mStopTimeUs == -1 && mExecuting && !haveAvailableBuffers_l()) {
submitEndOfInputStream_l();
}
@@ -1125,17 +1284,16 @@
break;
}
- if (!mExecuting || mNumFramesAvailable > 0) {
+ if (!mExecuting || haveAvailableBuffers_l()) {
break;
}
bool success = repeatLatestBuffer_l();
-
if (success) {
ALOGV("repeatLatestBuffer_l SUCCESS");
} else {
ALOGV("repeatLatestBuffer_l FAILURE");
- mRepeatBufferDeferred = true;
+ mFrameRepeatBlockedOnCodecBuffer = true;
}
break;
}
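The heart of the acquireBuffer_l() rewrite above is that a buffer's lifetime is tied to its last outstanding reference: the same AcquiredBuffer may be held by mAvailableBuffers, mLatestBuffer and mSubmittedCodecBuffers, and the release lambda only returns the slot to the consumer once every holder has dropped it. A minimal, self-contained sketch of that shared_ptr-with-custom-deleter idiom (BufferPool and the int payload are illustrative stand-ins, not AOSP types):

    #include <iostream>
    #include <memory>
    #include <vector>

    // Stand-in for the consumer that owns the buffer slots.
    struct BufferPool {
        std::vector<bool> inUse;
        explicit BufferPool(size_t n) : inUse(n, false) {}

        // Hand out slot |i| wrapped in a shared_ptr whose deleter returns the slot
        // to the pool, mirroring how AcquiredBuffer's release callback ends up
        // calling releaseBuffer on the consumer once the last holder lets go.
        std::shared_ptr<int> acquire(int i) {
            inUse[i] = true;
            return std::shared_ptr<int>(new int(i), [this](int *slot) {
                std::cout << "releasing slot " << *slot << " back to the pool\n";
                inUse[*slot] = false;
                delete slot;
            });
        }
    };

    int main() {
        BufferPool pool(4);
        auto frame = pool.acquire(2);   // e.g. queued in mAvailableBuffers
        auto codecRef = frame;          // e.g. stored in mSubmittedCodecBuffers
        frame.reset();                  // the source is done with it...
        codecRef.reset();               // ...the slot is released only when the codec is too
    }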
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index ab52ce2..3df1aa1 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -41,7 +41,8 @@
struct FrameDropper;
/*
- * This class is used to feed OMX codecs from a Surface via BufferQueue.
+ * This class is used to feed OMX codecs from a Surface via BufferQueue or
+ * HW producer.
*
* Instances of the class don't run on a dedicated thread. Instead,
* various events trigger data movement:
@@ -55,6 +56,22 @@
* Frames of data (and, perhaps, the end-of-stream indication) can arrive
* before the codec is in the "executing" state, so we need to queue
* things up until we're ready to go.
+ *
+ * The GraphicBufferSource can be configured dynamically to discard frames
+ * from the source:
+ *
+ * - if their timestamp is less than a start time
+ * - if the source is suspended or stopped and the suspend/stop-time is reached
+ * - if EOS was signaled
+ * - if there is no encoder connected to it
+ *
+ * The source, furthermore, may choose not to encode (drop) frames:
+ *
+ * - to throttle the frame rate (keep it under a certain limit)
+ *
+ * Finally the source may optionally hold onto the last non-discarded frame
+ * (even if it was dropped) to reencode it after an interval if no further
+ * frames are sent by the producer.
*/
class GraphicBufferSource : public BufferQueue::ConsumerListener {
public:
@@ -74,6 +91,9 @@
return mProducer;
}
+ // OmxBufferSource interface
+ // ------------------------------
+
// This is called when OMX transitions to OMX_StateExecuting, which means
// we can start handing it buffers. If we already have buffers of data
// sitting in the BufferQueue, this will send them to the codec.
@@ -91,12 +111,14 @@
// A "codec buffer", i.e. a buffer that can be used to pass data into
// the encoder, has been allocated. (This call does not call back into
// OMXNodeInstance.)
- Status onInputBufferAdded(int32_t bufferID);
+ Status onInputBufferAdded(int32_t bufferId);
// Called from OnEmptyBufferDone. If we have a BQ buffer available,
// fill it with a new frame of data; otherwise, just mark it as available.
- Status onInputBufferEmptied(
- int32_t bufferID, int fenceFd);
+ Status onInputBufferEmptied(int32_t bufferId, int fenceFd);
+
+ // IGraphicBufferSource interface
+ // ------------------------------
// Configure the buffer source to be used with an OMX node with the default
// data space.
@@ -140,7 +162,7 @@
// Sets the time lapse (or slow motion) parameters.
// When set, the sample's timestamp will be modified to playback framerate,
// and capture timestamp will be modified to capture rate.
- status_t setTimeLapseConfig(int64_t timePerFrameUs, int64_t timePerCaptureUs);
+ status_t setTimeLapseConfig(double fps, double captureFps);
// Sets the start time us (in system time), samples before which should
// be dropped and not submitted to encoder
@@ -154,6 +176,9 @@
status_t setColorAspects(int32_t aspectsPacked);
protected:
+ // BQ::ConsumerListener interface
+ // ------------------------------
+
// BufferQueue::ConsumerListener interface, called when a new frame of
// data is available. If we're executing and a codec buffer is
// available, we acquire the buffer, copy the GraphicBuffer reference
@@ -173,71 +198,136 @@
void onSidebandStreamChanged() override;
private:
-
- // Keep track of codec input buffers. They may either be available
- // (mGraphicBuffer == NULL) or in use by the codec.
- struct CodecBuffer {
- IOMX::buffer_id mBufferID;
-
- // buffer producer's frame-number for buffer
- uint64_t mFrameNumber;
-
- // buffer producer's buffer slot for buffer
- int mSlot;
-
- sp<GraphicBuffer> mGraphicBuffer;
- };
-
- // Returns the index of an available codec buffer. If none are
- // available, returns -1. Mutex must be held by caller.
- int findAvailableCodecBuffer_l();
-
- // Returns true if a codec buffer is available.
- bool isCodecBufferAvailable_l() {
- return findAvailableCodecBuffer_l() >= 0;
- }
-
- // Finds the mCodecBuffers entry that matches. Returns -1 if not found.
- int findMatchingCodecBuffer_l(IOMX::buffer_id bufferID);
-
- // Fills a codec buffer with a frame from the BufferQueue. This must
- // only be called when we know that a frame of data is ready (i.e. we're
- // in the onFrameAvailable callback, or if we're in codecBufferEmptied
- // and mNumFramesAvailable is nonzero). Returns without doing anything if
- // we don't have a codec buffer available.
- //
- // Returns true if we successfully filled a codec buffer with a BQ buffer.
- bool fillCodecBuffer_l();
-
- // Marks the mCodecBuffers entry as in-use, copies the GraphicBuffer
- // reference into the codec buffer, and submits the data to the codec.
- status_t submitBuffer_l(const BufferItem &item, int cbi);
-
- // Submits an empty buffer, with the EOS flag set. Returns without
- // doing anything if we don't have a codec buffer available.
- void submitEndOfInputStream_l();
-
- // Acquire buffer from the consumer
- status_t acquireBuffer(BufferItem *bi);
-
- bool releaseAllBuffers();
-
- // Release buffer to the consumer
- void releaseBuffer(int id, uint64_t frameNum, const sp<Fence> &fence);
-
- void setLatestBuffer_l(const BufferItem &item);
- bool repeatLatestBuffer_l();
- bool getTimestamp(const BufferItem &item, int64_t *codecTimeUs);
-
- // called when the data space of the input buffer changes
- void onDataSpaceChanged_l(android_dataspace dataSpace, android_pixel_format pixelFormat);
-
// Lock, covers all member variables.
mutable Mutex mMutex;
// Used to report constructor failure.
status_t mInitCheck;
+ // Graphic buffer reference objects
+ // --------------------------------
+
+ // These are used to keep a shared reference to GraphicBuffers and gralloc handles owned by the
+ // GraphicBufferSource as well as to manage the cache slots. Separate references are owned by
+ // the buffer cache (controlled by the buffer queue/buffer producer) and the codec.
+
+ // When we get a buffer from the producer (BQ), the producer designates the specific slot it
+ // is cached in. Each slot owns a shared reference to the graphic buffer (we track these using
+ // CachedBuffer) that is in that slot, but the producer controls the slots.
+ struct CachedBuffer;
+
+ // When we acquire a buffer, we must release it back to the producer once we (or the codec)
+ // no longer use it (as long as the buffer is still in the cache slot). We use shared
+ // AcquiredBuffer instances for this purpose - and we call releaseBuffer when the last
+ // reference is relinquished.
+ struct AcquiredBuffer;
+
+ // We also need to keep some extra metadata (other than the buffer reference) for acquired
+ // buffers. These are tracked in VideoBuffer struct.
+ struct VideoBuffer {
+ std::shared_ptr<AcquiredBuffer> mBuffer;
+ nsecs_t mTimestampNs;
+ android_dataspace_t mDataspace;
+ };
+
+ // Cached and acquired buffers
+ // --------------------------------
+
+ typedef int slot_id;
+
+ // Maps a slot to the cached buffer in that slot
+ KeyedVector<slot_id, std::shared_ptr<CachedBuffer>> mBufferSlots;
+
+ // Queue of buffers acquired in chronological order that are not yet submitted to the codec
+ List<VideoBuffer> mAvailableBuffers;
+
+ // Number of buffers that have been signaled by the producer that they are available, but
+ // we've been unable to acquire them due to our max acquire count
+ int32_t mNumAvailableUnacquiredBuffers;
+
+ // Number of frames acquired from consumer (debug only)
+ // (as in acquireBuffer was called, and release still needs to be called)
+ int32_t mNumOutstandingAcquires;
+
+ // Acquire a buffer from the BQ and store it in |item| if successful
+ // \return OK on success, or error on failure.
+ status_t acquireBuffer_l(VideoBuffer *item);
+
+ // Called when a buffer was acquired from the producer
+ void onBufferAcquired_l(const VideoBuffer &buffer);
+
+ // marks the buffer at the slot no longer cached, and accounts for the outstanding
+ // acquire count
+ void discardBufferInSlot_l(slot_id i);
+
+ // marks the buffer at the slot index no longer cached, and accounts for the outstanding
+ // acquire count
+ void discardBufferAtSlotIndex_l(ssize_t bsi);
+
+ // release all acquired and unacquired available buffers
+ // This method will return if it fails to acquire an unacquired available buffer, which will
+ // leave mNumAvailableUnacquiredBuffers positive on return.
+ void releaseAllAvailableBuffers_l();
+
+ // returns whether we have any available buffers (acquired or not-yet-acquired)
+ bool haveAvailableBuffers_l() const {
+ return !mAvailableBuffers.empty() || mNumAvailableUnacquiredBuffers > 0;
+ }
+
+ // Codec buffers
+ // -------------
+
+ // When we queue buffers to the encoder, we must hold references to the graphic buffers
+ // backing those codec buffers - as the producer may free the slots.
+
+ typedef int32_t codec_buffer_id;
+
+ // set of codec buffer ID-s of buffers available to fill
+ List<codec_buffer_id> mFreeCodecBuffers;
+
+ // maps codec buffer ID-s to buffer info submitted to the codec. Used to keep a reference for
+ // the graphics buffer.
+ KeyedVector<codec_buffer_id, std::shared_ptr<AcquiredBuffer>> mSubmittedCodecBuffers;
+
+ // Processes the next acquired frame. If there is no available codec buffer, it returns false
+ // without any further action.
+ //
+ // Otherwise, it consumes the next acquired frame and determines if it needs to be discarded or
+ // dropped. If neither is needed, it submits it to the codec. It also saves the latest
+ // non-dropped frame and submits it for repeat encoding (if this is enabled).
+ //
+ // \require there must be an acquired frame (i.e. we're in the onFrameAvailable callback,
+ // or if we're in onInputBufferEmptied and there are still available buffers).
+ // \require codec must be executing
+ // \returns true if acquired (and handled) the next frame. Otherwise, false.
+ bool fillCodecBuffer_l();
+
+ // Calculates the media timestamp for |item| and on success it submits the buffer to the codec,
+ // while also keeping a reference for it in mSubmittedCodecBuffers.
+ // Returns UNKNOWN_ERROR if the buffer was not submitted due to buffer timestamp. Otherwise,
+ // it returns any submit success or error value returned by the codec.
+ status_t submitBuffer_l(const VideoBuffer &item);
+
+ // Submits an empty buffer with the EOS flag set and sets the mEndOfStreamSent flag, if there
+ // is an available codec buffer. Does nothing if there is no codec buffer available.
+ void submitEndOfInputStream_l();
+
+ // Set to true if we want to send end-of-stream after we run out of available frames from the
+ // producer
+ bool mEndOfStream;
+
+ // Flag that the EOS was submitted to the encoder
+ bool mEndOfStreamSent;
+
+ // Dataspace for the last frame submitted to the codec
+ android_dataspace mLastDataspace;
+
+ // Default color aspects for this source
+ int32_t mDefaultColorAspectsPacked;
+
+ // called when the data space of the input buffer changes
+ void onDataspaceChanged_l(android_dataspace dataspace, android_pixel_format pixelFormat);
+
// Pointer back to the Omx node that created us. We send buffers here.
sp<IOmxNodeWrapper> mOMXNode;
@@ -246,11 +336,9 @@
bool mSuspended;
- // The time to stop sending buffers.
- int64_t mStopTimeUs;
-
- // Last dataspace seen
- android_dataspace mLastDataSpace;
+ // returns true if this source is unconditionally discarding acquired buffers at the moment
+ // regardless of the metadata of those buffers
+ bool areWeDiscardingAvailableBuffers_l();
// Our BufferQueue interfaces. mProducer is passed to the producer through
// getIGraphicBufferProducer, and mConsumer is used internally to retrieve
@@ -258,26 +346,8 @@
sp<IGraphicBufferProducer> mProducer;
sp<IGraphicBufferConsumer> mConsumer;
- // Number of frames pending in BufferQueue that haven't yet been
- // forwarded to the codec.
- size_t mNumFramesAvailable;
-
- // Number of frames acquired from consumer (debug only)
- int32_t mNumBufferAcquired;
-
- // Set to true if we want to send end-of-stream after we run out of
- // frames in BufferQueue.
- bool mEndOfStream;
- bool mEndOfStreamSent;
-
- // Cache of GraphicBuffers from the buffer queue. When the codec
- // is done processing a GraphicBuffer, we can use this to map back
- // to a slot number.
- sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
- int32_t mBufferUseCount[BufferQueue::NUM_BUFFER_SLOTS];
-
- // Tracks codec buffers.
- Vector<CodecBuffer> mCodecBuffers;
+ // The time to stop sending buffers.
+ int64_t mStopTimeUs;
struct ActionItem {
typedef enum {
@@ -302,13 +372,12 @@
friend struct AHandlerReflector<GraphicBufferSource>;
enum {
- kWhatRepeatLastFrame,
+ kWhatRepeatLastFrame, ///< queue last frame for reencoding
};
enum {
kRepeatLastFrameCount = 10,
};
- int64_t mPrevOriginalTimeUs;
int64_t mSkipFramesBeforeNs;
sp<FrameDropper> mFrameDropper;
@@ -316,28 +385,86 @@
sp<ALooper> mLooper;
sp<AHandlerReflector<GraphicBufferSource> > mReflector;
- int64_t mRepeatAfterUs;
- int32_t mRepeatLastFrameGeneration;
- int64_t mRepeatLastFrameTimestamp;
- int32_t mRepeatLastFrameCount;
+ // Repeat last frame feature
+ // -------------------------
+ // configuration parameter: repeat interval for frame repeating (<0 if repeating is disabled)
+ int64_t mFrameRepeatIntervalUs;
- int mLatestBufferId;
- uint64_t mLatestBufferFrameNum;
- sp<Fence> mLatestBufferFence;
+ // current frame repeat generation - used to cancel a pending frame repeat
+ int32_t mRepeatLastFrameGeneration;
+
+ // number of times to repeat latest frame (0 = none)
+ int32_t mOutstandingFrameRepeatCount;
// The previous buffer should've been repeated but
// no codec buffer was available at the time.
- bool mRepeatBufferDeferred;
+ bool mFrameRepeatBlockedOnCodecBuffer;
+
+ // hold a reference to the last acquired (and not discarded) frame for frame repeating
+ VideoBuffer mLatestBuffer;
+
+ // queue last frame for reencode after the repeat interval.
+ void queueFrameRepeat_l();
+
+ // save |item| as the latest buffer and queue it for reencode (repeat)
+ void setLatestBuffer_l(const VideoBuffer &item);
+
+ // submit the last frame to the encoder and queue it for reencoding
+ // \return true if the buffer was submitted, false if it wasn't (e.g. the source is suspended or there
+ // is no available codec buffer)
+ bool repeatLatestBuffer_l();
// Time lapse / slow motion configuration
- int64_t mTimePerCaptureUs;
- int64_t mTimePerFrameUs;
+ // --------------------------------------
+
+ // desired frame rate for encoding - value <= 0 if undefined
+ double mFps;
+
+ // desired frame rate for capture - value <= 0 if undefined
+ double mCaptureFps;
+
+ // Time lapse mode is enabled if the capture frame rate is defined and it is
+ // smaller than half the encoding frame rate (if defined). In this mode,
+ // frames that come in between the capture interval (the reciprocal of the
+ // capture frame rate) are dropped and the encoding timestamp is adjusted to
+ // match the desired encoding frame rate.
+ //
+ // Slow motion mode is enabled if both encoding and capture frame rates are
+ // defined and the encoding frame rate is less than half the capture frame
+ // rate. In this mode, the source is expected to produce frames at an even
+ // timestamp interval (after rounding) matching the configured capture fps. The
+ // first source timestamp is used as the source base time. Afterwards, the
+ // timestamp of each source frame is snapped to the nearest expected capture
+ // timestamp and scaled to match the configured encoding frame rate.
+
+ // These modes must be enabled before using this source.
+
+ // adjusted capture timestamp of the base frame
+ int64_t mBaseCaptureUs;
+
+ // adjusted encoding timestamp of the base frame
+ int64_t mBaseFrameUs;
+
+ // number of frames from the base time
+ int64_t mFrameCount;
+
+ // adjusted capture timestamp for previous frame (negative if there were
+ // none)
int64_t mPrevCaptureUs;
+
+ // adjusted media timestamp for previous frame (negative if there were none)
int64_t mPrevFrameUs;
+ // desired offset between media time and capture time
int64_t mInputBufferTimeOffsetUs;
- int32_t mColorAspectsPacked;
+ // Calculates and outputs the timestamp to use for a buffer with a specific buffer timestamp
+ // |bufferTimeNs|. Returns false on failure (buffer too close or timestamp is moving
+ // backwards). Otherwise, stores the media timestamp in |*codecTimeUs| and returns true.
+ //
+ // This method takes into account the start time offset and any time lapse or slow motion time
+ // adjustment requests.
+ bool calculateCodecTimestamp_l(nsecs_t bufferTimeNs, int64_t *codecTimeUs);
void onMessageReceived(const sp<AMessage> &msg);
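The time lapse / slow motion notes above boil down to a small piece of arithmetic in calculateCodecTimestamp_l(): snap each source timestamp to the nearest capture interval, then rescale the accumulated frame count to the encoding frame rate. A rough, standalone sketch of that adjustment (TimestampAdjuster is illustrative, not the AOSP class; the real code also covers the non-lapse path and frame dropping):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    struct TimestampAdjuster {
        double captureFps, encodeFps;
        int64_t baseCaptureUs, baseFrameUs, prevCaptureUs, frameCount;

        TimestampAdjuster(double capture, double encode)
            : captureFps(capture), encodeFps(encode),
              baseCaptureUs(-1), baseFrameUs(-1), prevCaptureUs(-1), frameCount(0) {}

        // Returns the encoding timestamp in microseconds, or -1 if the frame lands
        // on the same capture point as the previous one and should be skipped.
        int64_t adjust(int64_t timeUs) {
            if (prevCaptureUs < 0) {
                // first capture: remember the base times
                prevCaptureUs = baseCaptureUs = timeUs;
                baseFrameUs = std::llround(timeUs * captureFps / encodeFps);
                frameCount = 0;
                return baseFrameUs;
            }
            // number of capture intervals since the previous accepted frame
            int64_t nFrames = std::llround((timeUs - prevCaptureUs) * captureFps / 1e6);
            if (nFrames <= 0) {
                return -1;  // too close to the previous capture point
            }
            frameCount += nFrames;
            prevCaptureUs = baseCaptureUs + std::llround(frameCount * 1e6 / captureFps);
            return baseFrameUs + std::llround(frameCount * 1e6 / encodeFps);
        }
    };

    int main() {
        // Time lapse: capture one frame per second, play it back at 30 fps.
        TimestampAdjuster adj(1.0, 30.0);
        for (int64_t t = 0; t <= 3000000; t += 1000000) {
            std::printf("source %lld us -> codec %lld us\n",
                        (long long)t, (long long)adj.adjust(t));
        }
    }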
diff --git a/media/libstagefright/omx/hal/1.0/impl/Android.mk b/media/libstagefright/omx/hal/1.0/impl/Android.mk
deleted file mode 100644
index 79cb1fa..0000000
--- a/media/libstagefright/omx/hal/1.0/impl/Android.mk
+++ /dev/null
@@ -1,45 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := android.hardware.media.omx@1.0-impl
-LOCAL_SRC_FILES := \
- WGraphicBufferSource.cpp \
- WOmxBufferProducer.cpp \
- WOmxBufferSource.cpp \
- WOmxNode.cpp \
- WOmxObserver.cpp \
- WOmxProducerListener.cpp \
- Omx.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libmedia \
- libstagefright_foundation \
- libstagefright_omx \
- libui \
- libgui \
- libhidlbase \
- libhidltransport \
- libhwbinder \
- libhidlmemory \
- libutils \
- libcutils \
- libbinder \
- liblog \
- libbase \
- android.hardware.media.omx@1.0 \
- android.hardware.graphics.common@1.0 \
- android.hardware.media@1.0 \
- android.hidl.base@1.0 \
-
-LOCAL_C_FLAGS += \
- -Wno-unused-parameter \
- -Werror
-
-LOCAL_C_INCLUDES += \
- $(TOP)/frameworks/av/include \
- $(TOP)/frameworks/av/media/libstagefright \
- $(TOP)/frameworks/native/include \
- $(TOP)/frameworks/native/include/media/openmax \
- $(TOP)/frameworks/native/include/media/hardware \
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 565a2fe..c78002c 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -520,7 +520,16 @@
// When receiving files, the incoming length is given in 32 bits.
// A >4G file is given as 0xFFFFFFFF
uint32_t file_length = mfr.length;
- uint64_t offset = lseek(mfr.fd, 0, SEEK_CUR);
+ uint64_t offset = mfr.offset;
+ struct usb_endpoint_descriptor mBulkOut_desc;
+ int packet_size;
+
+ if (ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&mBulkOut_desc))) {
+ PLOG(ERROR) << "Could not get FFS bulk-out descriptor";
+ packet_size = MAX_PACKET_SIZE_HS;
+ } else {
+ packet_size = mBulkOut_desc.wMaxPacketSize;
+ }
char *data = mBuffer1.data();
char *data2 = mBuffer2.data();
@@ -573,21 +582,26 @@
}
if (read) {
- // Enqueue a new write request
- aio.aio_buf = data;
- aio.aio_sink = mfr.fd;
- aio.aio_offset = offset;
- aio.aio_nbytes = ret;
- aio_write(&aio);
-
if (file_length == MAX_MTP_FILE_SIZE) {
// For larger files, receive until a short packet is received.
if (static_cast<size_t>(ret) < length) {
file_length = 0;
}
} else {
+ // Receive an empty packet if size is a multiple of the endpoint size.
file_length -= ret;
+ if (file_length == 0 && ret % packet_size == 0) {
+ if (TEMP_FAILURE_RETRY(::read(mBulkOut, data, packet_size)) != 0) {
+ return -1;
+ }
+ }
}
+ // Enqueue a new write request
+ aio.aio_buf = data;
+ aio.aio_sink = mfr.fd;
+ aio.aio_offset = offset;
+ aio.aio_nbytes = ret;
+ aio_write(&aio);
offset += ret;
std::swap(data, data2);
@@ -695,9 +709,11 @@
}
}
- if (given_length == MAX_MTP_FILE_SIZE && ret % packet_size == 0) {
+ if (ret % packet_size == 0) {
// If the last packet wasn't short, send a final empty packet
- if (writeHandle(mBulkIn, data, 0) == -1) return -1;
+ if (TEMP_FAILURE_RETRY(::write(mBulkIn, data, 0)) != 0) {
+ return -1;
+ }
}
return 0;
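Both MtpFfsHandle hunks rest on the same USB bulk-transfer rule: a transfer only terminates on a short packet, so when the payload is an exact multiple of the endpoint's wMaxPacketSize a zero-length packet (ZLP) has to be written (or read, on receive) explicitly. A minimal sketch of the sender-side decision, with sendZeroLengthPacket standing in for the zero-byte write on the bulk-in endpoint (illustrative only):

    #include <cstddef>
    #include <cstdio>

    // Hypothetical transport hook; in MtpFfsHandle this is a zero-byte write on the bulk-in fd.
    bool sendZeroLengthPacket() { std::puts("ZLP sent"); return true; }

    // After writing |length| bytes over a bulk endpoint whose max packet size is
    // |packetSize|, decide whether a terminating ZLP is still required.
    bool finishBulkTransfer(size_t length, size_t packetSize) {
        if (length % packetSize == 0) {
            // The last packet was full-sized, so the host cannot tell the transfer
            // is over until it sees a short (here: empty) packet.
            return sendZeroLengthPacket();
        }
        return true;  // the last packet was already short; nothing to do
    }

    int main() {
        finishBulkTransfer(1024, 512);  // multiple of the packet size -> ZLP needed
        finishBulkTransfer(1000, 512);  // short final packet -> no ZLP
    }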
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index ce6354d..e3a23f9 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1040,13 +1040,7 @@
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
outputDesc->mDirectClientUid = clientUid;
-
- audio_io_handle_t srcOutput = getOutputForEffect();
addOutput(output, outputDesc);
- audio_io_handle_t dstOutput = getOutputForEffect();
- if (dstOutput == output) {
- mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, srcOutput, dstOutput);
- }
mPreviousOutputs = mOutputs;
ALOGV("getOutput() returns new direct output %d", output);
mpClientInterface->onAudioPortListUpdate();
@@ -1254,11 +1248,16 @@
// necessary for a correct control of hardware output routing by startOutput() and stopOutput()
outputDesc->changeRefCount(stream, 1);
+ if (stream == AUDIO_STREAM_MUSIC) {
+ selectOutputForMusicEffects();
+ }
+
if (outputDesc->mRefCount[stream] == 1 || device != AUDIO_DEVICE_NONE) {
// starting an output being rerouted?
if (device == AUDIO_DEVICE_NONE) {
device = getNewOutputDevice(outputDesc, false /*fromCache*/);
}
+
routing_strategy strategy = getStrategy(stream);
bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
(strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
@@ -1411,6 +1410,9 @@
// update the outputs if stopping one with a stream that can affect notification routing
handleNotificationRoutingForStream(stream);
}
+ if (stream == AUDIO_STREAM_MUSIC) {
+ selectOutputForMusicEffects();
+ }
return NO_ERROR;
} else {
ALOGW("stopOutput() refcount is already 0");
@@ -1454,13 +1456,6 @@
}
if (--desc->mDirectOpenCount == 0) {
closeOutput(output);
- // If effects where present on the output, audioflinger moved them to the primary
- // output by default: move them back to the appropriate output.
- audio_io_handle_t dstOutput = getOutputForEffect();
- if (hasPrimaryOutput() && dstOutput != mPrimaryOutput->mIoHandle) {
- mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX,
- mPrimaryOutput->mIoHandle, dstOutput);
- }
mpClientInterface->onAudioPortListUpdate();
}
}
@@ -1633,6 +1628,8 @@
isSoundTrigger,
policyMix, mpClientInterface);
+// FIXME: disable concurrent capture until UI is ready
+#if 0
// reuse an open input if possible
sp<AudioInputDescriptor> reusedInputDesc;
for (size_t i = 0; i < mInputs.size(); i++) {
@@ -1695,6 +1692,7 @@
releaseInput(reusedInputDesc->mIoHandle, currentSession);
}
}
+#endif
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = profileSamplingRate;
@@ -1800,6 +1798,8 @@
return BAD_VALUE;
}
+// FIXME: disable concurrent capture until UI is ready
+#if 0
if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
ALOGW("startInput(%d) failed: other input already started", input);
return INVALID_OPERATION;
@@ -1811,6 +1811,70 @@
if (mInputs.activeInputsCountOnDevices() != 0) {
*concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
}
+#else
+ if (!is_virtual_input_device(inputDesc->mDevice)) {
+ if (mCallTxPatch != 0 &&
+ inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
+ ALOGW("startInput(%d) failed: call in progress", input);
+ return INVALID_OPERATION;
+ }
+
+ Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+
+ if (is_virtual_input_device(activeDesc->mDevice)) {
+ continue;
+ }
+
+ audio_source_t activeSource = activeDesc->inputSource(true);
+ if (audioSession->inputSource() == AUDIO_SOURCE_HOTWORD) {
+ if (activeSource == AUDIO_SOURCE_HOTWORD) {
+ if (activeDesc->hasPreemptedSession(session)) {
+ ALOGW("startInput(%d) failed for HOTWORD: "
+ "other input %d already started for HOTWORD",
+ input, activeDesc->mIoHandle);
+ return INVALID_OPERATION;
+ }
+ } else {
+ ALOGV("startInput(%d) failed for HOTWORD: other input %d already started",
+ input, activeDesc->mIoHandle);
+ return INVALID_OPERATION;
+ }
+ } else {
+ if (activeSource != AUDIO_SOURCE_HOTWORD) {
+ ALOGW("startInput(%d) failed: other input %d already started",
+ input, activeDesc->mIoHandle);
+ return INVALID_OPERATION;
+ }
+ }
+ }
+
+ // if capture is allowed, preempt currently active HOTWORD captures
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+
+ if (is_virtual_input_device(activeDesc->mDevice)) {
+ continue;
+ }
+
+ audio_source_t activeSource = activeDesc->inputSource(true);
+ if (activeSource == AUDIO_SOURCE_HOTWORD) {
+ AudioSessionCollection activeSessions =
+ activeDesc->getAudioSessions(true /*activeOnly*/);
+ audio_session_t activeSession = activeSessions.keyAt(0);
+ audio_io_handle_t activeHandle = activeDesc->mIoHandle;
+ SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
+ sessions.add(activeSession);
+ inputDesc->setPreemptedSessions(sessions);
+ stopInput(activeHandle, activeSession);
+ releaseInput(activeHandle, activeSession);
+ ALOGV("startInput(%d) for HOTWORD preempting HOTWORD input %d",
+ input, activeDesc->mIoHandle);
+ }
+ }
+ }
+#endif
// increment activity count before calling getNewInputDevice() below as only active sessions
// are considered for device selection
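The #else branch above reinstates a simple capture arbitration while concurrent capture is disabled: a non-virtual request is refused if another regular capture (or the in-call TX path) is active, a HOTWORD request additionally yields to any regular capture or to a HOTWORD input that has already preempted it, and an admitted regular capture then preempts the remaining HOTWORD sessions. A compact sketch of just the admission check under those assumptions (Source and ActiveInput are illustrative, not the AudioPolicyManager types):

    #include <vector>

    enum class Source { Mic, Hotword };

    struct ActiveInput {
        Source source;
        bool preemptedRequester;  // this input already preempted the requesting session
    };

    // Decide whether a new capture request may start, given the currently active
    // non-virtual inputs, following the arbitration in the #else branch above.
    bool mayStartCapture(Source request, const std::vector<ActiveInput> &active) {
        for (const ActiveInput &in : active) {
            if (request == Source::Hotword) {
                if (in.source == Source::Hotword) {
                    if (in.preemptedRequester) return false;  // do not ping-pong preemption
                } else {
                    return false;  // a regular capture beats a HOTWORD request
                }
            } else if (in.source != Source::Hotword) {
                return false;      // only one regular capture at a time
            }
        }
        return true;  // allowed; the caller then preempts any remaining HOTWORD inputs
    }

    int main() {
        std::vector<ActiveInput> active = { {Source::Hotword, false} };
        return mayStartCapture(Source::Mic, active) ? 0 : 1;  // mic is allowed and will preempt
    }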
@@ -2116,8 +2180,7 @@
return NO_ERROR;
}
-audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
- const SortedVector<audio_io_handle_t>& outputs)
+audio_io_handle_t AudioPolicyManager::selectOutputForMusicEffects()
{
// select one output among several suitable for global effects.
// The priority is as follows:
@@ -2125,53 +2188,68 @@
// AudioFlinger will invalidate the track and the offloaded output
// will be closed causing the effect to be moved to a PCM output.
// 2: A deep buffer output
- // 3: the first output in the list
-
- if (outputs.size() == 0) {
- return 0;
- }
-
- audio_io_handle_t outputOffloaded = 0;
- audio_io_handle_t outputDeepBuffer = 0;
-
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
- ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags);
- if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- outputOffloaded = outputs[i];
- }
- if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
- outputDeepBuffer = outputs[i];
- }
- }
-
- ALOGV("selectOutputForEffects outputOffloaded %d outputDeepBuffer %d",
- outputOffloaded, outputDeepBuffer);
- if (outputOffloaded != 0) {
- return outputOffloaded;
- }
- if (outputDeepBuffer != 0) {
- return outputDeepBuffer;
- }
-
- return outputs[0];
-}
-
-audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc)
-{
- // apply simple rule where global effects are attached to the same output as MUSIC streams
+ // 3: The primary output
+ // 4: the first output in the list
routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
- SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(device, mOutputs);
+ SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
- audio_io_handle_t output = selectOutputForEffects(dstOutputs);
- ALOGV("getOutputForEffect() got output %d for fx %s flags %x",
- output, (desc == NULL) ? "unspecified" : desc->name, (desc == NULL) ? 0 : desc->flags);
+ if (outputs.size() == 0) {
+ return AUDIO_IO_HANDLE_NONE;
+ }
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+ bool activeOnly = true;
+
+ while (output == AUDIO_IO_HANDLE_NONE) {
+ audio_io_handle_t outputOffloaded = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputDeepBuffer = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputPrimary = AUDIO_IO_HANDLE_NONE;
+
+ for (size_t i = 0; i < outputs.size(); i++) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
+ continue;
+ }
+ ALOGV("selectOutputForMusicEffects activeOnly %d outputs[%zu] flags 0x%08x",
+ activeOnly, i, desc->mFlags);
+ if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ outputOffloaded = outputs[i];
+ }
+ if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
+ outputDeepBuffer = outputs[i];
+ }
+ if ((desc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) != 0) {
+ outputPrimary = outputs[i];
+ }
+ }
+ if (outputOffloaded != AUDIO_IO_HANDLE_NONE) {
+ output = outputOffloaded;
+ } else if (outputDeepBuffer != AUDIO_IO_HANDLE_NONE) {
+ output = outputDeepBuffer;
+ } else if (outputPrimary != AUDIO_IO_HANDLE_NONE) {
+ output = outputPrimary;
+ } else {
+ output = outputs[0];
+ }
+ activeOnly = false;
+ }
+
+ if (output != mMusicEffectOutput) {
+ mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mMusicEffectOutput, output);
+ mMusicEffectOutput = output;
+ }
+
+ ALOGV("selectOutputForMusicEffects selected output %d", output);
return output;
}
+audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc __unused)
+{
+ return selectOutputForMusicEffects();
+}
+
status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
@@ -3368,7 +3446,8 @@
mBeaconPlayingRefCount(0),
mBeaconMuted(false),
mTtsOutputAvailable(false),
- mMasterMono(false)
+ mMasterMono(false),
+ mMusicEffectOutput(AUDIO_IO_HANDLE_NONE)
{
mUidCached = getuid();
mpClientInterface = clientInterface;
@@ -3813,12 +3892,14 @@
outputDesc->setIoHandle(output);
mOutputs.add(output, outputDesc);
updateMono(output); // update mono status when adding to output list
+ selectOutputForMusicEffects();
nextAudioPortGeneration();
}
void AudioPolicyManager::removeOutput(audio_io_handle_t output)
{
mOutputs.removeItem(output);
+ selectOutputForMusicEffects();
}
void AudioPolicyManager::addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc)
@@ -4406,22 +4487,7 @@
// Move effects associated to this strategy from previous output to new output
if (strategy == STRATEGY_MEDIA) {
- audio_io_handle_t fxOutput = selectOutputForEffects(dstOutputs);
- SortedVector<audio_io_handle_t> moved;
- for (size_t i = 0; i < mEffects.size(); i++) {
- sp<EffectDescriptor> effectDesc = mEffects.valueAt(i);
- if (effectDesc->mSession == AUDIO_SESSION_OUTPUT_MIX &&
- effectDesc->mIo != fxOutput) {
- if (moved.indexOf(effectDesc->mIo) < 0) {
- ALOGV("checkOutputForStrategy() moving effect %d to output %d",
- mEffects.keyAt(i), fxOutput);
- mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, effectDesc->mIo,
- fxOutput);
- moved.add(effectDesc->mIo);
- }
- effectDesc->mIo = fxOutput;
- }
- }
+ selectOutputForMusicEffects();
}
// Move tracks associated to this strategy from previous output to new output
for (int i = 0; i < AUDIO_STREAM_FOR_POLICY_CNT; i++) {
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 3dfcde6..9e552d7 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -452,7 +452,7 @@
audio_channel_mask_t channelMask,
audio_output_flags_t flags);
- audio_io_handle_t selectOutputForEffects(const SortedVector<audio_io_handle_t>& outputs);
+ audio_io_handle_t selectOutputForMusicEffects();
virtual status_t addAudioPatch(audio_patch_handle_t handle, const sp<AudioPatch>& patch)
{
@@ -570,6 +570,8 @@
bool mMasterMono; // true if we wish to force all outputs to mono
AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
+ audio_io_handle_t mMusicEffectOutput; // output selected for music effects
+
#ifdef AUDIO_POLICY_TEST
Mutex mLock;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index b2686bf..1c78a08 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -940,7 +940,7 @@
CameraParameters::FALSE);
}
- bool isZslReprocessPresent = false;
+ isZslReprocessPresent = false;
camera_metadata_ro_entry_t availableCapabilities =
staticInfo(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
if (0 < availableCapabilities.count) {
@@ -999,7 +999,7 @@
return NO_INIT;
}
- // Get supported preview fps ranges.
+ // Get supported preview fps ranges, up to default maximum.
Vector<Size> supportedPreviewSizes;
Vector<FpsRange> supportedPreviewFpsRanges;
const Size PREVIEW_SIZE_BOUND = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
@@ -1007,7 +1007,8 @@
if (res != OK) return res;
for (size_t i=0; i < availableFpsRanges.count; i += 2) {
if (!isFpsSupported(supportedPreviewSizes,
- HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1])) {
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1]) ||
+ availableFpsRanges.data.i32[i+1] > MAX_DEFAULT_FPS) {
continue;
}
FpsRange fpsRange = {availableFpsRanges.data.i32[i], availableFpsRanges.data.i32[i+1]};
@@ -1436,30 +1437,43 @@
*
* Either way, in case of multiple ranges, break the tie by
* selecting the smaller range.
+ *
+ * Always select range within 30fps if one exists.
*/
// all ranges which have previewFps
Vector<Range> candidateRanges;
+ Vector<Range> candidateFastRanges;
for (i = 0; i < availableFrameRates.count; i+=2) {
Range r = {
availableFrameRates.data.i32[i],
availableFrameRates.data.i32[i+1]
};
+ if (!isFpsSupported(availablePreviewSizes,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, r.max)) {
+ continue;
+ }
if (r.min <= previewFps && previewFps <= r.max) {
- candidateRanges.push(r);
+ if (r.max <= MAX_DEFAULT_FPS) {
+ candidateRanges.push(r);
+ } else {
+ candidateFastRanges.push(r);
+ }
}
}
- if (candidateRanges.isEmpty()) {
+ if (candidateRanges.isEmpty() && candidateFastRanges.isEmpty()) {
ALOGE("%s: Requested preview frame rate %d is not supported",
__FUNCTION__, previewFps);
return BAD_VALUE;
}
- // most applicable range with targetFps
- Range bestRange = candidateRanges[0];
- for (i = 1; i < candidateRanges.size(); ++i) {
- Range r = candidateRanges[i];
+ // most applicable range with targetFps
+ Vector<Range>& ranges =
+ candidateRanges.size() > 0 ? candidateRanges : candidateFastRanges;
+ Range bestRange = ranges[0];
+ for (i = 1; i < ranges.size(); ++i) {
+ Range r = ranges[i];
// Find by largest minIndex in recording mode
if (validatedParams.recordingHint) {
if (r.min > bestRange.min) {
@@ -1977,6 +1991,19 @@
paramsFlattened = newParams.flatten();
params = newParams;
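+ // Recompute slowJpegMode for the new picture size and decide whether ZSL reprocessing can stay enabled.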
+ slowJpegMode = false;
+ Size pictureSize = { pictureWidth, pictureHeight };
+ int64_t minFrameDurationNs = getJpegStreamMinFrameDurationNs(pictureSize);
+ if (previewFpsRange[1] > 1e9/minFrameDurationNs + FPS_MARGIN) {
+ slowJpegMode = true;
+ }
+ if (slowJpegMode || property_get_bool("camera.disable_zsl_mode", false)) {
+ allowZslMode = false;
+ } else {
+ allowZslMode = isZslReprocessPresent;
+ }
+ ALOGV("%s: allowZslMode: %d slowJpegMode %d", __FUNCTION__, allowZslMode, slowJpegMode);
+
return OK;
}
@@ -2984,7 +3011,6 @@
}
// Get min frame duration for each size and check if the given fps range can be supported.
- const int32_t FPS_MARGIN = 1;
for (size_t i = 0 ; i < sizes.size(); i++) {
int64_t minFrameDuration = getMinFrameDurationNs(sizes[i], format);
if (minFrameDuration <= 0) {
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 507de75..bea867a 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -173,6 +173,8 @@
// Whether the jpeg stream is slower than 30FPS and can slow down preview.
// When slowJpegMode is true, allowZslMode must be false to avoid slowing down preview.
bool slowJpegMode;
+ // Whether ZSL reprocess is supported by the device.
+ bool isZslReprocessPresent;
// Overall camera state
enum State {
@@ -199,6 +201,10 @@
static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.001;
// Threshold for slow jpeg mode
static const int64_t kSlowJpegModeThreshold = 33400000LL; // 33.4 ms
+ // Margin for checking FPS
+ static const int32_t FPS_MARGIN = 1;
+ // Max FPS for default parameters
+ static const int32_t MAX_DEFAULT_FPS = 30;
// Full static camera info, object owned by someone else, such as
// Camera2Device.
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index a77a90b7..f2e8df8 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1269,6 +1269,13 @@
surfaceId++;
}
+ // Gracefully handle case where finalizeOutputConfigurations is called
+ // without any new surface.
+ if (consumerSurfaces.size() == 0) {
+ mStreamInfoMap[streamId].finalized = true;
+ return res;
+ }
+
// Finish the deferred stream configuration with the surface.
status_t err;
err = mDevice->setConsumerSurfaces(streamId, consumerSurfaces);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 56ba5b6..f3a81cb 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -912,8 +912,15 @@
if (s == Status::OK) {
camera_metadata_t *buffer =
reinterpret_cast<camera_metadata_t*>(metadata.data());
- set_camera_metadata_vendor_id(buffer, mProviderTagid);
- mCameraCharacteristics = buffer;
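+ // Validate the metadata blob returned by the HAL before adopting it as the camera characteristics.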
+ size_t expectedSize = metadata.size();
+ int res = validate_camera_metadata_structure(buffer, &expectedSize);
+ if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
+ set_camera_metadata_vendor_id(buffer, mProviderTagid);
+ mCameraCharacteristics = buffer;
+ } else {
+ ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+ status = Status::INTERNAL_ERROR;
+ }
}
});
if (!ret.isOk()) {
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
index 0fe09d9..9df7cd4 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
@@ -17,6 +17,7 @@
//#define LOG_NDEBUG 0
#include <inttypes.h>
+#include <media/hardware/HardwareAPI.h> // For VideoNativeHandleMetadata
#include "CameraHardwareInterface.h"
namespace android {
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 4bd879f..907065f 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -317,16 +317,6 @@
const camera_memory_t *data, unsigned index,
void *user);
- // TODO: b/35625849
- // Meta data buffer layout for passing a native_handle to codec
- // matching frameworks/native/include/media/hardware/MetadataBufferType.h and
- // frameworks/native/include/media/hardware/HardwareAPI.h
- struct VideoNativeHandleMetadata {
- static const uint32_t kMetadataBufferTypeNativeHandleSource = 3;
- uint32_t eType; // must be kMetadataBufferTypeNativeHandleSource
- native_handle_t* pHandle;
- };
-
// This is a utility class that combines a MemoryHeapBase and a MemoryBase
// in one. Since we tend to use them in a one-to-one relationship, this is
// handy.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 47c7e3f..b64488c 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -200,20 +200,36 @@
}
std::shared_ptr<RequestMetadataQueue> queue;
- auto getQueueRet = session->getCaptureRequestMetadataQueue([&queue](const auto& descriptor) {
- queue = std::make_shared<RequestMetadataQueue>(descriptor);
- if (!queue->isValid() || queue->availableToWrite() <= 0) {
- ALOGW("HAL returns empty request metadata fmq, not use it");
- queue = nullptr;
- // don't use the queue onwards.
- }
- });
- if (!getQueueRet.isOk()) {
- ALOGW("Transaction error when getting request metadata fmq: %s, not use it",
- getQueueRet.description().c_str());
+ auto requestQueueRet = session->getCaptureRequestMetadataQueue(
+ [&queue](const auto& descriptor) {
+ queue = std::make_shared<RequestMetadataQueue>(descriptor);
+ if (!queue->isValid() || queue->availableToWrite() <= 0) {
+ ALOGE("HAL returns empty request metadata fmq, not use it");
+ queue = nullptr;
+ // don't use the queue onwards.
+ }
+ });
+ if (!requestQueueRet.isOk()) {
+ ALOGE("Transaction error when getting request metadata fmq: %s, not use it",
+ requestQueueRet.description().c_str());
queue = nullptr;
// Don't use the queue onwards.
}
+ auto resultQueueRet = session->getCaptureResultMetadataQueue(
+ [&queue = mResultMetadataQueue](const auto& descriptor) {
+ queue = std::make_unique<ResultMetadataQueue>(descriptor);
+ if (!queue->isValid() || queue->availableToWrite() <= 0) {
+ ALOGE("HAL returns empty result metadata fmq, not use it");
+ queue = nullptr;
+ // Don't use the queue onwards.
+ }
+ });
+ if (!resultQueueRet.isOk()) {
+ ALOGE("Transaction error when getting result metadata queue from camera session: %s",
+ resultQueueRet.description().c_str());
+ mResultMetadataQueue = nullptr;
+ // Don't use the queue onwards.
+ }
// TODO: camera service will absorb 3_2/3_3/3_4 differences in the future
// for now use 3_4 to keep legacy devices working
@@ -954,24 +970,56 @@
return res;
}
-
+// Only one processCaptureResult call should be in flight at a time, so
+// the lock won't block. The lock is present here simply to enforce this.
hardware::Return<void> Camera3Device::processCaptureResult(
const hardware::hidl_vec<
hardware::camera::device::V3_2::CaptureResult>& results) {
- for (const auto& result : results) {
- processOneCaptureResult(result);
+
+ if (mProcessCaptureResultLock.tryLock() != OK) {
+ // This should never happen; it indicates a wrong client implementation
+ // that doesn't follow the contract. But, we can be tolerant here.
+ ALOGE("%s: callback overlapped! waiting 1s...",
+ __FUNCTION__);
+ if (mProcessCaptureResultLock.timedLock(1000000000 /* 1s */) != OK) {
+ ALOGE("%s: cannot acquire lock in 1s, dropping results",
+ __FUNCTION__);
+ // really don't know what to do, so bail out.
+ return hardware::Void();
+ }
}
+ for (const auto& result : results) {
+ processOneCaptureResultLocked(result);
+ }
+ mProcessCaptureResultLock.unlock();
return hardware::Void();
}
-void Camera3Device::processOneCaptureResult(
+void Camera3Device::processOneCaptureResultLocked(
const hardware::camera::device::V3_2::CaptureResult& result) {
camera3_capture_result r;
status_t res;
r.frame_number = result.frameNumber;
- if (result.result.size() != 0) {
- r.result = reinterpret_cast<const camera_metadata_t*>(result.result.data());
- size_t expected_metadata_size = result.result.size();
+
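+ // Result metadata arrives either through the result FMQ (fmqResultSize > 0) or inline in result.result.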
+ hardware::camera::device::V3_2::CameraMetadata resultMetadata;
+ if (result.fmqResultSize > 0) {
+ resultMetadata.resize(result.fmqResultSize);
+ if (mResultMetadataQueue == nullptr) {
+ return; // logged in initialize()
+ }
+ if (!mResultMetadataQueue->read(resultMetadata.data(), result.fmqResultSize)) {
+ ALOGE("%s: Frame %d: Cannot read camera metadata from fmq, size = %" PRIu64,
+ __FUNCTION__, result.frameNumber, result.fmqResultSize);
+ return;
+ }
+ } else {
+ resultMetadata.setToExternal(const_cast<uint8_t *>(result.result.data()),
+ result.result.size());
+ }
+
+ if (resultMetadata.size() != 0) {
+ r.result = reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
+ size_t expected_metadata_size = resultMetadata.size();
if ((res = validate_camera_metadata_structure(r.result, &expected_metadata_size)) != OK) {
ALOGE("%s: Frame %d: Invalid camera metadata received by camera service from HAL: %s (%d)",
__FUNCTION__, result.frameNumber, strerror(-res), res);
@@ -3132,7 +3180,7 @@
reinterpret_cast<const camera_metadata_t*>(request.data());
size_t expectedSize = request.size();
int ret = validate_camera_metadata_structure(r, &expectedSize);
- if (ret == OK) {
+ if (ret == OK || ret == CAMERA_METADATA_VALIDATION_SHIFTED) {
*requestTemplate = clone_camera_metadata(r);
if (*requestTemplate == nullptr) {
ALOGE("%s: Unable to clone camera metadata received from HAL",
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 844106b..8b76a97 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -185,6 +185,7 @@
// internal typedefs
using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
+ using ResultMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
static const size_t kDumpLockAttempts = 10;
static const size_t kDumpSleepDuration = 100000; // 0.10 sec
@@ -223,6 +224,9 @@
// Flag indicating is the current active stream configuration is constrained high speed.
bool mIsConstrainedHighSpeedConfiguration;
+ // FMQ to write result on. Must be guarded by mProcessCaptureResultLock.
+ std::unique_ptr<ResultMetadataQueue> mResultMetadataQueue;
+
/**** Scope for mLock ****/
/**
@@ -290,9 +294,7 @@
size_t result = 1;
result = 31 * result + buf->numFds;
- result = 31 * result + buf->numInts;
- int length = buf->numFds + buf->numInts;
- for (int i = 0; i < length; i++) {
+ for (int i = 0; i < buf->numFds; i++) {
result = 31 * result + buf->data[i];
}
return result;
@@ -301,9 +303,8 @@
struct BufferComparator {
bool operator()(const buffer_handle_t& buf1, const buffer_handle_t& buf2) const {
- if (buf1->numFds == buf2->numFds && buf1->numInts == buf2->numInts) {
- int length = buf1->numFds + buf1->numInts;
- for (int i = 0; i < length; i++) {
+ if (buf1->numFds == buf2->numFds) {
+ for (int i = 0; i < buf1->numFds; i++) {
if (buf1->data[i] != buf2->data[i]) {
return false;
}
@@ -463,12 +464,15 @@
const hardware::hidl_vec<
hardware::camera::device::V3_2::NotifyMsg>& msgs) override;
- // Handle one capture result
- void processOneCaptureResult(
+ // Handle one capture result. Assume that mProcessCaptureResultLock is held.
+ void processOneCaptureResultLocked(
const hardware::camera::device::V3_2::CaptureResult& results);
// Handle one notify message
void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
+ // lock to ensure only one processCaptureResult is called at a time.
+ Mutex mProcessCaptureResultLock;
+
/**
* Common initialization code shared by both HAL paths
*
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 3a4546b..c4e4cff 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -46,6 +46,11 @@
int main(int argc __unused, char** argv)
{
LOG(INFO) << "mediacodecservice starting";
+ bool treble = property_get_bool("persist.media.treble_omx", true);
+ if (treble) {
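+ // With Treble OMX enabled, use /dev/vndbinder for binder IPC.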
+ android::ProcessState::initWithDriver("/dev/vndbinder");
+ }
+
signal(SIGPIPE, SIG_IGN);
SetUpMinijail(kSystemSeccompPolicyPath, kVendorSeccompPolicyPath);
@@ -54,7 +59,7 @@
::android::hardware::configureRpcThreadpool(64, false);
sp<ProcessState> proc(ProcessState::self());
- if (property_get_bool("persist.media.treble_omx", true)) {
+ if (treble) {
using namespace ::android::hardware::media::omx::V1_0;
sp<IOmx> omx = new implementation::Omx();
if (omx == nullptr) {
diff --git a/services/mediadrm/FactoryLoader.h b/services/mediadrm/FactoryLoader.h
index 1e03e9b..d7f1118 100644
--- a/services/mediadrm/FactoryLoader.h
+++ b/services/mediadrm/FactoryLoader.h
@@ -88,7 +88,7 @@
}
// no luck, have to search
- String8 dirPath("/vendor/lib/mediacas");
+ String8 dirPath("/system/lib/mediacas");
DIR* pDir = opendir(dirPath.string());
if (pDir == NULL) {
@@ -123,7 +123,7 @@
results->clear();
- String8 dirPath("/vendor/lib/mediacas");
+ String8 dirPath("/system/lib/mediacas");
DIR* pDir = opendir(dirPath.string());
if (pDir == NULL) {
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 84fa227..65b17bc 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
#include <assert.h>
#include <map>
#include <mutex>
@@ -28,13 +32,18 @@
ANDROID_SINGLETON_STATIC_INSTANCE(AAudioEndpointManager);
AAudioEndpointManager::AAudioEndpointManager()
- : Singleton<AAudioEndpointManager>() {
+ : Singleton<AAudioEndpointManager>()
+ , mInputs()
+ , mOutputs() {
}
-AAudioServiceEndpoint *AAudioEndpointManager::findEndpoint(AAudioService &audioService, int32_t deviceId,
+AAudioServiceEndpoint *AAudioEndpointManager::openEndpoint(AAudioService &audioService, int32_t deviceId,
aaudio_direction_t direction) {
AAudioServiceEndpoint *endpoint = nullptr;
std::lock_guard<std::mutex> lock(mLock);
+
+ // Try to find an existing endpoint.
+ ALOGD("AAudioEndpointManager::openEndpoint(), device = %d, dir = %d", deviceId, direction);
switch (direction) {
case AAUDIO_DIRECTION_INPUT:
endpoint = mInputs[deviceId];
@@ -48,11 +57,11 @@
}
// If we can't find an existing one then open one.
- ALOGD("AAudioEndpointManager::findEndpoint(), found %p", endpoint);
+ ALOGD("AAudioEndpointManager::openEndpoint(), found %p", endpoint);
if (endpoint == nullptr) {
endpoint = new AAudioServiceEndpoint(audioService);
if (endpoint->open(deviceId, direction) != AAUDIO_OK) {
- ALOGD("AAudioEndpointManager::findEndpoint(), open failed");
+ ALOGE("AAudioEndpointManager::findEndpoint(), open failed");
delete endpoint;
endpoint = nullptr;
} else {
@@ -66,22 +75,37 @@
}
}
}
+
+ if (endpoint != nullptr) {
+ // Increment the reference count under this lock.
+ endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
+ }
+
return endpoint;
}
-// FIXME add reference counter for serviceEndpoints and removed on last use.
-
-void AAudioEndpointManager::removeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
- aaudio_direction_t direction = serviceEndpoint->getDirection();
- int32_t deviceId = serviceEndpoint->getDeviceId();
-
+void AAudioEndpointManager::closeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
std::lock_guard<std::mutex> lock(mLock);
- switch(direction) {
- case AAUDIO_DIRECTION_INPUT:
- mInputs.erase(deviceId);
- break;
- case AAUDIO_DIRECTION_OUTPUT:
- mOutputs.erase(deviceId);
- break;
+ if (serviceEndpoint == nullptr) {
+ return;
}
-}
\ No newline at end of file
+
+ // Decrement the reference count under this lock.
+ int32_t newRefCount = serviceEndpoint->getReferenceCount() - 1;
+ serviceEndpoint->setReferenceCount(newRefCount);
+ if (newRefCount <= 0) {
+ aaudio_direction_t direction = serviceEndpoint->getDirection();
+ int32_t deviceId = serviceEndpoint->getDeviceId();
+
+ switch (direction) {
+ case AAUDIO_DIRECTION_INPUT:
+ mInputs.erase(deviceId);
+ break;
+ case AAUDIO_DIRECTION_OUTPUT:
+ mOutputs.erase(deviceId);
+ break;
+ }
+ serviceEndpoint->close();
+ delete serviceEndpoint;
+ }
+}
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index 48b27f0..bbcfc1d 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -39,11 +39,11 @@
* @param direction
* @return endpoint or nullptr
*/
- AAudioServiceEndpoint *findEndpoint(android::AAudioService &audioService,
+ AAudioServiceEndpoint *openEndpoint(android::AAudioService &audioService,
int32_t deviceId,
aaudio_direction_t direction);
- void removeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
+ void closeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
private:
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 70da339..43203d4 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -41,7 +41,7 @@
memset(mOutputBuffer, 0, mBufferSizeInBytes);
}
-void AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+bool AAudioMixer::mix(FifoBuffer *fifo, float volume) {
WrappingBuffer wrappingBuffer;
float *destination = mOutputBuffer;
fifo_frames_t framesLeft = mFramesPerBurst;
@@ -67,9 +67,10 @@
}
fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst - framesLeft);
if (framesLeft > 0) {
- ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
- framesLeft, mFramesPerBurst);
+ //ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
+ // framesLeft, mFramesPerBurst);
}
+ return (framesLeft > 0); // did not get all the frames we needed, i.e. "underflow"
}
void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames, float volume) {
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index 2191183..9155fec 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -31,7 +31,13 @@
void clear();
- void mix(android::FifoBuffer *fifo, float volume);
+ /**
+ * Mix from this FIFO
+ * @param fifo
+ * @param volume
+ * @return true if underflowed
+ */
+ bool mix(android::FifoBuffer *fifo, float volume);
void mixPart(float *destination, float *source, int32_t numFrames, float volume);
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 723ef63..816d5ab 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -54,8 +54,8 @@
aaudio_result_t result = AAUDIO_OK;
AAudioServiceStreamBase *serviceStream = nullptr;
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+ bool sharingModeMatchRequired = request.isSharingModeMatchRequired();
aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
- ALOGE("AAudioService::openStream(): sharingMode = %d", sharingMode);
if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
ALOGE("AAudioService::openStream(): unrecognized sharing mode = %d", sharingMode);
@@ -77,8 +77,9 @@
}
// if SHARED requested or if EXCLUSIVE failed
- if (serviceStream == nullptr) {
- ALOGD("AAudioService::openStream(), sharingMode = AAUDIO_SHARING_MODE_SHARED");
+ if (sharingMode == AAUDIO_SHARING_MODE_SHARED
+ || (serviceStream == nullptr && !sharingModeMatchRequired)) {
+ ALOGD("AAudioService::openStream(), try AAUDIO_SHARING_MODE_SHARED");
serviceStream = new AAudioServiceStreamShared(*this);
result = serviceStream->open(request, configurationOutput);
configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
@@ -126,9 +127,7 @@
ALOGE("AAudioService::getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- ALOGD("AAudioService::getStreamDescription(), handle = 0x%08x", streamHandle);
aaudio_result_t result = serviceStream->getDescription(parcelable);
- ALOGD("AAudioService::getStreamDescription(), result = %d", result);
// parcelable.dump();
return result;
}
@@ -140,7 +139,6 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->start();
- ALOGD("AAudioService::startStream(), serviceStream->start() returned %d", result);
return result;
}
@@ -154,6 +152,16 @@
return result;
}
+aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::pauseStream(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->stop();
+ return result;
+}
+
aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream == nullptr) {
@@ -168,7 +176,6 @@
pid_t clientThreadId,
int64_t periodNanoseconds) {
AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
if (serviceStream == nullptr) {
ALOGE("AAudioService::registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
@@ -193,7 +200,6 @@
pid_t clientProcessId,
pid_t clientThreadId) {
AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
- ALOGI("AAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
if (serviceStream == nullptr) {
ALOGE("AAudioService::unregisterAudioThread(), illegal stream handle = 0x%0x",
streamHandle);
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index 5a7a2b6..f5a7d2f 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -57,6 +57,8 @@
virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle);
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle);
+
virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 80551c9..b197798 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -14,6 +14,17 @@
* limitations under the License.
*/
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
#include <algorithm>
#include <mutex>
#include <vector>
@@ -30,6 +41,12 @@
// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4
+// This is the maximum size in frames. The effective size can be tuned smaller at runtime.
+#define DEFAULT_BUFFER_CAPACITY (48 * 8)
+
+// Use 2 for "double buffered"
+#define BUFFER_SIZE_IN_BURSTS 2
+
// The mStreamInternal will use a service interface that does not go through Binder.
AAudioServiceEndpoint::AAudioServiceEndpoint(AAudioService &audioService)
: mStreamInternal(audioService, true)
@@ -43,11 +60,18 @@
aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId, aaudio_direction_t direction) {
AudioStreamBuilder builder;
builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+ // Don't fall back to SHARED because that would cause recursion.
+ builder.setSharingModeMatchRequired(true);
builder.setDeviceId(deviceId);
builder.setDirection(direction);
+ builder.setBufferCapacity(DEFAULT_BUFFER_CAPACITY);
+
aaudio_result_t result = mStreamInternal.open(builder);
if (result == AAUDIO_OK) {
mMixer.allocate(mStreamInternal.getSamplesPerFrame(), mStreamInternal.getFramesPerBurst());
+
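+ // Limit the effective buffer size to BUFFER_SIZE_IN_BURSTS bursts ("double buffered").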
+ int32_t desiredBufferSize = BUFFER_SIZE_IN_BURSTS * mStreamInternal.getFramesPerBurst();
+ mStreamInternal.setBufferSize(desiredBufferSize);
}
return result;
}
@@ -58,15 +82,12 @@
// TODO, maybe use an interface to reduce exposure
aaudio_result_t AAudioServiceEndpoint::registerStream(AAudioServiceStreamShared *sharedStream) {
- ALOGD("AAudioServiceEndpoint::registerStream(%p)", sharedStream);
- // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
std::lock_guard<std::mutex> lock(mLockStreams);
mRegisteredStreams.push_back(sharedStream);
return AAUDIO_OK;
}
aaudio_result_t AAudioServiceEndpoint::unregisterStream(AAudioServiceStreamShared *sharedStream) {
- ALOGD("AAudioServiceEndpoint::unregisterStream(%p)", sharedStream);
std::lock_guard<std::mutex> lock(mLockStreams);
mRegisteredStreams.erase(std::remove(mRegisteredStreams.begin(), mRegisteredStreams.end(), sharedStream),
mRegisteredStreams.end());
@@ -75,7 +96,6 @@
aaudio_result_t AAudioServiceEndpoint::startStream(AAudioServiceStreamShared *sharedStream) {
// TODO use real-time technique to avoid mutex, eg. atomic command FIFO
- ALOGD("AAudioServiceEndpoint(): startStream() entering");
std::lock_guard<std::mutex> lock(mLockStreams);
mRunningStreams.push_back(sharedStream);
if (mRunningStreams.size() == 1) {
@@ -106,13 +126,10 @@
// Render audio in the application callback and then write the data to the stream.
void *AAudioServiceEndpoint::callbackLoop() {
- aaudio_result_t result = AAUDIO_OK;
-
ALOGD("AAudioServiceEndpoint(): callbackLoop() entering");
+ int32_t underflowCount = 0;
- result = mStreamInternal.requestStart();
- ALOGD("AAudioServiceEndpoint(): callbackLoop() after requestStart() %d, isPlaying() = %d",
- result, (int) mStreamInternal.isPlaying());
+ aaudio_result_t result = mStreamInternal.requestStart();
// result might be a frame count
while (mCallbackEnabled.load() && mStreamInternal.isPlaying() && (result >= 0)) {
@@ -123,12 +140,14 @@
for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
float volume = 0.5; // TODO get from system
- mMixer.mix(fifo, volume);
+ bool underflowed = mMixer.mix(fifo, volume);
+ underflowCount += underflowed ? 1 : 0;
+ // TODO log underflows in each stream
+ sharedStream->markTransferTime(AudioClock::getNanoseconds());
}
}
// Write audio data to stream using a blocking write.
- ALOGD("AAudioServiceEndpoint(): callbackLoop() write(%d)", getFramesPerBurst());
int64_t timeoutNanos = calculateReasonableTimeout(mStreamInternal.getFramesPerBurst());
result = mStreamInternal.write(mMixer.getOutputBuffer(), getFramesPerBurst(), timeoutNanos);
if (result == AAUDIO_ERROR_DISCONNECTED) {
@@ -141,11 +160,9 @@
}
}
- ALOGD("AAudioServiceEndpoint(): callbackLoop() exiting, result = %d, isPlaying() = %d",
- result, (int) mStreamInternal.isPlaying());
-
result = mStreamInternal.requestStop();
+ ALOGD("AAudioServiceEndpoint(): callbackLoop() exiting, %d underflows", underflowCount);
return NULL; // TODO review
}
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 020d38a..a4ceae6 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -56,6 +56,16 @@
void *callbackLoop();
+ // This should only be called from the AAudioEndpointManager under a mutex.
+ int32_t getReferenceCount() const {
+ return mReferenceCount;
+ }
+
+ // This should only be called from the AAudioEndpointManager under a mutex.
+ void setReferenceCount(int32_t count) {
+ mReferenceCount = count;
+ }
+
private:
aaudio_result_t startMixer_l();
aaudio_result_t stopMixer_l();
@@ -64,13 +74,14 @@
AudioStreamInternal mStreamInternal;
AAudioMixer mMixer;
- AAudioServiceStreamMMAP mStreamMMAP;
std::atomic<bool> mCallbackEnabled;
+ int32_t mReferenceCount = 0;
std::mutex mLockStreams;
std::vector<AAudioServiceStreamShared *> mRegisteredStreams;
std::vector<AAudioServiceStreamShared *> mRunningStreams;
+
};
} /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index b15043d..d8882c9 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -63,6 +63,7 @@
}
aaudio_result_t AAudioServiceStreamBase::start() {
+ ALOGD("AAudioServiceStreamBase::start() send AAUDIO_SERVICE_EVENT_STARTED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
mState = AAUDIO_STREAM_STATE_STARTED;
mThreadEnabled.store(true);
@@ -78,14 +79,37 @@
processError();
return result;
}
+ ALOGD("AAudioServiceStreamBase::pause() send AAUDIO_SERVICE_EVENT_PAUSED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
mState = AAUDIO_STREAM_STATE_PAUSED;
return result;
}
+aaudio_result_t AAudioServiceStreamBase::stop() {
+ // TODO wait for data to be played out
+ sendCurrentTimestamp();
+ mThreadEnabled.store(false);
+ aaudio_result_t result = mAAudioThread.stop();
+ if (result != AAUDIO_OK) {
+ processError();
+ return result;
+ }
+ ALOGD("AAudioServiceStreamBase::stop() send AAUDIO_SERVICE_EVENT_STOPPED");
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
+ mState = AAUDIO_STREAM_STATE_STOPPED;
+ return result;
+}
+
+aaudio_result_t AAudioServiceStreamBase::flush() {
+ ALOGD("AAudioServiceStreamBase::flush() send AAUDIO_SERVICE_EVENT_FLUSHED");
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+ mState = AAUDIO_STREAM_STATE_FLUSHED;
+ return AAUDIO_OK;
+}
+
// implement Runnable
void AAudioServiceStreamBase::run() {
- ALOGD("AAudioServiceStreamMMAP::run() entering ----------------");
+ ALOGD("AAudioServiceStreamBase::run() entering ----------------");
TimestampScheduler timestampScheduler;
timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
timestampScheduler.start(AudioClock::getNanoseconds());
@@ -102,7 +126,7 @@
AudioClock::sleepUntilNanoTime(nextTime);
}
}
- ALOGD("AAudioServiceStreamMMAP::run() exiting ----------------");
+ ALOGD("AAudioServiceStreamBase::run() exiting ----------------");
}
void AAudioServiceStreamBase::processError() {
@@ -122,6 +146,10 @@
aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+ if (mUpMessageQueue == nullptr) {
+ ALOGE("writeUpMessageQueue(): mUpMessageQueue null! - stream not open");
+ return AAUDIO_ERROR_NULL;
+ }
int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
if (count != 1) {
ALOGE("writeUpMessageQueue(): Queue full. Did client die?");
@@ -133,9 +161,11 @@
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
AAudioServiceMessage command;
+ //ALOGD("sendCurrentTimestamp() called");
aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
+ //ALOGD("sendCurrentTimestamp(): position %d", (int) command.timestamp.position);
command.what = AAudioServiceMessage::code::TIMESTAMP;
result = writeUpMessageQueue(&command);
}
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 91eec35..d6b6ee3 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -17,6 +17,7 @@
#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
#define AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+#include <assert.h>
#include <mutex>
#include "fifo/FifoBuffer.h"
@@ -60,17 +61,22 @@
/**
* Start the flow of data.
*/
- virtual aaudio_result_t start() = 0;
+ virtual aaudio_result_t start();
/**
* Stop the flow of data such that start() can resume with loss of data.
*/
- virtual aaudio_result_t pause() = 0;
+ virtual aaudio_result_t pause();
+
+ /**
+ * Stop the flow of data after data in buffer has played.
+ */
+ virtual aaudio_result_t stop();
/**
* Discard any data held by the underlying HAL or Service.
*/
- virtual aaudio_result_t flush() = 0;
+ virtual aaudio_result_t flush();
// -------------------------------------------------------------------
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index b70c625..b2e7fc9 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -55,6 +55,11 @@
aaudio_result_t AAudioServiceStreamMMAP::close() {
ALOGD("AAudioServiceStreamMMAP::close() called, %p", mMmapStream.get());
mMmapStream.clear(); // TODO review. Is that all we have to do?
+ // Apparently the above close is asynchronous. An attempt to open a new device
+ // right after a close can fail. Also some callbacks may still be in flight!
+ // FIXME Make closing synchronous.
+ AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
+
return AAudioServiceStreamBase::close();
}
@@ -79,8 +84,8 @@
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
audio_port_handle_t deviceId = configurationInput.getDeviceId();
- ALOGI("open request dump()");
- request.dump();
+ // ALOGI("open request dump()");
+ // request.dump();
mMmapClient.clientUid = request.getUserId();
mMmapClient.clientPid = request.getProcessId();
@@ -198,16 +203,25 @@
return (result1 != AAUDIO_OK) ? result1 : result2;
}
+aaudio_result_t AAudioServiceStreamMMAP::stop() {
+ if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+
+ aaudio_result_t result1 = AAudioServiceStreamBase::stop();
+ aaudio_result_t result2 = mMmapStream->stop(mPortHandle);
+ mFramesRead.reset32();
+ return (result1 != AAUDIO_OK) ? result1 : result2;
+}
+
/**
* Discard any data held by the underlying HAL or Service.
*/
aaudio_result_t AAudioServiceStreamMMAP::flush() {
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
// TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
- ALOGD("AAudioServiceStreamMMAP::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
+ ALOGD("AAudioServiceStreamMMAP::flush() send AAUDIO_SERVICE_EVENT_FLUSHED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
mState = AAUDIO_STREAM_STATE_FLUSHED;
- return AAUDIO_OK;
+ return AAudioServiceStreamBase::flush();
}
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index f121c5c..a8e63a6 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -66,6 +66,8 @@
*/
aaudio_result_t pause() override;
+ aaudio_result_t stop() override;
+
/**
* Discard any data held by the underlying HAL or Service.
*
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index cd9336b..b5d9927 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -61,7 +61,7 @@
ALOGD("AAudioServiceStreamShared::open(), direction = %d", direction);
AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
- mServiceEndpoint = mEndpointManager.findEndpoint(mAudioService, deviceId, direction);
+ mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService, deviceId, direction);
ALOGD("AAudioServiceStreamShared::open(), mServiceEndPoint = %p", mServiceEndpoint);
if (mServiceEndpoint == nullptr) {
return AAUDIO_ERROR_UNAVAILABLE;
@@ -72,6 +72,7 @@
if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
} else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+ ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need FLOAT", mAudioFormat);
return AAUDIO_ERROR_INVALID_FORMAT;
}
@@ -79,6 +80,8 @@
if (mSampleRate == AAUDIO_FORMAT_UNSPECIFIED) {
mSampleRate = mServiceEndpoint->getSampleRate();
} else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
+ ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need %d",
+ mSampleRate, mServiceEndpoint->getSampleRate());
return AAUDIO_ERROR_INVALID_RATE;
}
@@ -86,17 +89,22 @@
if (mSamplesPerFrame == AAUDIO_FORMAT_UNSPECIFIED) {
mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
} else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
+ ALOGE("AAudioServiceStreamShared::open(), mSamplesPerFrame = %d, need %d",
+ mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
return AAUDIO_ERROR_OUT_OF_RANGE;
}
// Determine this stream's shared memory buffer capacity.
mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
int32_t minCapacityFrames = configurationInput.getBufferCapacity();
- int32_t numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
- if (numBursts < MIN_BURSTS_PER_BUFFER) {
- numBursts = MIN_BURSTS_PER_BUFFER;
- } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
- numBursts = MAX_BURSTS_PER_BUFFER;
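+ // Default to the maximum; if the client requested a capacity, round up to whole bursts and clamp to the allowed range.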
+ int32_t numBursts = MAX_BURSTS_PER_BUFFER;
+ if (minCapacityFrames != AAUDIO_UNSPECIFIED) {
+ numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
+ if (numBursts < MIN_BURSTS_PER_BUFFER) {
+ numBursts = MIN_BURSTS_PER_BUFFER;
+ } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
+ numBursts = MAX_BURSTS_PER_BUFFER;
+ }
}
mCapacityInFrames = numBursts * mFramesPerBurst;
ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
@@ -122,8 +130,12 @@
* An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
*/
aaudio_result_t AAudioServiceStreamShared::start() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
// Add this stream to the mixer.
- aaudio_result_t result = mServiceEndpoint->startStream(this);
+ aaudio_result_t result = endpoint->startStream(this);
if (result != AAUDIO_OK) {
ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
processError();
@@ -139,15 +151,31 @@
* An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
*/
aaudio_result_t AAudioServiceStreamShared::pause() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
// Add this stream to the mixer.
- aaudio_result_t result = mServiceEndpoint->stopStream(this);
+ aaudio_result_t result = endpoint->stopStream(this);
+ if (result != AAUDIO_OK) {
+ ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
+ processError();
+ }
+ return AAudioServiceStreamBase::pause();
+}
+
+aaudio_result_t AAudioServiceStreamShared::stop() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Remove this stream from the mixer.
+ aaudio_result_t result = endpoint->stopStream(this);
if (result != AAUDIO_OK) {
ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
processError();
- } else {
- result = AAudioServiceStreamBase::start();
}
- return AAUDIO_OK;
+ return AAudioServiceStreamBase::stop();
}
/**
@@ -157,15 +185,21 @@
*/
aaudio_result_t AAudioServiceStreamShared::flush() {
// TODO make sure we are paused
- return AAUDIO_OK;
+ // TODO actually flush the data
+ return AAudioServiceStreamBase::flush();
}
aaudio_result_t AAudioServiceStreamShared::close() {
pause();
// TODO wait for pause() to synchronize
- mServiceEndpoint->unregisterStream(this);
- mServiceEndpoint->close();
- mServiceEndpoint = nullptr;
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint != nullptr) {
+ endpoint->unregisterStream(this);
+
+ AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
+ mEndpointManager.closeEndpoint(endpoint);
+ mServiceEndpoint = nullptr;
+ }
return AAudioServiceStreamBase::close();
}
@@ -189,10 +223,15 @@
mServiceEndpoint = nullptr;
}
+void AAudioServiceStreamShared::markTransferTime(int64_t nanoseconds) {
+ mMarkedPosition = mAudioDataQueue->getFifoBuffer()->getReadCounter();
+ mMarkedTime = nanoseconds;
+}
aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
int64_t *timeNanos) {
- *positionFrames = mAudioDataQueue->getFifoBuffer()->getReadCounter();
- *timeNanos = AudioClock::getNanoseconds();
+ // TODO get these two numbers as an atomic pair
+ *positionFrames = mMarkedPosition;
+ *timeNanos = mMarkedTime;
return AAUDIO_OK;
}
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index f6df7ce..b981387 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -66,6 +66,11 @@
aaudio_result_t pause() override;
/**
+ * Stop the flow of data after data in buffer has played.
+ */
+ aaudio_result_t stop() override;
+
+ /**
* Discard any data held by the underlying HAL or Service.
*
* This is not guaranteed to be synchronous but it currently is.
@@ -77,6 +82,11 @@
android::FifoBuffer *getDataFifoBuffer() { return mAudioDataQueue->getFifoBuffer(); }
+ /* Keep a record of when a buffer transfer completed.
+ * This allows for a more accurate timing model.
+ */
+ void markTransferTime(int64_t nanoseconds);
+
void onStop();
void onDisconnect();
@@ -91,6 +101,9 @@
android::AAudioService &mAudioService;
AAudioServiceEndpoint *mServiceEndpoint = nullptr;
SharedRingBuffer *mAudioDataQueue;
+
+ int64_t mMarkedPosition = 0;
+ int64_t mMarkedTime = 0;
};
} /* namespace aaudio */
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 3e7a7ce..e21aae3 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -17,6 +17,9 @@
include $(CLEAR_VARS)
ifeq ($(SOUND_TRIGGER_USE_STUB_MODULE), 1)
+ ifneq ($(USE_LEGACY_LOCAL_AUDIO_HAL), true)
+ $(error Requires building with USE_LEGACY_LOCAL_AUDIO_HAL=true)
+ endif
LOCAL_CFLAGS += -DSOUND_TRIGGER_USE_STUB_MODULE
endif
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
index 7cc8a2b..0cd5cf7 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.cpp
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -252,6 +252,8 @@
SoundTriggerHalHidl::SoundTriggerHalHidl(const char *moduleName)
: mModuleName(moduleName), mNextUniqueId(1)
{
+ LOG_ALWAYS_FATAL_IF(strcmp(mModuleName, "primary") != 0,
+ "Treble soundtrigger only supports primary module");
}
SoundTriggerHalHidl::~SoundTriggerHalHidl()
@@ -265,9 +267,7 @@
if (mModuleName == NULL) {
mModuleName = "primary";
}
- std::string serviceName = "sound_trigger.";
- serviceName.append(mModuleName);
- mISoundTrigger = ISoundTriggerHw::getService(serviceName);
+ mISoundTrigger = ISoundTriggerHw::getService();
if (mISoundTrigger != 0) {
mISoundTrigger->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
}