Merge "Cleaning departed users from OWNERS"
diff --git a/media/libheadtracking/StillnessDetector-test.cpp b/media/libheadtracking/StillnessDetector-test.cpp
index a53ba8c..29b036e 100644
--- a/media/libheadtracking/StillnessDetector-test.cpp
+++ b/media/libheadtracking/StillnessDetector-test.cpp
@@ -84,8 +84,8 @@
const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
const Pose3f withinThreshold =
- baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.01) * rotateY(-0.01));
- const Pose3f outsideThreshold = baseline * Pose3f(rotateZ(0.08));
+ baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.03) * rotateY(-0.03));
+ const Pose3f outsideThreshold = baseline * Pose3f(rotateZ(0.06));
EXPECT_FALSE(detector.calculate(0));
detector.setInput(0, baseline);
EXPECT_FALSE(detector.calculate(0));
diff --git a/media/libheadtracking/StillnessDetector.cpp b/media/libheadtracking/StillnessDetector.cpp
index 832351d..5fa4e3a 100644
--- a/media/libheadtracking/StillnessDetector.cpp
+++ b/media/libheadtracking/StillnessDetector.cpp
@@ -19,7 +19,8 @@
namespace android {
namespace media {
-StillnessDetector::StillnessDetector(const Options& options) : mOptions(options) {}
+StillnessDetector::StillnessDetector(const Options& options)
+ : mOptions(options), mCosHalfRotationalThreshold(cos(mOptions.rotationalThreshold / 2)) {}
void StillnessDetector::reset() {
mFifo.clear();
@@ -77,17 +78,15 @@
// Check translation. We use the L1 norm to reduce computational load at the expense of accuracy.
// The L1 norm is an upper bound for the actual (L2) norm, so this approach will err on the side
// of "not near".
- if ((pose1.translation() - pose2.translation()).lpNorm<1>() >=
- mOptions.translationalThreshold) {
+ if ((pose1.translation() - pose2.translation()).lpNorm<1>() > mOptions.translationalThreshold) {
return false;
}
- // Check orientation. We use the L1 norm of the imaginary components of the quaternion to reduce
- // computational load on expense of accuracy. For small angles, those components are approx.
- // equal to the angle of rotation and so the norm is approx. the total angle of rotation. The
- // L1 norm is an upper bound, so this approach will err on the side of "not near".
- if ((pose1.rotation().vec() - pose2.rotation().vec()).lpNorm<1>() >=
- mOptions.rotationalThreshold) {
+ // Check orientation.
+ // The angle x between the two quaternions exceeds the rotational threshold iff
+ // cos(x/2) < cos(threshold/2).
+ // cos(x/2) can be efficiently calculated as the dot product of both quaternions.
+ if (pose1.rotation().dot(pose2.rotation()) < mCosHalfRotationalThreshold) {
return false;
}
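
For reference, the two "near" checks above reduce to a small amount of math. A minimal, standalone sketch using Eigen directly (the function name and threshold parameters are hypothetical; this is not the StillnessDetector implementation):

    #include <cmath>
    #include <Eigen/Geometry>

    // Sketch of the translation and rotation proximity checks described above.
    bool posesAreNear(const Eigen::Vector3f& t1, const Eigen::Quaternionf& q1,
                      const Eigen::Vector3f& t2, const Eigen::Quaternionf& q2,
                      float translationalThreshold, float rotationalThreshold) {
        // Translation: the L1 norm is an upper bound of the L2 norm, so this errs
        // on the side of "not near" while avoiding a square root.
        if ((t1 - t2).lpNorm<1>() > translationalThreshold) {
            return false;
        }
        // Rotation: for unit quaternions with a consistent sign convention,
        // q1.dot(q2) == cos(x / 2), where x is the relative rotation angle.
        // cos is decreasing on [0, pi], so x > threshold iff cos(x/2) < cos(threshold/2).
        return q1.dot(q2) >= std::cos(rotationalThreshold / 2);
    }
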
diff --git a/media/libheadtracking/StillnessDetector.h b/media/libheadtracking/StillnessDetector.h
index fd26aa9..cae9d9d 100644
--- a/media/libheadtracking/StillnessDetector.h
+++ b/media/libheadtracking/StillnessDetector.h
@@ -83,6 +83,8 @@
};
const Options mOptions;
+ // Precalculated cos(mOptions.rotationalThreshold / 2)
+ const float mCosHalfRotationalThreshold;
std::deque<TimestampedPose> mFifo;
bool mWindowFull = false;
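
One detail behind the constructor change in StillnessDetector.cpp above: mCosHalfRotationalThreshold is computed from mOptions in the member initializer list, which is safe only because mOptions is declared first (members are initialized in declaration order, not initializer-list order). A minimal sketch of the same pattern, with hypothetical names:

    #include <cmath>

    struct Detector {
        explicit Detector(float rotationalThreshold)
            : mThreshold(rotationalThreshold),
              // OK: mThreshold is declared before mCosHalfThreshold, so it is
              // already initialized when this initializer runs.
              mCosHalfThreshold(std::cos(mThreshold / 2)) {}

        const float mThreshold;
        const float mCosHalfThreshold;
    };
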
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 46c701c..0e4dfcf 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -175,6 +175,24 @@
"log_session_id",
};
+static constexpr const char * const AudioTrackStatusFields[] = {
+ "mediametrics_audiotrackstatus_reported",
+ "status",
+ "debug_message",
+ "sub_code",
+ "uid",
+ "event",
+ "flags",
+ "content_type",
+ "usage",
+ "encoding",
+ "channel_mask",
+ "buffer_frame_count",
+ "sample_rate",
+ "speed",
+ "pitch",
+};
+
static constexpr const char * const AudioDeviceConnectionFields[] = {
"mediametrics_audiodeviceconnection_reported",
"input_devices",
@@ -516,6 +534,86 @@
// Add to the heat map - we automatically track every item's status to see
// the types of errors and the frequency of errors.
mHeatMap.add(prefixKey, suffixKey, eventStr, statusString, uid, message, subCode);
+
+ // Certain keys/event pairs are sent to statsd.
+ // Note that the prefixes often end with a '.', so we use startsWith.
+ if (startsWith(key, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK)
+ && eventStr == AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE) {
+ const int atom_status = types::lookup<types::STATUS, int32_t>(statusString);
+
+ // currently we only send create status events.
+ const int32_t event =
+ android::util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__EVENT__EVENT_CREATE;
+
+ // The following fields should all be present in a create event.
+ std::string flagsStr;
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_ORIGINALFLAGS, &flagsStr),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_ORIGINALFLAGS);
+ const auto flags = types::lookup<types::OUTPUT_FLAG, int32_t>(flagsStr);
+
+ // AMEDIAMETRICS_PROP_SESSIONID omitted from atom
+
+ std::string contentTypeStr;
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_CONTENTTYPE, &contentTypeStr),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_CONTENTTYPE);
+ const auto contentType = types::lookup<types::CONTENT_TYPE, int32_t>(contentTypeStr);
+
+ std::string usageStr;
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_USAGE, &usageStr),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_USAGE);
+ const auto usage = types::lookup<types::USAGE, int32_t>(usageStr);
+
+ // AMEDIAMETRICS_PROP_SELECTEDDEVICEID omitted from atom
+
+ std::string encodingStr;
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_ENCODING, &encodingStr),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_ENCODING);
+ const auto encoding = types::lookup<types::ENCODING, int32_t>(encodingStr);
+
+ int32_t channelMask = 0;
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_CHANNELMASK, &channelMask),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_CHANNELMASK);
+ int32_t frameCount = 0;
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_FRAMECOUNT, &frameCount),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_FRAMECOUNT);
+ int32_t sampleRate = 0;
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_SAMPLERATE, &sampleRate),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_SAMPLERATE);
+ double speed = 0.f; // default is 1.f
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, &speed),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_PLAYBACK_SPEED);
+ double pitch = 0.f; // default is 1.f
+ ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, &pitch),
+ "%s: %s missing %s field",
+ __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_PLAYBACK_PITCH);
+ const auto [ result, str ] = sendToStatsd(AudioTrackStatusFields,
+ CONDITION(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED)
+ , atom_status
+ , message.c_str()
+ , subCode
+ , uid
+ , event
+ , flags
+ , contentType
+ , usage
+ , encoding
+ , (int64_t)channelMask
+ , frameCount
+ , sampleRate
+ , (float)speed
+ , (float)pitch
+ );
+ ALOGV("%s: statsd %s", __func__, str.c_str());
+ mStatsdLog->log(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED, str);
+ }
}
// HELPER METHODS
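
Each field fetched above follows the same fetch-with-default pattern: the variable carries a fallback value, item->get() overwrites it only when the property is present, and the ALOGD_IF merely logs the absence without aborting the report. A small self-contained sketch of that control flow (the item type and property key below are stand-ins, not the mediametrics Item API):

    #include <cstdio>
    #include <map>
    #include <string>

    struct FakeItem {  // hypothetical stand-in for the mediametrics item
        std::map<std::string, std::string> props;
        bool get(const std::string& key, std::string* out) const {
            const auto it = props.find(key);
            if (it == props.end()) return false;
            *out = it->second;
            return true;
        }
    };

    std::string usageOrDefault(const FakeItem& item) {
        std::string usageStr;  // stays "" when the property is missing
        if (!item.get("usage", &usageStr)) {
            std::fprintf(stderr, "missing usage field\n");  // log only; keep going
        }
        return usageStr;  // later translated to an int by the types::lookup helpers
    }
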
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index b67967b..7e406cc 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -192,6 +192,31 @@
return map;
}
+const std::unordered_map<std::string, int32_t>& getStatusMap() {
+ // DO NOT MODIFY VALUES (OK to add new ones).
+ static std::unordered_map<std::string, int32_t> map {
+ {"",
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__OK},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_OK,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__OK},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_ARGUMENT},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_IO,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_IO},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_MEMORY},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_SECURITY},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_STATE,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_STATE},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_TIMEOUT},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN,
+ util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN},
+ };
+ return map;
+}
+
// Helper: Create the corresponding int32 from string flags split with '|'.
template <typename Traits>
int32_t int32FromFlags(const std::string &flags)
@@ -433,6 +458,17 @@
}
template <>
+int32_t lookup<STATUS>(const std::string &status)
+{
+ auto& map = getStatusMap();
+ auto it = map.find(status);
+ if (it == map.end()) {
+ return util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN;
+ }
+ return it->second;
+}
+
+template <>
int32_t lookup<THREAD_TYPE>(const std::string &threadType)
{
auto& map = getAudioThreadTypeMap();
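
A hedged usage sketch of the new lookup<STATUS> specialization, assuming the same headers and constants that AudioAnalytics.cpp above already uses; the point is that an unrecognized status string degrades to ERROR_UNKNOWN rather than failing:

    // "not-a-real-status" is a made-up string for illustration only.
    const std::string okStr{AMEDIAMETRICS_PROP_STATUS_VALUE_OK};
    const std::string bogusStr{"not-a-real-status"};
    const int32_t okAtom      = types::lookup<types::STATUS, int32_t>(okStr);
    const int32_t unknownAtom = types::lookup<types::STATUS, int32_t>(bogusStr);
    // okAtom      == ..._AUDIO_TRACK_STATUS_REPORTED__STATUS__OK
    // unknownAtom == ..._AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN
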
diff --git a/services/mediametrics/AudioTypes.h b/services/mediametrics/AudioTypes.h
index 4394d79..5dbff9b 100644
--- a/services/mediametrics/AudioTypes.h
+++ b/services/mediametrics/AudioTypes.h
@@ -39,6 +39,10 @@
};
// Enumeration for all the string translations to integers (generally int32_t) unless noted.
+// This is used to index the template method below:
+// template <AudioEnumCategory C, typename T, typename S> T lookup(const S &str);
+//
+// Keep AudioEnumCategory alphabetical; it is okay to add new translations in the middle.
enum AudioEnumCategory {
AAUDIO_DIRECTION,
AAUDIO_PERFORMANCE_MODE,
@@ -51,6 +55,7 @@
OUTPUT_DEVICE, // int64_t
OUTPUT_FLAG,
SOURCE_TYPE,
+ STATUS,
STREAM_TYPE,
THREAD_TYPE,
TRACK_TRAITS,
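
Since the comment added to AudioTypes.h describes AudioEnumCategory as the index into the lookup template, a standalone sketch of that dispatch style follows, using a hypothetical enum and maps (not the mediametrics ones):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    enum Category { COLOR, SHAPE };  // plays the role of AudioEnumCategory

    // The primary template is only declared; explicit specializations define the
    // behavior per category, mirroring the lookup<> declaration in AudioTypes.h.
    template <Category C, typename T, typename S>
    T lookup(const S& str);

    template <>
    int32_t lookup<COLOR>(const std::string& str) {
        static const std::unordered_map<std::string, int32_t> map{{"red", 1}, {"blue", 2}};
        const auto it = map.find(str);
        return it == map.end() ? 0 : it->second;  // fall back to a default value
    }

    template <>
    int32_t lookup<SHAPE>(const std::string& str) {
        static const std::unordered_map<std::string, int32_t> map{{"circle", 1}, {"square", 2}};
        const auto it = map.find(str);
        return it == map.end() ? 0 : it->second;
    }

    // Usage: lookup<COLOR, int32_t>(std::string("red")) == 1,
    //        lookup<SHAPE, int32_t>(std::string("hexagon")) == 0.
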