Merge "Expand Gatekeeper test" am: a3645d38e9 am: 3a17b7adf6 am: 2b180742ae am: 2049dbaf56 am: 6dee337ab2 am: 37844db726
Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1609723
Change-Id: I4ace0284ceff302c14f3b6962f3ce9f628e42857
diff --git a/audio/core/all-versions/default/Android.bp b/audio/core/all-versions/default/Android.bp
index 2785739..901b7ee 100644
--- a/audio/core/all-versions/default/Android.bp
+++ b/audio/core/all-versions/default/Android.bp
@@ -56,6 +56,7 @@
"android.hardware.audio-impl_headers",
"android.hardware.audio.common.util@all-versions",
"libaudioclient_headers",
+ "libaudioutils_headers",
"libaudio_system_headers",
"libhardware_headers",
"libmedia_headers",
diff --git a/audio/core/all-versions/default/StreamOut.cpp b/audio/core/all-versions/default/StreamOut.cpp
index 6eed3da..d027231 100644
--- a/audio/core/all-versions/default/StreamOut.cpp
+++ b/audio/core/all-versions/default/StreamOut.cpp
@@ -28,6 +28,7 @@
#include <HidlUtils.h>
#include <android/log.h>
+#include <audio_utils/Metadata.h>
#include <hardware/audio.h>
#include <util/CoreUtils.h>
#include <utils/Trace.h>
@@ -742,7 +743,11 @@
switch (event) {
case STREAM_EVENT_CBK_TYPE_CODEC_FORMAT_CHANGED: {
hidl_vec<uint8_t> audioMetadata;
- audioMetadata.setToExternal((uint8_t*)param, strlen((char*)param));
+ // void* param is the byte string buffer from byte_string_from_audio_metadata().
+ // As the byte string buffer may have embedded zeroes, we cannot use strlen()
+ // but instead use audio_utils::metadata::dataByteStringLen().
+ audioMetadata.setToExternal((uint8_t*)param, audio_utils::metadata::dataByteStringLen(
+ (const uint8_t*)param));
result = eventCallback->onCodecFormatChanged(audioMetadata);
} break;
default:
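
Note (illustration, not part of the patch): the comment above exists because the metadata blob produced by byte_string_from_audio_metadata() may contain embedded zero bytes, so a NUL-terminated length is the wrong measure. A minimal standalone C++ sketch of that failure mode follows; the buffer contents and the program are invented purely for illustration.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
        // Hypothetical serialized metadata whose payload happens to contain a 0x00 byte.
        const std::vector<uint8_t> payload = {'k', 'e', 'y', 0x00, 0x01, 0x02};
        // strlen() stops at the first zero byte and under-reports the size (3 here)...
        const std::size_t wrong = std::strlen(reinterpret_cast<const char*>(payload.data()));
        // ...while the true size (6 here) has to come from the serialization itself, which is
        // what audio_utils::metadata::dataByteStringLen() recovers for the HAL callback above.
        const std::size_t right = payload.size();
        return wrong == right ? 1 : 0;  // returns 0: the two lengths disagree
    }
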
diff --git a/automotive/can/1.0/default/libnl++/Android.bp b/automotive/can/1.0/default/libnl++/Android.bp
index 790adcd..2ebd1b4 100644
--- a/automotive/can/1.0/default/libnl++/Android.bp
+++ b/automotive/can/1.0/default/libnl++/Android.bp
@@ -35,6 +35,7 @@
"protocols/generic/Generic.cpp",
"protocols/generic/GenericMessageBase.cpp",
"protocols/generic/Unknown.cpp",
+ "protocols/generic/families/Mac80211hwsim.cpp",
"protocols/generic/families/Nl80211.cpp",
"protocols/route/Link.cpp",
"protocols/route/Route.cpp",
@@ -42,6 +43,7 @@
"protocols/MessageDefinition.cpp",
"protocols/NetlinkProtocol.cpp",
"protocols/all.cpp",
+ "protocols/structs.cpp",
"Attributes.cpp",
"MessageFactory.cpp",
"MessageMutator.cpp",
diff --git a/automotive/can/1.0/default/libnl++/MessageMutator.cpp b/automotive/can/1.0/default/libnl++/MessageMutator.cpp
index 00b48a6..de2a2b1 100644
--- a/automotive/can/1.0/default/libnl++/MessageMutator.cpp
+++ b/automotive/can/1.0/default/libnl++/MessageMutator.cpp
@@ -19,7 +19,7 @@
namespace android::nl {
MessageMutator::MessageMutator(nlmsghdr* buffer, size_t totalLen)
- : mConstBuffer(buffer, totalLen), mMutableBuffer(buffer) {
+ : mMutableBuffer(buffer), mTotalLen(totalLen) {
CHECK(totalLen >= sizeof(nlmsghdr));
}
@@ -27,8 +27,12 @@
return mMutableBuffer;
}
+Buffer<nlmsghdr> MessageMutator::constBuffer() const {
+ return {mMutableBuffer, mTotalLen};
+}
+
MessageMutator::operator Buffer<nlmsghdr>() const {
- return mConstBuffer;
+ return constBuffer();
}
uint64_t MessageMutator::read(Buffer<nlattr> attr) const {
@@ -37,7 +41,8 @@
void MessageMutator::write(Buffer<nlattr> attr, uint64_t val) const {
const auto attrData = attr.data<uint64_t>();
- const auto offset = mConstBuffer.getOffset(attrData);
+ // TODO(b/177251183): deduplicate this code against fragment()
+ const auto offset = constBuffer().getOffset(attrData);
CHECK(offset.has_value()) << "Trying to write attribute that's not a member of this message";
const auto writeableBuffer = reinterpret_cast<uint8_t*>(mMutableBuffer) + *offset;
@@ -47,4 +52,40 @@
memcpy(writeableBuffer, &val, std::min(sizeof(val), attrSize));
}
+MessageMutator MessageMutator::fragment(Buffer<nlmsghdr> buf) const {
+ const auto offset = constBuffer().getOffset(buf);
+ CHECK(offset.has_value()) << "Trying to modify a fragment outside of buffer range";
+
+ const auto writeableBuffer = reinterpret_cast<nlmsghdr*>(uintptr_t(mMutableBuffer) + *offset);
+ const auto len = buf.getRaw().len();
+ CHECK(len <= mTotalLen - *offset);
+
+ return {writeableBuffer, len};
+}
+
+MessageMutator::iterator MessageMutator::begin() const {
+ return {*this, constBuffer().begin()};
+}
+
+MessageMutator::iterator MessageMutator::end() const {
+ return {*this, constBuffer().end()};
+}
+
+MessageMutator::iterator::iterator(const MessageMutator& container,
+ Buffer<nlmsghdr>::iterator current)
+ : mContainer(container), mCurrent(current) {}
+
+MessageMutator::iterator MessageMutator::iterator::operator++() {
+ ++mCurrent;
+ return *this;
+}
+
+bool MessageMutator::iterator::operator==(const iterator& other) const {
+ return other.mCurrent == mCurrent;
+}
+
+const MessageMutator MessageMutator::iterator::operator*() const {
+ return mContainer.fragment(*mCurrent);
+}
+
} // namespace android::nl
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h b/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h
index d759a0a..4cabb9a 100644
--- a/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h
@@ -138,7 +138,7 @@
class raw_iterator : public iterator {
public:
iterator operator++() {
- this->mCurrent.mData++; // ignore alignment
+ ++this->mCurrent.mData; // ignore alignment
return *this;
}
const T& operator*() const { return *this->mCurrent.mData; }
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/Message.h b/automotive/can/1.0/default/libnl++/include/libnl++/Message.h
index 50b3c4b..29f397d 100644
--- a/automotive/can/1.0/default/libnl++/include/libnl++/Message.h
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/Message.h
@@ -34,7 +34,7 @@
* a single instance can only be used by a single thread - the one owning the underlying buffer).
*/
template <typename T>
-class Message {
+class Message : public Buffer<nlmsghdr> {
public:
/**
* Validate buffer contents as a message carrying T data and create instance of parsed message.
@@ -51,7 +51,7 @@
const auto attributes = buf.data<nlattr>(sizeof(T));
- return Message<T>(nlHeader, dataHeader, attributes);
+ return Message<T>(buf, nlHeader, dataHeader, attributes);
}
/**
@@ -94,8 +94,9 @@
const T* operator->() const { return &data; }
private:
- Message(const nlmsghdr& nlHeader, const T& dataHeader, Attributes attributes)
- : header(nlHeader), data(dataHeader), attributes(attributes) {}
+ Message(Buffer<nlmsghdr> buffer, const nlmsghdr& nlHeader, const T& dataHeader,
+ Attributes attributes)
+ : Buffer<nlmsghdr>(buffer), header(nlHeader), data(dataHeader), attributes(attributes) {}
};
} // namespace android::nl
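
Note (usage sketch, not part of the patch): since Message<T> now derives from Buffer<nlmsghdr>, a parsed message can be handed directly to buffer-based helpers such as toString(), assuming it is declared in libnl++/printer.h as used elsewhere in libnl++. The function name dump() and the assumption that parse() yields an optional are illustrative.

    #include <libnl++/Message.h>
    #include <libnl++/printer.h>

    #include <linux/genetlink.h>
    #include <linux/netlink.h>

    #include <string>

    std::string dump(android::nl::Buffer<nlmsghdr> buf) {
        const auto msg = android::nl::Message<genlmsghdr>::parse(buf);
        if (!msg.has_value()) return "invalid genetlink message";
        // Message<T> is itself a Buffer<nlmsghdr> after this change, so the raw buffer
        // does not need to be kept around just for printing.
        return android::nl::toString(*msg, NETLINK_GENERIC, true);
    }
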
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h b/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h
index 7d495e9..baadc44 100644
--- a/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h
@@ -53,9 +53,27 @@
*/
void write(Buffer<nlattr> attr, uint64_t val) const;
+ class iterator {
+ public:
+ iterator(const MessageMutator& container, Buffer<nlmsghdr>::iterator current);
+
+ iterator operator++();
+ bool operator==(const iterator& other) const;
+ const MessageMutator operator*() const;
+
+ protected:
+ const MessageMutator& mContainer;
+ Buffer<nlmsghdr>::iterator mCurrent;
+ };
+ iterator begin() const;
+ iterator end() const;
+
private:
- const Buffer<nlmsghdr> mConstBuffer;
nlmsghdr* mMutableBuffer;
+ size_t mTotalLen;
+
+ Buffer<nlmsghdr> constBuffer() const;
+ MessageMutator fragment(Buffer<nlmsghdr> buf) const;
};
} // namespace android::nl
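
Note (usage sketch, not part of the patch): the new begin()/end() let a MessageMutator over a multi-part netlink buffer be walked fragment by fragment, each fragment being a mutator bounded to a single nlmsghdr. Function and variable names below are illustrative.

    #include <libnl++/MessageMutator.h>

    #include <linux/netlink.h>

    #include <cstddef>

    // Walk a multi-part netlink blob of `totalLen` bytes starting at `buffer`.
    void touchFragments(nlmsghdr* buffer, std::size_t totalLen) {
        android::nl::MessageMutator mutator(buffer, totalLen);
        // The iterator declares operator== only, so the negation is spelled out explicitly.
        for (auto it = mutator.begin(); !(it == mutator.end()); ++it) {
            const android::nl::MessageMutator fragment = *it;
            // Each fragment is a mutator scoped to a single nlmsghdr, so any read()/write()
            // performed on it is bounds-checked against that one message.
            (void)fragment;
        }
    }
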
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/generic/families/mac80211_hwsim.h b/automotive/can/1.0/default/libnl++/include/libnl++/generic/families/mac80211_hwsim.h
new file mode 100644
index 0000000..9b811f8
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/generic/families/mac80211_hwsim.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// API definitions from kernel drivers/net/wireless/mac80211_hwsim.h
+
+#define BIT(n) (1 << (n))
+
+enum hwsim_tx_control_flags {
+ HWSIM_TX_CTL_REQ_TX_STATUS = BIT(0),
+ HWSIM_TX_CTL_NO_ACK = BIT(1),
+ HWSIM_TX_STAT_ACK = BIT(2),
+};
+
+enum {
+ HWSIM_CMD_UNSPEC,
+ HWSIM_CMD_REGISTER,
+ HWSIM_CMD_FRAME,
+ HWSIM_CMD_TX_INFO_FRAME,
+ HWSIM_CMD_NEW_RADIO,
+ HWSIM_CMD_DEL_RADIO,
+ HWSIM_CMD_GET_RADIO,
+ HWSIM_CMD_ADD_MAC_ADDR,
+ HWSIM_CMD_DEL_MAC_ADDR,
+};
+
+enum {
+ HWSIM_ATTR_UNSPEC,
+ HWSIM_ATTR_ADDR_RECEIVER,
+ HWSIM_ATTR_ADDR_TRANSMITTER,
+ HWSIM_ATTR_FRAME,
+ HWSIM_ATTR_FLAGS,
+ HWSIM_ATTR_RX_RATE,
+ HWSIM_ATTR_SIGNAL,
+ HWSIM_ATTR_TX_INFO,
+ HWSIM_ATTR_COOKIE,
+ HWSIM_ATTR_CHANNELS,
+ HWSIM_ATTR_RADIO_ID,
+ HWSIM_ATTR_REG_HINT_ALPHA2,
+ HWSIM_ATTR_REG_CUSTOM_REG,
+ HWSIM_ATTR_REG_STRICT_REG,
+ HWSIM_ATTR_SUPPORT_P2P_DEVICE,
+ HWSIM_ATTR_USE_CHANCTX,
+ HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE,
+ HWSIM_ATTR_RADIO_NAME,
+ HWSIM_ATTR_NO_VIF,
+ HWSIM_ATTR_FREQ,
+ HWSIM_ATTR_PAD,
+ HWSIM_ATTR_TX_INFO_FLAGS,
+ HWSIM_ATTR_PERM_ADDR,
+ HWSIM_ATTR_IFTYPE_SUPPORT,
+ HWSIM_ATTR_CIPHER_SUPPORT,
+};
+
+struct hwsim_tx_rate {
+ int8_t idx;
+ uint8_t count;
+} __packed;
+static_assert(sizeof(hwsim_tx_rate) == 2);
+
+#undef BIT
diff --git a/automotive/can/1.0/default/libnl++/printer.cpp b/automotive/can/1.0/default/libnl++/printer.cpp
index f08897e..d540482 100644
--- a/automotive/can/1.0/default/libnl++/printer.cpp
+++ b/automotive/can/1.0/default/libnl++/printer.cpp
@@ -154,16 +154,19 @@
}
}
-std::string toString(const Buffer<nlmsghdr> hdr, int protocol, bool printPayload) {
- if (!hdr.firstOk()) return "nlmsg{buffer overflow}";
+static void toStream(std::stringstream& ss, const Buffer<nlmsghdr> hdr, int protocol,
+ bool printPayload) {
+ if (!hdr.firstOk()) {
+ ss << "nlmsg{buffer overflow}";
+ return;
+ }
- std::stringstream ss;
ss << std::setfill('0');
auto protocolMaybe = protocols::get(protocol);
if (!protocolMaybe.has_value()) {
ss << "nlmsg{protocol=" << protocol << "}";
- return ss.str();
+ return;
}
protocols::NetlinkProtocol& protocolDescr = *protocolMaybe;
@@ -187,7 +190,7 @@
ss << ", crc=" << std::hex << std::setw(4) << crc16(hdr.data<uint8_t>()) << std::dec;
ss << '}';
- if (!printPayload) return ss.str();
+ if (!printPayload) return;
ss << ' ';
if (!msgDescMaybe.has_value()) {
@@ -210,6 +213,17 @@
}
ss << "}";
+}
+
+std::string toString(const Buffer<nlmsghdr> hdrs, int protocol, bool printPayload) {
+ std::stringstream ss;
+ bool first = true;
+ for (const auto hdr : hdrs) {
+ if (!first) ss << std::endl;
+ first = false;
+
+ toStream(ss, hdr, protocol, printPayload);
+ }
return ss.str();
}
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp b/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp
index 1e1ad12..478c383 100644
--- a/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp
@@ -16,6 +16,7 @@
#include "Ctrl.h"
+#include "families/Mac80211hwsim.h"
#include "families/Nl80211.h"
#include <libnl++/Message.h>
@@ -68,12 +69,15 @@
const auto familyId = msg.attributes.get<uint16_t>(CTRL_ATTR_FAMILY_ID);
const auto familyName = msg.attributes.get<std::string>(CTRL_ATTR_FAMILY_NAME);
- /* For now, we support just a single family. But if you add more, please define proper
+ /* For now, we support just two families. But if you add more, please define proper
* abstraction and not hardcode every name and class here.
*/
if (familyName == "nl80211") {
mFamilyRegister[familyId] = std::make_shared<families::Nl80211>(familyId);
}
+ if (familyName == "MAC80211_HWSIM") {
+ mFamilyRegister[familyId] = std::make_shared<families::Mac80211hwsim>(familyId);
+ }
}
} // namespace android::nl::protocols::generic
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp b/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp
index b7b811b..f92d6c0 100644
--- a/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp
@@ -40,9 +40,9 @@
ss << "genlmsghdr{";
if (commandName.has_value()) {
- ss << "cmd=" << unsigned(data.cmd);
- } else {
ss << "cmd=" << *commandName;
+ } else {
+ ss << "cmd=" << unsigned(data.cmd);
}
ss << ", version=" << unsigned(data.version);
if (data.reserved != 0) ss << ", reserved=" << data.reserved;
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.cpp b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.cpp
new file mode 100644
index 0000000..f85309e
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Mac80211hwsim.h"
+
+#include "../../structs.h"
+#include "common.h"
+
+#include <libnl++/generic/families/mac80211_hwsim.h>
+
+namespace android::nl::protocols::generic::families {
+
+using DataType = AttributeDefinition::DataType;
+using Flags = AttributeDefinition::Flags;
+
+static void hwsim_tx_rateToStream(std::stringstream& ss, const Buffer<nlattr> attr);
+
+static const FlagsMap txControlFlags{
+ {HWSIM_TX_CTL_REQ_TX_STATUS, "REQ_TX"},
+ {HWSIM_TX_CTL_NO_ACK, "NO_ACK"},
+ {HWSIM_TX_STAT_ACK, "ACK"},
+};
+
+// clang-format off
+Mac80211hwsim::Mac80211hwsim(nlmsgtype_t familyId) : GenericMessageBase(familyId, "hwsim", {
+ {HWSIM_CMD_UNSPEC, "UNSPEC"},
+ {HWSIM_CMD_REGISTER, "REGISTER"},
+ {HWSIM_CMD_FRAME, "FRAME"},
+ {HWSIM_CMD_TX_INFO_FRAME, "TX_INFO_FRAME"},
+ {HWSIM_CMD_NEW_RADIO, "NEW_RADIO"},
+ {HWSIM_CMD_DEL_RADIO, "DEL_RADIO"},
+ {HWSIM_CMD_GET_RADIO, "GET_RADIO"},
+ {HWSIM_CMD_ADD_MAC_ADDR, "ADD_MAC_ADDR"},
+ {HWSIM_CMD_DEL_MAC_ADDR, "DEL_MAC_ADDR"},
+}, {
+ {HWSIM_ATTR_UNSPEC, {"UNSPEC"}},
+ {HWSIM_ATTR_ADDR_RECEIVER, {"ADDR_RECEIVER", DataType::Struct, hwaddrToStream}},
+ {HWSIM_ATTR_ADDR_TRANSMITTER, {"ADDR_TRANSMITTER", DataType::Struct, hwaddrToStream}},
+ {HWSIM_ATTR_FRAME, {"FRAME", DataType::Raw, AttributeMap{}, Flags::Verbose}},
+ {HWSIM_ATTR_FLAGS, {"FLAGS", DataType::Struct, flagsToStream(txControlFlags)}},
+ {HWSIM_ATTR_RX_RATE, {"RX_RATE", DataType::Uint}},
+ {HWSIM_ATTR_SIGNAL, {"SIGNAL", DataType::Uint}},
+ {HWSIM_ATTR_TX_INFO, {"TX_INFO", DataType::Struct, hwsim_tx_rateToStream}},
+ {HWSIM_ATTR_COOKIE, {"COOKIE", DataType::Uint}},
+ {HWSIM_ATTR_CHANNELS, {"CHANNELS", DataType::Uint}},
+ {HWSIM_ATTR_RADIO_ID, {"RADIO_ID", DataType::Uint}},
+ {HWSIM_ATTR_REG_HINT_ALPHA2, {"REG_HINT_ALPHA2", DataType::String}},
+ {HWSIM_ATTR_REG_CUSTOM_REG, {"REG_CUSTOM_REG", DataType::Uint}},
+ {HWSIM_ATTR_REG_STRICT_REG, {"REG_STRICT_REG", DataType::Flag}},
+ {HWSIM_ATTR_SUPPORT_P2P_DEVICE, {"SUPPORT_P2P_DEVICE", DataType::Flag}},
+ {HWSIM_ATTR_USE_CHANCTX, {"USE_CHANCTX", DataType::Flag}},
+ {HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, {"DESTROY_RADIO_ON_CLOSE", DataType::Flag}},
+ {HWSIM_ATTR_RADIO_NAME, {"RADIO_NAME", DataType::String}},
+ {HWSIM_ATTR_NO_VIF, {"NO_VIF", DataType::Flag}},
+ {HWSIM_ATTR_FREQ, {"FREQ", DataType::Uint}},
+ {HWSIM_ATTR_PAD, {"PAD", DataType::Uint}},
+ {HWSIM_ATTR_TX_INFO_FLAGS, {"TX_INFO_FLAGS"}}, // hwsim_tx_rate_flag
+ {HWSIM_ATTR_PERM_ADDR, {"PERM_ADDR"}},
+ {HWSIM_ATTR_IFTYPE_SUPPORT, {"IFTYPE_SUPPORT", DataType::Uint}}, // NL80211_IFTYPE_STATION etc
+ {HWSIM_ATTR_CIPHER_SUPPORT, {"CIPHER_SUPPORT", DataType::Struct, arrayToStream<int32_t>}},
+}) {}
+// clang-format on
+
+static void hwsim_tx_rateToStream(std::stringstream& ss, const Buffer<nlattr> attr) {
+ ss << '{';
+ bool first = true;
+ for (const auto rate : attr.data<hwsim_tx_rate>().getRaw()) {
+ if (rate.idx == -1) continue;
+
+        if (!first) ss << ", ";
+        first = false;
+
+        ss << (int)rate.idx << ": " << (unsigned)rate.count;
+ }
+ ss << '}';
+}
+
+} // namespace android::nl::protocols::generic::families
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.h b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.h
new file mode 100644
index 0000000..c01eb93
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../GenericMessageBase.h"
+
+namespace android::nl::protocols::generic::families {
+
+class Mac80211hwsim : public GenericMessageBase {
+ public:
+ Mac80211hwsim(nlmsgtype_t familyId);
+};
+
+} // namespace android::nl::protocols::generic::families
diff --git a/automotive/can/1.0/default/libnl++/protocols/structs.cpp b/automotive/can/1.0/default/libnl++/protocols/structs.cpp
new file mode 100644
index 0000000..8ff71f0
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/protocols/structs.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "structs.h"
+
+#include <iomanip>
+
+namespace android::nl::protocols {
+
+AttributeDefinition::ToStream flagsToStream(FlagsMap flags) {
+ return [flags](std::stringstream& ss, const Buffer<nlattr> attr) {
+ auto val = attr.data<uint64_t>().copyFirst();
+
+ bool first = true;
+ for (const auto& [flag, name] : flags) {
+ if ((val & flag) != flag) continue;
+ val &= ~flag;
+
+ if (!first) ss << '|';
+ first = false;
+
+ ss << name;
+ }
+
+ if (val == 0) return;
+
+ if (!first) ss << '|';
+ ss << std::hex << val << std::dec;
+ };
+}
+
+void hwaddrToStream(std::stringstream& ss, const Buffer<nlattr> attr) {
+ ss << std::hex;
+ bool first = true;
+ for (const auto byte : attr.data<uint8_t>().getRaw()) {
+ if (!first) ss << ':';
+ first = false;
+
+ ss << std::setw(2) << unsigned(byte);
+ }
+ ss << std::dec;
+}
+
+} // namespace android::nl::protocols
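
Note (worked example, not part of the patch): flagsToStream() prints the names of all fully-set known bits joined by '|' and appends any leftover bits once in hex. The standalone function below mirrors that formatting for a raw value; the sample map and value are invented.

    #include <cstdint>
    #include <map>
    #include <sstream>
    #include <string>

    std::string formatFlags(uint64_t val, const std::map<uint64_t, std::string>& flags) {
        std::stringstream ss;
        bool first = true;
        for (const auto& [flag, name] : flags) {
            if ((val & flag) != flag) continue;  // only fully-set masks are named
            val &= ~flag;
            if (!first) ss << '|';
            first = false;
            ss << name;
        }
        if (val != 0) {
            if (!first) ss << '|';
            ss << std::hex << val;  // unknown leftover bits, printed once in hex
        }
        // e.g. {1: "REQ_TX", 2: "NO_ACK", 4: "ACK"} with val = 0x15 -> "REQ_TX|ACK|10"
        return ss.str();
    }
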
diff --git a/automotive/can/1.0/default/libnl++/protocols/structs.h b/automotive/can/1.0/default/libnl++/protocols/structs.h
index 44c17b8..f3a8c44 100644
--- a/automotive/can/1.0/default/libnl++/protocols/structs.h
+++ b/automotive/can/1.0/default/libnl++/protocols/structs.h
@@ -16,6 +16,8 @@
#pragma once
+#include "MessageDefinition.h"
+
#include <sstream>
namespace android::nl::protocols {
@@ -30,4 +32,9 @@
ss << '}';
}
+typedef std::map<uint64_t, std::string> FlagsMap;
+AttributeDefinition::ToStream flagsToStream(FlagsMap flags);
+
+void hwaddrToStream(std::stringstream& ss, const Buffer<nlattr> attr);
+
} // namespace android::nl::protocols
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
index abf33a3..81f3198 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
@@ -1245,6 +1245,7 @@
.access = VehiclePropertyAccess::READ,
.changeMode = VehiclePropertyChangeMode::ON_CHANGE,
},
+ .initialValue = {.int32Values = {0 /* ClusterHome */, -1 /* ClusterNone */}},
},
{
.config =
@@ -1253,6 +1254,8 @@
.access = VehiclePropertyAccess::READ,
.changeMode = VehiclePropertyChangeMode::ON_CHANGE,
},
+ .initialValue = {.int32Values = {0 /* Off */, -1, -1, -1, -1 /* Bounds */,
+ -1, -1, -1, -1 /* Insets */}},
},
{
.config =
@@ -1305,6 +1308,9 @@
.changeMode = VehiclePropertyChangeMode::ON_CHANGE,
.configArray = {0, 0, 0, 11, 0, 0, 0, 0, 16},
},
+ .initialValue = {.int32Values = {0 /* Off */, -1, -1, -1, -1 /* Bounds */,
+ -1, -1, -1, -1 /* Insets */,
+ 0 /* ClusterHome */, -1 /* ClusterNone */}},
},
{
.config =
@@ -1313,6 +1319,7 @@
.access = VehiclePropertyAccess::READ,
.changeMode = VehiclePropertyChangeMode::ON_CHANGE,
},
+ .initialValue = {.int32Values = {0 /* ClusterHome */}},
},
{
.config =
diff --git a/biometrics/face/aidl/vts/Android.bp b/biometrics/face/aidl/vts/Android.bp
index c5660b1..99c8c99 100644
--- a/biometrics/face/aidl/vts/Android.bp
+++ b/biometrics/face/aidl/vts/Android.bp
@@ -14,9 +14,14 @@
"use_libaidlvintf_gtest_helper_static",
],
srcs: ["VtsHalBiometricsFaceTargetTest.cpp"],
+ static_libs: [
+ "android.hardware.biometrics.common-V1-ndk_platform",
+ "android.hardware.biometrics.face-V1-ndk_platform",
+ "android.hardware.common-V2-ndk_platform",
+ "android.hardware.keymaster-V3-ndk_platform",
+ ],
shared_libs: [
"libbinder_ndk",
- "android.hardware.biometrics.face-V1-ndk_platform",
],
test_suites: [
"general-tests",
diff --git a/biometrics/fingerprint/aidl/default/tests/WorkerThreadTest.cpp b/biometrics/fingerprint/aidl/default/tests/WorkerThreadTest.cpp
index c548fe5..8443336 100644
--- a/biometrics/fingerprint/aidl/default/tests/WorkerThreadTest.cpp
+++ b/biometrics/fingerprint/aidl/default/tests/WorkerThreadTest.cpp
@@ -40,8 +40,7 @@
promise.set_value();
})));
- auto status = future.wait_for(1s);
- EXPECT_EQ(status, std::future_status::ready);
+ future.wait();
}
}
@@ -56,12 +55,11 @@
// Notify that the task has started.
promise.set_value();
// Block for a "very long" time.
- std::this_thread::sleep_for(2s);
+ std::this_thread::sleep_for(1s);
})));
// Make sure the long-running task began executing.
- auto status = future.wait_for(1s);
- ASSERT_EQ(status, std::future_status::ready);
+ future.wait();
// The first task is already being worked on, which means the queue must be empty.
// Fill the worker's queue to the maximum.
@@ -91,8 +89,7 @@
// Schedule a special task to signal when all of the tasks are finished.
worker.schedule(
Callable::from([promise = std::move(promise)]() mutable { promise.set_value(); }));
- auto status = future.wait_for(1s);
- ASSERT_EQ(status, std::future_status::ready);
+ future.wait();
ASSERT_EQ(results.size(), NUM_TASKS);
EXPECT_TRUE(std::is_sorted(results.begin(), results.end()));
@@ -115,8 +112,7 @@
})));
// The first task should start executing.
- auto status = future1.wait_for(1s);
- ASSERT_EQ(status, std::future_status::ready);
+ future1.wait();
// The second task should schedule successfully.
ASSERT_TRUE(
@@ -128,8 +124,7 @@
}
// The second task should never execute.
- auto status = future2.wait_for(1s);
- ASSERT_EQ(status, std::future_status::ready);
+ future2.wait();
// The future is expected to be ready but contain an exception.
// Cannot use ASSERT_THROW because exceptions are disabled in this codebase.
// ASSERT_THROW(future2.get(), std::future_error);
diff --git a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
index 7bb300e..deb420d 100644
--- a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
+++ b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
@@ -7155,17 +7155,24 @@
sp<device::V3_5::ICameraDevice>* device3_5 /*out*/,
sp<device::V3_7::ICameraDevice>* device3_7 /*out*/) {
ASSERT_NE(nullptr, device3_5);
- if (deviceVersion == CAMERA_DEVICE_API_VERSION_3_5) {
- auto castResult = device::V3_5::ICameraDevice::castFrom(device);
- ASSERT_TRUE(castResult.isOk());
- *device3_5 = castResult;
- }
-
ASSERT_NE(nullptr, device3_7);
- if (deviceVersion == CAMERA_DEVICE_API_VERSION_3_7) {
- auto castResult = device::V3_7::ICameraDevice::castFrom(device);
- ASSERT_TRUE(castResult.isOk());
- *device3_7 = castResult;
+
+ switch (deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_3_7: {
+ auto castResult = device::V3_7::ICameraDevice::castFrom(device);
+ ASSERT_TRUE(castResult.isOk());
+ *device3_7 = castResult;
+ }
+ [[fallthrough]];
+ case CAMERA_DEVICE_API_VERSION_3_5: {
+ auto castResult = device::V3_5::ICameraDevice::castFrom(device);
+ ASSERT_TRUE(castResult.isOk());
+ *device3_5 = castResult;
+ break;
+ }
+ default:
+ // no-op
+ return;
}
}
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 01cd1f6..32179b9 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -90,7 +90,7 @@
<version>2.0</version>
<interface>
<name>IVehicle</name>
- <instance>default</instance>
+ <regex-instance>.*</regex-instance>
</interface>
</hal>
<hal format="hidl" optional="true">
@@ -521,12 +521,20 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.soundtrigger</name>
- <version>2.3</version>
+ <version>2.3-4</version>
<interface>
<name>ISoundTriggerHw</name>
<instance>default</instance>
</interface>
</hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.soundtrigger3</name>
+ <version>1</version>
+ <interface>
+ <name>ISoundTriggerHw</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
<hal format="hidl" optional="true">
<name>android.hardware.tetheroffload.config</name>
<version>1.0</version>
diff --git a/current.txt b/current.txt
index bdbe1b8..3102972 100644
--- a/current.txt
+++ b/current.txt
@@ -785,6 +785,8 @@
550619f876cadbea1f718edce120f0e1dd4a6f4bd4c28b59d479677dc86b0aec android.hardware.neuralnetworks@1.3::types
c3fec5bd470984402997f78a74b6511efc4063b270f2bd9ee7b78f48b683a1bb android.hardware.neuralnetworks@1.3::IDevice
0fdfad62c2ec33b52e6687004e5a1971c02d10b93ee4d26df5ccff7ce032494a android.hardware.neuralnetworks@1.3::IPreparedModel
+b40c13f9a9affc806c778c1f8c78e90d4acb50f1d6a6be185d933d7a04b91c5b android.hardware.sensors@1.0::ISensors
+432086950205f5876da85dbd42004b0d0d05b429b9494b4f76a4d888758c5bd8 android.hardware.sensors@1.0::types
e8c86c69c438da8d1549856c1bb3e2d1b8da52722f8235ff49a30f2cce91742c android.hardware.soundtrigger@2.1::ISoundTriggerHwCallback
b9fbb6e2e061ed0960939d48b785e9700210add1f13ed32ecd688d0f1ca20ef7 android.hardware.renderscript@1.0::types
0f53d70e1eadf8d987766db4bf6ae2048004682168f4cab118da576787def3fa android.hardware.radio@1.0::types
@@ -854,7 +856,7 @@
f22813615be1445ddd817655c054fc69dc9efea56c9035cd0757f3cbed190641 android.hardware.radio.config@1.3::IRadioConfig
c9ad18729268593d14681d88ffad1c97e707444a45e1b4ed804dab949edbd84f android.hardware.radio.config@1.3::IRadioConfigResponse
fd43298c43f70130c747a642ee43b0c242ac0cebffb377faa24f2725f0aa6caf android.hardware.tetheroffload.control@1.1::IOffloadControl
-fe18c9032e4063efca3fff3c377dd69780de1f96e8e2bc3f7d100a5d8bd467b4 android.hardware.tetheroffload.control@1.1::ITetheringOffloadCallback
+ead4ec8713a2cb40906fe31ba793d21a6b1190143c446690d16a6ea686aa2fea android.hardware.tetheroffload.control@1.1::ITetheringOffloadCallback
e34b4c7bec5e032c14804707ca924dd6b99ed5ba139da7505fe7d698d0fe178f android.hardware.tetheroffload.control@1.1::types
63dfdb433ac73fb2bf4a44d2ade7b7e289e155835206d1939640d6c88d208994 android.hardware.tv.cec@1.1::IHdmiCec
b9682587677ce9c872e04f0e9fd6c9c78a56ae795c07cbf8c50100e0351d4c44 android.hardware.tv.cec@1.1::IHdmiCecCallback
diff --git a/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp b/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp
index 237e8ec..699ce9a 100644
--- a/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp
+++ b/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp
@@ -135,12 +135,29 @@
}
/*
+ * SetPositionMode:
+ * Helper function to set positioning mode and verify output
+ */
+ void SetPositionMode(const int min_interval_msec) {
+ const int kPreferredAccuracy = 0; // Ideally perfect (matches GnssLocationProvider)
+ const int kPreferredTimeMsec = 0; // Ideally immediate
+
+ auto result = gnss_hal_->setPositionMode(
+ IGnss::GnssPositionMode::MS_BASED, IGnss::GnssPositionRecurrence::RECURRENCE_PERIODIC,
+ min_interval_msec, kPreferredAccuracy, kPreferredTimeMsec);
+
+ ASSERT_TRUE(result.isOk());
+ EXPECT_TRUE(result);
+ }
+
+ /*
* StartAndGetSingleLocation:
* Helper function to get one Location and check fields
*
* returns true if a location was successfully generated
*/
- bool StartAndGetSingleLocation(bool checkAccuracies) {
+ bool StartAndGetSingleLocation(const bool checkAccuracies, const int min_interval_msec) {
+ SetPositionMode(min_interval_msec);
auto result = gnss_hal_->start();
EXPECT_TRUE(result.isOk());
@@ -349,37 +366,24 @@
* and checks them for reasonable validity.
*/
TEST_P(GnssHalTest, GetLocation) {
-#define MIN_INTERVAL_MSEC 500
-#define PREFERRED_ACCURACY 0 // Ideally perfect (matches GnssLocationProvider)
-#define PREFERRED_TIME_MSEC 0 // Ideally immediate
+ const int kMinIntervalMsec = 500;
+ const int kLocationTimeoutSubsequentSec = 3;
+ const int kLocationsToCheck = 5;
-#define LOCATION_TIMEOUT_SUBSEQUENT_SEC 3
-#define LOCATIONS_TO_CHECK 5
+ bool checkMoreAccuracies = (info_called_count_ > 0 && last_info_.yearOfHw >= 2017);
- bool checkMoreAccuracies =
- (info_called_count_ > 0 && last_info_.yearOfHw >= 2017);
+ /*
+ * GPS signals initially optional for this test, so don't expect timeout yet.
+ */
+ bool gotLocation = StartAndGetSingleLocation(checkMoreAccuracies, kMinIntervalMsec);
- auto result = gnss_hal_->setPositionMode(
- IGnss::GnssPositionMode::MS_BASED,
- IGnss::GnssPositionRecurrence::RECURRENCE_PERIODIC, MIN_INTERVAL_MSEC,
- PREFERRED_ACCURACY, PREFERRED_TIME_MSEC);
-
- ASSERT_TRUE(result.isOk());
- EXPECT_TRUE(result);
-
- /*
- * GPS signals initially optional for this test, so don't expect no timeout
- * yet
- */
- bool gotLocation = StartAndGetSingleLocation(checkMoreAccuracies);
-
- if (gotLocation) {
- for (int i = 1; i < LOCATIONS_TO_CHECK; i++) {
- EXPECT_EQ(std::cv_status::no_timeout, wait(LOCATION_TIMEOUT_SUBSEQUENT_SEC));
- EXPECT_EQ(location_called_count_, i + 1);
- CheckLocation(last_location_, checkMoreAccuracies, true);
+ if (gotLocation) {
+ for (int i = 1; i < kLocationsToCheck; i++) {
+ EXPECT_EQ(std::cv_status::no_timeout, wait(kLocationTimeoutSubsequentSec));
+ EXPECT_EQ(location_called_count_, i + 1);
+ CheckLocation(last_location_, checkMoreAccuracies, true);
+ }
}
- }
StopAndClearLocations();
}
@@ -410,7 +414,7 @@
ASSERT_TRUE(resultVoid.isOk());
// Ensure we can get a good location after a bad injection has been deleted
- StartAndGetSingleLocation(false);
+    StartAndGetSingleLocation(false, /* min_interval_msec= */ 1000);
StopAndClearLocations();
}
@@ -430,7 +434,7 @@
ASSERT_TRUE(result.isOk());
EXPECT_TRUE(result);
- StartAndGetSingleLocation(false);
+ StartAndGetSingleLocation(false, /* min_interval_msec= */ 1000);
// Ensure we don't get a location anywhere within 111km (1 degree of lat or lng) of the seed
// location.
diff --git a/gnss/1.1/vts/functional/gnss_hal_test.cpp b/gnss/1.1/vts/functional/gnss_hal_test.cpp
index 52aaa69..6663a19 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test.cpp
+++ b/gnss/1.1/vts/functional/gnss_hal_test.cpp
@@ -99,7 +99,9 @@
EXPECT_TRUE(result);
}
-bool GnssHalTest::StartAndCheckFirstLocation(bool strict) {
+bool GnssHalTest::StartAndCheckFirstLocation(const bool strict, const int min_interval_msec,
+ const bool low_power_mode) {
+ SetPositionMode(min_interval_msec, low_power_mode);
auto result = gnss_hal_->start();
EXPECT_TRUE(result.isOk());
@@ -141,7 +143,9 @@
SetPositionMode(kMinIntervalMsec, kLowPowerMode);
- EXPECT_TRUE(StartAndCheckFirstLocation(/* strict= */ true));
+ EXPECT_TRUE(StartAndCheckFirstLocation(/* strict= */ true,
+ /* min_interval_msec= */ 1000,
+ /* low_power_mode= */ false));
for (int i = 1; i < count; i++) {
EXPECT_TRUE(gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_,
diff --git a/gnss/1.1/vts/functional/gnss_hal_test.h b/gnss/1.1/vts/functional/gnss_hal_test.h
index 75c4216..c642028 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test.h
+++ b/gnss/1.1/vts/functional/gnss_hal_test.h
@@ -106,7 +106,8 @@
*
* returns true if a location was successfully generated
*/
- bool StartAndCheckFirstLocation(bool strict);
+ bool StartAndCheckFirstLocation(const bool strict, const int min_interval_msec,
+ const bool low_power_mode);
/*
* CheckLocation:
diff --git a/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp b/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
index e6a51eb..ef64324 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
@@ -90,10 +90,8 @@
gnss_cb_->location_cbq_.reset();
// Start of Low Power Mode test
- SetPositionMode(kMinIntervalMsec, kLowPowerMode);
-
// Don't expect true - as without AGPS access
- if (!StartAndCheckFirstLocation(/* strict= */ false)) {
+ if (!StartAndCheckFirstLocation(/* strict= */ false, kMinIntervalMsec, kLowPowerMode)) {
ALOGW("GetLocationLowPower test - no first low power location received.");
}
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.cpp b/gnss/2.0/vts/functional/gnss_hal_test.cpp
index 1cb44c5..5227693 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test.cpp
@@ -97,7 +97,9 @@
EXPECT_TRUE(result);
}
-bool GnssHalTest::StartAndCheckFirstLocation(bool strict) {
+bool GnssHalTest::StartAndCheckFirstLocation(const bool strict, const int min_interval_msec,
+ const bool low_power_mode) {
+ SetPositionMode(min_interval_msec, low_power_mode);
const auto result = gnss_hal_->start();
EXPECT_TRUE(result.isOk());
@@ -137,7 +139,9 @@
SetPositionMode(kMinIntervalMsec, kLowPowerMode);
- EXPECT_TRUE(StartAndCheckFirstLocation(/* strict= */ true));
+ EXPECT_TRUE(StartAndCheckFirstLocation(/* strict= */ true,
+ /* min_interval_msec= */ 1000,
+ /* low_power_mode= */ false));
for (int i = 1; i < count; i++) {
EXPECT_TRUE(gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_,
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.h b/gnss/2.0/vts/functional/gnss_hal_test.h
index 7fbd735..28a1979 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.h
+++ b/gnss/2.0/vts/functional/gnss_hal_test.h
@@ -159,7 +159,8 @@
*
* returns true if a location was successfully generated
*/
- bool StartAndCheckFirstLocation(bool strict);
+ bool StartAndCheckFirstLocation(const bool strict, const int min_interval_msec,
+ const bool low_power_mode);
/*
* CheckLocation:
diff --git a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
index 3e0058f..f57c599 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
@@ -403,7 +403,9 @@
}
TEST_P(GnssHalTest, TestGnssLocationElapsedRealtime) {
- StartAndCheckFirstLocation(/* strict= */ true);
+ StartAndCheckFirstLocation(/* strict= */ true,
+ /* min_interval_msec= */ 1000,
+ /* low_power_mode= */ false);
ASSERT_TRUE((int)gnss_cb_->last_location_.elapsedRealtime.flags <=
(int)(ElapsedRealtimeFlags::HAS_TIMESTAMP_NS |
@@ -419,7 +421,9 @@
// This test only verify that injectBestLocation_2_0 does not crash.
TEST_P(GnssHalTest, TestInjectBestLocation_2_0) {
- StartAndCheckFirstLocation(/* strict= */ true);
+ StartAndCheckFirstLocation(/* strict= */ true,
+ /* min_interval_msec= */ 1000,
+ /* low_power_mode= */ false);
gnss_hal_->injectBestLocation_2_0(gnss_cb_->last_location_);
StopAndClearLocations();
}
@@ -460,10 +464,10 @@
gnss_cb_->location_cbq_.reset();
// Start of Low Power Mode test
- SetPositionMode(kMinIntervalMsec, kLowPowerMode);
-
// Don't expect true - as without AGPS access
- if (!StartAndCheckFirstLocation(/* strict= */ false)) {
+ if (!StartAndCheckFirstLocation(/* strict= */ false,
+ /* min_interval_msec= */ kMinIntervalMsec,
+ /* low_power_mode= */ kLowPowerMode)) {
ALOGW("GetLocationLowPower test - no first low power location received.");
}
diff --git a/gnss/2.1/vts/functional/gnss_hal_test_cases.cpp b/gnss/2.1/vts/functional/gnss_hal_test_cases.cpp
index deb80e8..fcab8c4 100644
--- a/gnss/2.1/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/2.1/vts/functional/gnss_hal_test_cases.cpp
@@ -254,7 +254,7 @@
*/
TEST_P(GnssHalTest, TestGnssSvInfoFields) {
gnss_cb_->location_cbq_.reset();
- StartAndCheckFirstLocation();
+ StartAndCheckFirstLocation(/* min_interval_msec= */ 1000, /* low_power_mode= */ false);
int location_called_count = gnss_cb_->location_cbq_.calledCount();
// Tolerate 1 less sv status to handle edge cases in reporting.
diff --git a/gnss/aidl/aidl_api/android.hardware.gnss/current/android/hardware/gnss/SatellitePvt.aidl b/gnss/aidl/aidl_api/android.hardware.gnss/current/android/hardware/gnss/SatellitePvt.aidl
index 747ee90..8c17841 100644
--- a/gnss/aidl/aidl_api/android.hardware.gnss/current/android/hardware/gnss/SatellitePvt.aidl
+++ b/gnss/aidl/aidl_api/android.hardware.gnss/current/android/hardware/gnss/SatellitePvt.aidl
@@ -34,9 +34,13 @@
package android.hardware.gnss;
@VintfStability
parcelable SatellitePvt {
+ int flags;
android.hardware.gnss.SatellitePositionEcef satPosEcef;
android.hardware.gnss.SatelliteVelocityEcef satVelEcef;
android.hardware.gnss.SatelliteClockInfo satClockInfo;
double ionoDelayMeters;
double tropoDelayMeters;
+ const int HAS_POSITION_VELOCITY_CLOCK_INFO = 1;
+ const int HAS_IONO = 2;
+ const int HAS_TROPO = 4;
}
diff --git a/gnss/aidl/android/hardware/gnss/GnssMeasurement.aidl b/gnss/aidl/android/hardware/gnss/GnssMeasurement.aidl
index 336e927..58f29c5 100644
--- a/gnss/aidl/android/hardware/gnss/GnssMeasurement.aidl
+++ b/gnss/aidl/android/hardware/gnss/GnssMeasurement.aidl
@@ -625,6 +625,17 @@
* The position and velocity must be in ECEF coordinates.
*
* If the data is available, gnssMeasurementFlags must contain HAS_SATELLITE_PVT.
+ *
+ * If SatellitePvt is derived from Broadcast ephemeris, then the position is already w.r.t.
+ * the antenna phase center. However, if SatellitePvt is derived from other modeled orbits,
+ * such as long-term-orbits, or precise orbits, then the orbits may have been computed w.r.t.
+ * the satellite center of mass, and then GNSS vendors are expected to correct for the effect
+ * on different phase centers (can differ by meters) of different GNSS signals (e.g. L1, L5)
+ * on the reported satellite position. Accordingly, we might observe a different satellite
+ * position reported for L1 GnssMeasurement struct compared to L5 GnssMeasurement struct.
+ *
+ * If receivedSvTimeNs is not fully decoded, Satellite PVT could still be reported and
+ * receivedSvTimeNs uncertainty field would be used to provide confidence.
*/
SatellitePvt satellitePvt;
diff --git a/gnss/aidl/android/hardware/gnss/SatelliteClockInfo.aidl b/gnss/aidl/android/hardware/gnss/SatelliteClockInfo.aidl
index 844fd1c..4b7d5d6 100644
--- a/gnss/aidl/android/hardware/gnss/SatelliteClockInfo.aidl
+++ b/gnss/aidl/android/hardware/gnss/SatelliteClockInfo.aidl
@@ -24,6 +24,14 @@
/**
* Satellite hardware code bias of the reported code type w.r.t
* ionosphere-free measurement in meters.
+ *
+ * When broadcast ephemeris is used, this is the offset caused
+ * by the satellite hardware delays at different frequencies;
+ * e.g. in IS-GPS-705D, this term is described in Section
+ * 20.3.3.3.1.2.1.
+ *
+ * For GPS this term is ~10ns, and affects the satellite position
+ * computation by less than a millimeter.
*/
double satHardwareCodeBiasMeters;
@@ -31,9 +39,20 @@
* Satellite time correction for ionospheric-free signal measurement
* (meters). The satellite clock correction for the given signal type
* = satTimeCorrectionMeters - satHardwareCodeBiasMeters.
+ *
+ * When broadcast ephemeris is used, this is the offset modeled in the
+ * clock terms broadcast over the air by the satellites;
+ * e.g. in IS-GPS-200H, Section 20.3.3.3.3.1, this term is
+ * ∆tsv = af0 + af1(t - toc) + af2(t - toc)^2 + ∆tr.
+ *
+ * If another source of ephemeris is used for SatellitePvt, then the
+ * equivalent value of satTimeCorrection must be provided.
+ *
+ * For GPS this term is ~1ms, and affects the satellite position
+ * computation by ~1m.
*/
double satTimeCorrectionMeters;
/** Satellite clock drift (meters per second). */
double satClkDriftMps;
-}
\ No newline at end of file
+}
diff --git a/gnss/aidl/android/hardware/gnss/SatellitePvt.aidl b/gnss/aidl/android/hardware/gnss/SatellitePvt.aidl
index ea55f0c..a238e3f 100644
--- a/gnss/aidl/android/hardware/gnss/SatellitePvt.aidl
+++ b/gnss/aidl/android/hardware/gnss/SatellitePvt.aidl
@@ -16,9 +16,9 @@
package android.hardware.gnss;
+import android.hardware.gnss.SatelliteClockInfo;
import android.hardware.gnss.SatellitePositionEcef;
import android.hardware.gnss.SatelliteVelocityEcef;
-import android.hardware.gnss.SatelliteClockInfo;
/**
* Contains estimates of the satellite position, velocity and time in the
@@ -27,6 +27,34 @@
@VintfStability
parcelable SatellitePvt {
/**
+ * Bit mask indicating valid satellite position, velocity and clock info fields are
+ * stored in the SatellitePvt.
+ */
+ const int HAS_POSITION_VELOCITY_CLOCK_INFO = 1 << 0;
+
+ /**
+ * Bit mask indicating a valid iono delay field is stored in the SatellitePvt.
+ */
+ const int HAS_IONO = 1 << 1;
+
+ /**
+ * Bit mask indicating a valid tropo delay field is stored in the SatellitePvt.
+ */
+ const int HAS_TROPO = 1 << 2;
+
+ /**
+ * A bitfield of flags indicating the validity of the fields in this SatellitePvt.
+     * The bit masks are defined in the constants with the HAS_* prefix.
+ *
+ * Fields for which there is no corresponding flag must be filled in with a valid value.
+ * For convenience, these are marked as mandatory.
+ *
+     * Other fields may have invalid information in them if not marked as valid by the
+ * corresponding bit in flags.
+ */
+ int flags;
+
+ /**
* Satellite position in WGS84 ECEF. See comments of
* SatellitePositionEcef for units.
*/
@@ -46,4 +74,4 @@
/** Tropospheric delay in meters. */
double tropoDelayMeters;
-}
\ No newline at end of file
+}
diff --git a/gnss/aidl/vts/gnss_hal_test_cases.cpp b/gnss/aidl/vts/gnss_hal_test_cases.cpp
index 9086b3d..67ccf52 100644
--- a/gnss/aidl/vts/gnss_hal_test_cases.cpp
+++ b/gnss/aidl/vts/gnss_hal_test_cases.cpp
@@ -38,6 +38,7 @@
using android::hardware::gnss::IGnssPowerIndication;
using android::hardware::gnss::IGnssPsds;
using android::hardware::gnss::PsdsType;
+using android::hardware::gnss::SatellitePvt;
using GnssConstellationTypeAidl = android::hardware::gnss::GnssConstellationType;
@@ -128,22 +129,39 @@
GnssMeasurement::HAS_SATELLITE_PVT |
GnssMeasurement::HAS_CORRELATION_VECTOR));
- if ((measurement.flags & GnssMeasurement::HAS_SATELLITE_PVT) &&
- (has_capability_satpvt == true)) {
- ASSERT_TRUE(measurement.satellitePvt.satPosEcef.posXMeters >= -43000000 &&
- measurement.satellitePvt.satPosEcef.posXMeters <= 43000000);
- ASSERT_TRUE(measurement.satellitePvt.satPosEcef.posYMeters >= -43000000 &&
- measurement.satellitePvt.satPosEcef.posYMeters <= 43000000);
- ASSERT_TRUE(measurement.satellitePvt.satPosEcef.posZMeters >= -43000000 &&
- measurement.satellitePvt.satPosEcef.posZMeters <= 43000000);
- ASSERT_TRUE(measurement.satellitePvt.satPosEcef.ureMeters > 0);
- ASSERT_TRUE(measurement.satellitePvt.satVelEcef.velXMps >= -4000 &&
- measurement.satellitePvt.satVelEcef.velXMps <= 4000);
- ASSERT_TRUE(measurement.satellitePvt.satVelEcef.velYMps >= -4000 &&
- measurement.satellitePvt.satVelEcef.velYMps <= 4000);
- ASSERT_TRUE(measurement.satellitePvt.satVelEcef.velZMps >= -4000 &&
- measurement.satellitePvt.satVelEcef.velZMps <= 4000);
- ASSERT_TRUE(measurement.satellitePvt.satVelEcef.ureRateMps > 0);
+ if (measurement.flags & GnssMeasurement::HAS_SATELLITE_PVT &&
+ has_capability_satpvt == true) {
+ if (measurement.satellitePvt.flags & SatellitePvt::HAS_POSITION_VELOCITY_CLOCK_INFO) {
+ ASSERT_TRUE(measurement.satellitePvt.satPosEcef.posXMeters >= -43000000 &&
+ measurement.satellitePvt.satPosEcef.posXMeters <= 43000000);
+ ASSERT_TRUE(measurement.satellitePvt.satPosEcef.posYMeters >= -43000000 &&
+ measurement.satellitePvt.satPosEcef.posYMeters <= 43000000);
+ ASSERT_TRUE(measurement.satellitePvt.satPosEcef.posZMeters >= -43000000 &&
+ measurement.satellitePvt.satPosEcef.posZMeters <= 43000000);
+ ASSERT_TRUE(measurement.satellitePvt.satPosEcef.ureMeters > 0);
+ ASSERT_TRUE(measurement.satellitePvt.satVelEcef.velXMps >= -4000 &&
+ measurement.satellitePvt.satVelEcef.velXMps <= 4000);
+ ASSERT_TRUE(measurement.satellitePvt.satVelEcef.velYMps >= -4000 &&
+ measurement.satellitePvt.satVelEcef.velYMps <= 4000);
+ ASSERT_TRUE(measurement.satellitePvt.satVelEcef.velZMps >= -4000 &&
+ measurement.satellitePvt.satVelEcef.velZMps <= 4000);
+ ASSERT_TRUE(measurement.satellitePvt.satVelEcef.ureRateMps > 0);
+ ASSERT_TRUE(
+ measurement.satellitePvt.satClockInfo.satHardwareCodeBiasMeters > -17.869 &&
+ measurement.satellitePvt.satClockInfo.satHardwareCodeBiasMeters < 17.729);
+ ASSERT_TRUE(measurement.satellitePvt.satClockInfo.satTimeCorrectionMeters > -3e6 &&
+ measurement.satellitePvt.satClockInfo.satTimeCorrectionMeters < 3e6);
+ ASSERT_TRUE(measurement.satellitePvt.satClockInfo.satClkDriftMps > -1.117 &&
+ measurement.satellitePvt.satClockInfo.satClkDriftMps < 1.117);
+ }
+ if (measurement.satellitePvt.flags & SatellitePvt::HAS_IONO) {
+ ASSERT_TRUE(measurement.satellitePvt.ionoDelayMeters > 0 &&
+ measurement.satellitePvt.ionoDelayMeters < 100);
+ }
+ if (measurement.satellitePvt.flags & SatellitePvt::HAS_TROPO) {
+ ASSERT_TRUE(measurement.satellitePvt.tropoDelayMeters > 0 &&
+ measurement.satellitePvt.tropoDelayMeters < 100);
+ }
}
if (kIsCorrelationVectorSupported &&
@@ -201,7 +219,7 @@
// Get a location and request another GnssPowerStats
gnss_cb_->location_cbq_.reset();
- StartAndCheckFirstLocation();
+ StartAndCheckFirstLocation(/* min_interval_msec= */ 1000, /* low_power_mode= */ false);
// Request and verify the 2nd GnssPowerStats has larger values than the 1st one
iGnssPowerIndication->requestGnssPowerStats();
diff --git a/gnss/common/utils/default/Utils.cpp b/gnss/common/utils/default/Utils.cpp
index 9bc6786..569dac4 100644
--- a/gnss/common/utils/default/Utils.cpp
+++ b/gnss/common/utils/default/Utils.cpp
@@ -31,6 +31,7 @@
using aidl::android::hardware::gnss::GnssMeasurement;
using aidl::android::hardware::gnss::IGnss;
using aidl::android::hardware::gnss::IGnssMeasurementCallback;
+using aidl::android::hardware::gnss::SatellitePvt;
using GnssSvFlags = V1_0::IGnssCallback::GnssSvFlags;
using GnssMeasurementFlagsV1_0 = V1_0::IGnssMeasurementCallback::GnssMeasurementFlags;
@@ -175,7 +176,9 @@
.fullInterSignalBiasUncertaintyNs = 792.0,
.satelliteInterSignalBiasNs = 233.9,
.satelliteInterSignalBiasUncertaintyNs = 921.2,
- .satellitePvt = {.satPosEcef = {.posXMeters = 10442993.1153328,
+ .satellitePvt = {.flags = SatellitePvt::HAS_POSITION_VELOCITY_CLOCK_INFO |
+ SatellitePvt::HAS_IONO | SatellitePvt::HAS_TROPO,
+ .satPosEcef = {.posXMeters = 10442993.1153328,
.posYMeters = -19926932.8051666,
.posZMeters = -12034295.0216203,
.ureMeters = 1000.2345678},
diff --git a/gnss/common/utils/vts/include/v2_1/gnss_hal_test_template.h b/gnss/common/utils/vts/include/v2_1/gnss_hal_test_template.h
index fec3503..03166be 100644
--- a/gnss/common/utils/vts/include/v2_1/gnss_hal_test_template.h
+++ b/gnss/common/utils/vts/include/v2_1/gnss_hal_test_template.h
@@ -107,7 +107,7 @@
*
* returns true if a location was successfully generated
*/
- bool StartAndCheckFirstLocation();
+ bool StartAndCheckFirstLocation(const int min_interval_msec, const bool low_power_mode);
/*
* CheckLocation:
@@ -234,7 +234,9 @@
}
template <class T_IGnss>
-bool GnssHalTestTemplate<T_IGnss>::StartAndCheckFirstLocation() {
+bool GnssHalTestTemplate<T_IGnss>::StartAndCheckFirstLocation(const int min_interval_msec,
+ const bool low_power_mode) {
+ SetPositionMode(min_interval_msec, low_power_mode);
const auto result = gnss_hal_->start();
EXPECT_TRUE(result.isOk());
@@ -274,9 +276,7 @@
const int kLocationTimeoutSubsequentSec = 2;
const bool kLowPowerMode = false;
- SetPositionMode(kMinIntervalMsec, kLowPowerMode);
-
- EXPECT_TRUE(StartAndCheckFirstLocation());
+ EXPECT_TRUE(StartAndCheckFirstLocation(kMinIntervalMsec, kLowPowerMode));
for (int i = 1; i < count; i++) {
EXPECT_TRUE(gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_,
diff --git a/identity/aidl/default/libeic/EicCbor.c b/identity/aidl/default/libeic/EicCbor.c
index fe131eb..0e2684f 100644
--- a/identity/aidl/default/libeic/EicCbor.c
+++ b/identity/aidl/default/libeic/EicCbor.c
@@ -114,7 +114,7 @@
data[4] = size & 0xff;
eicCborAppend(cbor, data, 5);
} else {
- data[0] = (majorType << 5) | 24;
+ data[0] = (majorType << 5) | 27;
data[1] = (((uint64_t)size) >> 56) & 0xff;
data[2] = (((uint64_t)size) >> 48) & 0xff;
data[3] = (((uint64_t)size) >> 40) & 0xff;
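
Note (context for the one-byte fix above): in CBOR (RFC 8949), the low five bits of the initial byte are the additional-information field, where 24, 25, 26 and 27 announce that 1, 2, 4 or 8 bytes of value follow. This branch writes eight length bytes, so 27 is correct; the old constant 24 declared a one-byte length. A small self-checking C++ sketch, independent of libeic:

    #include <cstdint>

    // Initial byte = (major type << 5) | additional information (RFC 8949, section 3).
    constexpr uint8_t cborInitialByte(uint8_t majorType, uint8_t additionalInfo) {
        return static_cast<uint8_t>((majorType << 5) | additionalInfo);
    }

    // Major type 2 (byte string) with an 8-byte length indicator.
    static_assert(cborInitialByte(2, 27) == 0x5b, "byte string, uint64 length follows");
    // The replaced constant encoded "1-byte length follows" instead.
    static_assert(cborInitialByte(2, 24) == 0x58, "byte string, uint8 length follows");
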
diff --git a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
index e0d60fc..9e37ed0 100644
--- a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -921,6 +921,23 @@
.Authorization(TAG_MIN_MAC_LENGTH, 128)));
}
+/**
+ * NewKeyGenerationTest.AesInvalidKeySize
+ *
+ * Verifies that specifying an invalid key size for AES key generation returns
+ * UNSUPPORTED_KEY_SIZE.
+ */
+TEST_P(NewKeyGenerationTest, AesInvalidKeySize) {
+ for (auto key_size : InvalidKeySizes(Algorithm::AES)) {
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
+ GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(key_size)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::ECB)
+ .Padding(PaddingMode::NONE)));
+ }
+}
+
INSTANTIATE_KEYMASTER_HIDL_TEST(NewKeyGenerationTest);
typedef KeymasterHidlTest SigningOperationsTest;
diff --git a/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp b/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
index e46cb48..0639da8 100644
--- a/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
+++ b/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
@@ -243,7 +243,9 @@
EXPECT_EQ(ErrorCode::OK, result);
EXPECT_EQ(2U, cert_chain.size());
- if (dumpAttestations) dumpContent(bin2hex(cert_chain[0]));
+ if (dumpAttestations) {
+ for (auto cert_ : cert_chain) dumpContent(bin2hex(cert_));
+ }
auto [err, attestation] = parse_attestation_record(cert_chain[0]);
ASSERT_EQ(ErrorCode::OK, err);
@@ -287,7 +289,9 @@
EXPECT_EQ(ErrorCode::OK, result);
EXPECT_EQ(2U, cert_chain.size());
- if (dumpAttestations) dumpContent(bin2hex(cert_chain[0]));
+ if (dumpAttestations) {
+ for (auto cert_ : cert_chain) dumpContent(bin2hex(cert_));
+ }
auto [err, attestation] = parse_attestation_record(cert_chain[0]);
ASSERT_EQ(ErrorCode::OK, err);
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
index 7849ca7..8bd2fbe 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
@@ -48,6 +48,10 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
private:
const nn::SharedPreparedModel kPreparedModel;
};
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
new file mode 100644
index 0000000..e201e25
--- /dev/null
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
+
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+};
+
+} // namespace android::hardware::neuralnetworks::V1_0::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
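
Note (usage sketch, not part of the patch): Execution backs the createReusableExecution() methods declared above; the converted request and its relocation are captured once at creation, and every compute() call reuses them. The sketch below is written against the canonical NNAPI types and assumes an already prepared model and request; the function name is illustrative.

    #include <nnapi/IExecution.h>
    #include <nnapi/IPreparedModel.h>
    #include <nnapi/Types.h>

    #include <utility>

    void runThreeTimes(const android::nn::SharedPreparedModel& preparedModel,
                       const android::nn::Request& request) {
        auto created = preparedModel->createReusableExecution(
                request, android::nn::MeasureTiming::NO, /*loopTimeoutDuration=*/{});
        if (!created.has_value()) return;
        const android::nn::SharedExecution execution = std::move(created).value();
        for (int i = 0; i < 3; ++i) {
            // Repeated computations reuse the request state set up at creation time.
            auto result = execution->compute(/*deadline=*/{});
            (void)result;
        }
    }
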
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index 8853eea..48be595 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -57,10 +57,17 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const V1_0::Request& request, const hal::utils::RequestRelocation& relocation) const;
+
private:
const sp<V1_0::IPreparedModel> kPreparedModel;
const hal::utils::DeathHandler kDeathHandler;
diff --git a/neuralnetworks/1.0/utils/src/Burst.cpp b/neuralnetworks/1.0/utils/src/Burst.cpp
index e3a9757..1284721 100644
--- a/neuralnetworks/1.0/utils/src/Burst.cpp
+++ b/neuralnetworks/1.0/utils/src/Burst.cpp
@@ -55,4 +55,10 @@
return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
}
+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+}
+
} // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Execution.cpp b/neuralnetworks/1.0/utils/src/Execution.cpp
new file mode 100644
index 0000000..7a3216b
--- /dev/null
+++ b/neuralnetworks/1.0/utils/src/Execution.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "V1_0::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation));
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)) {}
+
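+// The request was converted and relocated when this Execution was created, so repeated
+// compute() calls can dispatch directly to the prepared model.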
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return kPreparedModel->executeInternal(kRequest, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on 1.0 HAL service";
+}
+
+} // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 858571d..00970c0 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -19,6 +19,7 @@
#include "Burst.h"
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
#include "Utils.h"
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
@@ -61,22 +62,35 @@
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+ return executeInternal(hidlRequest, relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const V1_0::Request& request,
+ const hal::utils::RequestRelocation& relocation) const {
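+ // Copy any pointer-backed input data into the shared memory that backs the HIDL request.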
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
const auto cb = sp<ExecutionCallback>::make();
const auto scoped = kDeathHandler.protectCallback(cb.get());
- const auto ret = kPreparedModel->execute(hidlRequest, cb);
+ const auto ret = kPreparedModel->execute(request, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
auto result = NN_TRY(cb->get());
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
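+ // Copy results from shared memory back into the caller's pointer-backed output buffers.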
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return result;
}
@@ -91,6 +105,20 @@
<< "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming /*measure*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto hidlRequest = NN_TRY(convert(requestInShared));
+ return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation));
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
return Burst::create(shared_from_this());
}
diff --git a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
index f19ed77..7820c06 100644
--- a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
@@ -19,6 +19,7 @@
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -224,6 +225,150 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
+TEST(PreparedModelTest, reusableExecute) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteLaunchError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecute(V1_0::ErrorStatus::GENERAL_FAILURE,
+ V1_0::ErrorStatus::GENERAL_FAILURE)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteReturnError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteCrash) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_0::ErrorStatus> {
+ mockPreparedModel->simulateCrash();
+ return V1_0::ErrorStatus::NONE;
+ };
+ EXPECT_CALL(*mockPreparedModel, execute(_, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedNotSupported) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke computeFenced
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
new file mode 100644
index 0000000..9c66446
--- /dev/null
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
+
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ V1_0::Request request, hal::utils::RequestRelocation relocation,
+ V1_2::MeasureTiming measure);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const V1_0::Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const MeasureTiming kMeasure;
+};
+
+} // namespace android::hardware::neuralnetworks::V1_2::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
index 9669d8c0..dae1ff3 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
@@ -28,9 +28,11 @@
#include <fmq/MessageQueue.h>
#include <hidl/MQDescriptor.h>
#include <nnapi/IBurst.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>
#include <atomic>
@@ -51,14 +53,14 @@
* across FMQ, making it appear to the runtime as a regular synchronous inference. Additionally,
* this class manages the burst's memory cache.
*/
-class ExecutionBurstController final : public nn::IBurst {
+class ExecutionBurstController final
+ : public nn::IBurst,
+ public std::enable_shared_from_this<ExecutionBurstController> {
struct PrivateConstructorTag {};
public:
- using FallbackFunction =
- std::function<nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>(
- const nn::Request&, nn::MeasureTiming, const nn::OptionalTimePoint&,
- const nn::OptionalDuration&)>;
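+ // The fallback closure captures its own request, measure, deadline, and loop timeout
+ // duration, so it takes no arguments.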
+ using FallbackFunction = std::function<
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>()>;
/**
* NN runtime memory cache.
@@ -154,10 +156,10 @@
* @return ExecutionBurstController Execution burst controller object.
*/
static nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> create(
- const sp<IPreparedModel>& preparedModel, FallbackFunction fallback,
+ nn::SharedPreparedModel preparedModel, const sp<IPreparedModel>& hidlPreparedModel,
std::chrono::microseconds pollingTimeWindow);
- ExecutionBurstController(PrivateConstructorTag tag, FallbackFunction fallback,
+ ExecutionBurstController(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
std::unique_ptr<RequestChannelSender> requestChannelSender,
std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
@@ -173,9 +175,21 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ // See IBurst::createReusableExecution for information on this method.
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+ // If fallback is not nullptr, this method will invoke the fallback function to try another
+ // execution path if the packet could not be sent. Otherwise, failing to send the packet will
+ // result in an error.
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const std::vector<FmqRequestDatum>& requestPacket,
+ const hal::utils::RequestRelocation& relocation, FallbackFunction fallback) const;
+
private:
mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
- const FallbackFunction kFallback;
+ const nn::SharedPreparedModel kPreparedModel;
const std::unique_ptr<RequestChannelSender> mRequestChannelSender;
const std::unique_ptr<ResultChannelReceiver> mResultChannelReceiver;
const sp<ExecutionBurstCallback> mBurstCallback;
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index fb11130..35abd79 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -58,10 +58,18 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const V1_0::Request& request, MeasureTiming measure,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
const V1_0::Request& request, MeasureTiming measure) const;
diff --git a/neuralnetworks/1.2/utils/src/Execution.cpp b/neuralnetworks/1.2/utils/src/Execution.cpp
new file mode 100644
index 0000000..18d1c90
--- /dev/null
+++ b/neuralnetworks/1.2/utils/src/Execution.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "V1_2::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation), measure);
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kMeasure(measure) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return kPreparedModel->executeInternal(kRequest, kMeasure, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on 1.2 HAL service";
+}
+
+} // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index 7a17f25..b4b6f68 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -28,6 +28,7 @@
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>
@@ -50,6 +51,35 @@
namespace android::hardware::neuralnetworks::V1_2::utils {
namespace {
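+// Reusable burst execution: stores the serialized FMQ request packet, the memory
+// relocation, and the memory-cache holds so each compute() call can send the packet
+// without re-serializing the request.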
+class BurstExecution final : public nn::IExecution,
+ public std::enable_shared_from_this<BurstExecution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+
+ BurstExecution(PrivateConstructorTag tag,
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const ExecutionBurstController> kController;
+ const std::vector<FmqRequestDatum> kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const std::vector<ExecutionBurstController::OptionalCacheHold> kCacheHolds;
+};
+
nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
V1_0::ErrorStatus status, const sp<IBurstContext>& burstContext) {
HANDLE_HAL_STATUS(status) << "IPreparedModel::configureExecutionBurst failed with status "
@@ -209,10 +239,10 @@
// ExecutionBurstController methods
nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurstController::create(
- const sp<V1_2::IPreparedModel>& preparedModel, FallbackFunction fallback,
+ nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
std::chrono::microseconds pollingTimeWindow) {
// check inputs
- if (preparedModel == nullptr) {
+ if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
return NN_ERROR() << "ExecutionBurstController::create passed a nullptr";
}
@@ -236,7 +266,7 @@
auto cb = hal::utils::CallbackValue(executionBurstResultCallback);
// configure burst
- const Return<void> ret = preparedModel->configureExecutionBurst(
+ const Return<void> ret = hidlPreparedModel->configureExecutionBurst(
burstCallback, *requestChannelDescriptor, *resultChannelDescriptor, cb);
HANDLE_TRANSPORT_FAILURE(ret);
@@ -250,18 +280,18 @@
// make and return controller
return std::make_shared<const ExecutionBurstController>(
- PrivateConstructorTag{}, std::move(fallback), std::move(requestChannelSender),
+ PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
std::move(memoryCache), std::move(deathHandler));
}
ExecutionBurstController::ExecutionBurstController(
- PrivateConstructorTag /*tag*/, FallbackFunction fallback,
+ PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
std::unique_ptr<RequestChannelSender> requestChannelSender,
std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
std::shared_ptr<MemoryCache> memoryCache, neuralnetworks::utils::DeathHandler deathHandler)
- : kFallback(std::move(fallback)),
+ : kPreparedModel(std::move(preparedModel)),
mRequestChannelSender(std::move(requestChannelSender)),
mResultChannelReceiver(std::move(resultChannelReceiver)),
mBurstCallback(std::move(callback)),
@@ -283,26 +313,98 @@
// systraces. Note that the first point we can begin collecting systraces in
// ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
// ExecutionBurstServer collects systraces at different points in the code.
- NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
+ NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
// if the request is valid but of a higher version than what's supported in burst execution,
// fall back to another execution path
if (const auto version = NN_TRY(hal::utils::makeExecutionFailure(nn::validate(request)));
version > nn::Version::ANDROID_Q) {
// fallback to another execution path if the packet could not be sent
- if (kFallback) {
- return kFallback(request, measure, deadline, loopTimeoutDuration);
- }
- return NN_ERROR() << "Request object has features not supported by IBurst::execute";
+ return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
}
+ // ensure that request is ready for IPC
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
+
// clear pools field of request, as they will be provided via slots
- const auto requestWithoutPools =
- nn::Request{.inputs = request.inputs, .outputs = request.outputs, .pools = {}};
+ const auto requestWithoutPools = nn::Request{
+ .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
auto hidlRequest = NN_TRY(
hal::utils::makeExecutionFailure(V1_0::utils::unvalidatedConvert(requestWithoutPools)));
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+ std::vector<int32_t> slots;
+ std::vector<OptionalCacheHold> holds;
+ slots.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
+ auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
+ slots.push_back(slot);
+ holds.push_back(std::move(hold));
+ }
+
+ // serialize the request packet; executeInternal() sends it over the FMQ
+ const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
+ const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
+ return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+ };
+ return executeInternal(requestPacket, relocation, fallback);
+}
+
+// See IBurst::createReusableExecution for information on this method.
+nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::createReusableExecution");
+
+ // if the request is valid but of a higher version than what's supported in burst execution,
+ // fall back to another execution path
+ if (const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(request)));
+ version > nn::Version::ANDROID_Q) {
+ // fall back to the prepared model's reusable execution path
+ return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+ }
+
+ // ensure that request is ready for IPC
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ // clear pools field of request, as they will be provided via slots
+ const auto requestWithoutPools = nn::Request{
+ .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
+ auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
+ const auto hidlMeasure = NN_TRY(convert(measure));
+
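+ // Register each memory pool with the burst's memory cache; the returned holds are stored
+ // on the reusable execution so the cached memory remains registered with the burst.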
+ std::vector<int32_t> slots;
+ std::vector<OptionalCacheHold> holds;
+ slots.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
+ auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
+ slots.push_back(slot);
+ holds.push_back(std::move(hold));
+ }
+
+ const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
+ return BurstExecution::create(shared_from_this(), std::move(requestPacket),
+ std::move(relocation), std::move(holds));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& requestPacket,
+ const hal::utils::RequestRelocation& relocation,
+ FallbackFunction fallback) const {
+ NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
+ "ExecutionBurstController::executeInternal");
+
// Ensure that at most one execution is in flight at any given time.
const bool alreadyInFlight = mExecutionInFlight.test_and_set();
if (alreadyInFlight) {
@@ -310,22 +412,16 @@
}
const auto guard = base::make_scope_guard([this] { mExecutionInFlight.clear(); });
- std::vector<int32_t> slots;
- std::vector<OptionalCacheHold> holds;
- slots.reserve(request.pools.size());
- holds.reserve(request.pools.size());
- for (const auto& memoryPool : request.pools) {
- auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
- slots.push_back(slot);
- holds.push_back(std::move(hold));
+ if (relocation.input) {
+ relocation.input->flush();
}
// send request packet
- const auto sendStatus = mRequestChannelSender->send(hidlRequest, hidlMeasure, slots);
+ const auto sendStatus = mRequestChannelSender->sendPacket(requestPacket);
if (!sendStatus.ok()) {
// fallback to another execution path if the packet could not be sent
- if (kFallback) {
- return kFallback(request, measure, deadline, loopTimeoutDuration);
+ if (fallback) {
+ return fallback();
}
return NN_ERROR() << "Error sending FMQ packet: " << sendStatus.error();
}
@@ -333,7 +429,47 @@
// get result packet
const auto [status, outputShapes, timing] =
NN_TRY(hal::utils::makeExecutionFailure(mResultChannelReceiver->getBlocking()));
+
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return executionCallback(status, outputShapes, timing);
}
+nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds) {
+ if (controller == nullptr) {
+ return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
+ }
+
+ return std::make_shared<const BurstExecution>(PrivateConstructorTag{}, std::move(controller),
+ std::move(request), std::move(relocation),
+ std::move(cacheHolds));
+}
+
+BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request,
+ hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds)
+ : kController(std::move(controller)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kCacheHolds(std::move(cacheHolds)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return kController->executeInternal(kRequest, kRelocation, /*fallback=*/nullptr);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on burst object";
+}
+
} // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index b209a44..d0ef36e 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -18,6 +18,7 @@
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
#include "ExecutionBurstController.h"
#include "ExecutionBurstUtils.h"
#include "Utils.h"
@@ -93,19 +94,32 @@
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
- auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure)
- : executeAsynchronously(hidlRequest, hidlMeasure);
+ return executeInternal(hidlRequest, hidlMeasure, relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const V1_0::Request& request, MeasureTiming measure,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
+ auto result = kExecuteSynchronously ? executeSynchronously(request, measure)
+ : executeAsynchronously(request, measure);
auto [outputShapes, timing] = NN_TRY(std::move(result));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
@@ -120,6 +134,22 @@
<< "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto hidlRequest = NN_TRY(convert(requestInShared));
+ auto hidlMeasure = NN_TRY(convert(measure));
+ return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation),
+ hidlMeasure);
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
auto self = shared_from_this();
auto fallback = [preparedModel = std::move(self)](
@@ -130,7 +160,7 @@
return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
};
const auto pollingTimeWindow = getBurstControllerPollingTimeWindow();
- return ExecutionBurstController::create(kPreparedModel, std::move(fallback), pollingTimeWindow);
+ return ExecutionBurstController::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
}
std::any PreparedModel::getUnderlyingResource() const {
diff --git a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
index d297b1a..5e2ad79 100644
--- a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
@@ -21,6 +21,7 @@
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -334,6 +335,248 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
+TEST(PreparedModelTest, reusableExecuteSync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecuteAsynchronously(
+ V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncLaunchError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(V1_0::ErrorStatus::GENERAL_FAILURE,
+ V1_0::ErrorStatus::GENERAL_FAILURE, {},
+ kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncReturnError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(
+ V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncCrash) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_0::ErrorStatus> {
+ mockPreparedModel->simulateCrash();
+ return V1_0::ErrorStatus::NONE;
+ };
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedNotSupported) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke computeFenced
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h
new file mode 100644
index 0000000..06c33d4
--- /dev/null
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
+ OptionalTimeoutDuration loopTimeoutDuration);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation,
+ V1_2::MeasureTiming measure, OptionalTimeoutDuration loopTimeoutDuration);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const V1_2::MeasureTiming kMeasure;
+ const OptionalTimeoutDuration kLoopTimeoutDuration;
+};
+
+} // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index 690fecc..5acba71 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -57,10 +57,26 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ executeFencedInternal(const Request& request, const hidl_vec<hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
diff --git a/neuralnetworks/1.3/utils/src/Execution.cpp b/neuralnetworks/1.3/utils/src/Execution.cpp
new file mode 100644
index 0000000..3d17cc3
--- /dev/null
+++ b/neuralnetworks/1.3/utils/src/Execution.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
+ OptionalTimeoutDuration loopTimeoutDuration) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "V1_3::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation), measure,
+ std::move(loopTimeoutDuration));
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
+ OptionalTimeoutDuration loopTimeoutDuration)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kMeasure(measure),
+ kLoopTimeoutDuration(std::move(loopTimeoutDuration)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& deadline) const {
+ const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ return kPreparedModel->executeInternal(kRequest, kMeasure, hidlDeadline, kLoopTimeoutDuration,
+ kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
+ const auto hidlDeadline = NN_TRY(convert(deadline));
+ const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return kPreparedModel->executeFencedInternal(kRequest, hidlWaitFor, kMeasure, hidlDeadline,
+ kLoopTimeoutDuration,
+ hidlTimeoutDurationAfterFence, kRelocation);
+}
+
+} // namespace android::hardware::neuralnetworks::V1_3::utils
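
Taken together with PreparedModel::createReusableExecution below, the intended client flow is: convert and relocate the request once, then call compute() as many times as needed. The following is a minimal caller-side sketch, not code from this change; it assumes an nn::SharedPreparedModel backed by this adapter, and the helper name runTwice plus the trivial error handling are illustrative only.

#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Types.h>

namespace nn = ::android::nn;

// Illustrative helper (not part of the change): create one reusable execution
// and compute with it twice.
bool runTwice(const nn::SharedPreparedModel& preparedModel, const nn::Request& request) {
    // Request conversion and memory relocation happen once, at creation time.
    const auto execution = preparedModel->createReusableExecution(
            request, nn::MeasureTiming::NO, /*loopTimeoutDuration=*/{});
    if (!execution.has_value()) return false;

    // Each compute() only converts the per-call deadline and flushes the
    // relocated input/output regions around the underlying driver call.
    for (int i = 0; i < 2; ++i) {
        const auto result = execution.value()->compute(/*deadline=*/{});
        if (!result.has_value()) return false;
    }
    return true;
}
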
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index fd7f8f2..1623de5 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -18,6 +18,7 @@
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
#include "Utils.h"
#include <android/hardware/neuralnetworks/1.0/types.h>
@@ -139,8 +140,11 @@
const nn::OptionalDuration& loopTimeoutDuration) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -148,16 +152,27 @@
const auto hidlLoopTimeoutDuration =
NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+ return executeInternal(hidlRequest, hidlMeasure, hidlDeadline, hidlLoopTimeoutDuration,
+ relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const Request& request, V1_2::MeasureTiming measure,
+ const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
auto result = kExecuteSynchronously
- ? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
- hidlLoopTimeoutDuration)
- : executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
- hidlLoopTimeoutDuration);
+ ? executeSynchronously(request, measure, deadline, loopTimeoutDuration)
+ : executeAsynchronously(request, measure, deadline, loopTimeoutDuration);
auto [outputShapes, timing] = NN_TRY(std::move(result));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
@@ -168,8 +183,10 @@
const nn::OptionalDuration& timeoutDurationAfterFence) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared =
- NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
const auto hidlRequest = NN_TRY(convert(requestInShared));
const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
@@ -178,27 +195,59 @@
const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return executeFencedInternal(hidlRequest, hidlWaitFor, hidlMeasure, hidlDeadline,
+ hidlLoopTimeoutDuration, hidlTimeoutDurationAfterFence,
+ relocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+PreparedModel::executeFencedInternal(const Request& request, const hidl_vec<hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
auto cb = hal::utils::CallbackValue(fencedExecutionCallback);
- const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
- hidlDeadline, hidlLoopTimeoutDuration,
- hidlTimeoutDurationAfterFence, cb);
+ const auto ret =
+ kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
+ timeoutDurationAfterFence, cb);
HANDLE_TRANSPORT_FAILURE(ret);
auto [syncFence, callback] = NN_TRY(cb.take());
// If executeFenced required the request memory to be moved into shared memory, block here until
// the fenced execution has completed and flush the memory back.
- if (maybeRequestInShared.has_value()) {
+ if (relocation.output) {
const auto state = syncFence.syncWait({});
if (state != nn::SyncFence::FenceState::SIGNALED) {
return NN_ERROR() << "syncWait failed with " << state;
}
- NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
+ relocation.output->flush();
}
return std::make_pair(std::move(syncFence), std::move(callback));
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto hidlRequest = NN_TRY(convert(requestInShared));
+ auto hidlMeasure = NN_TRY(convert(measure));
+ auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
+ return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation),
+ hidlMeasure, std::move(hidlLoopTimeoutDuration));
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
auto self = shared_from_this();
auto fallback = [preparedModel = std::move(self)](
@@ -209,7 +258,7 @@
return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
};
const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
- return V1_2::utils::ExecutionBurstController::create(kPreparedModel, std::move(fallback),
+ return V1_2::utils::ExecutionBurstController::create(shared_from_this(), kPreparedModel,
pollingTimeWindow);
}
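
The executeInternal()/executeFencedInternal() split above is what lets the relocation survive beyond a single execute() call: convertRequestFromPointerToShared() runs once, and only the input/output flushes are repeated per computation. The sketch below restates that flush pattern in isolation; it is a simplified illustration under assumptions (the hal_utils namespace alias and the runDriverCall callback are mine, and error handling is reduced to NN_TRY), not additional code from this change.

#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>

#include <functional>
#include <optional>

namespace nn = ::android::nn;
namespace hal_utils = ::android::hardware::neuralnetworks::utils;  // assumed alias

// runDriverCall stands in for the actual HIDL execute call and is hypothetical.
nn::GeneralResult<void> executeWithRelocation(
        const nn::Request& request,
        const std::function<nn::GeneralResult<void>(const nn::Request&)>& runDriverCall) {
    std::optional<nn::Request> maybeRequestInShared;
    hal_utils::RequestRelocation relocation;
    const nn::Request& requestInShared = NN_TRY(hal_utils::convertRequestFromPointerToShared(
            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
            &maybeRequestInShared, &relocation));

    // Copy pointer-backed inputs into the newly created shared memory pool.
    if (relocation.input) relocation.input->flush();

    // The driver only ever sees requestInShared, whose pools are all shared memory.
    NN_TRY(runDriverCall(requestInShared));

    // Copy shared-memory outputs back into the caller's pointer-backed buffers.
    if (relocation.output) relocation.output->flush();
    return {};
}
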
diff --git a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
index 5303c2a..6dbbd6b 100644
--- a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
@@ -22,6 +22,7 @@
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -462,6 +463,363 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
+TEST(PreparedModelTest, reusableExecuteSync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ Invoke(makeExecuteSynchronously(V1_3::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecuteSynchronously(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecuteAsynchronously(
+ V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncLaunchError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(V1_3::ErrorStatus::GENERAL_FAILURE,
+ V1_3::ErrorStatus::GENERAL_FAILURE, {},
+ kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncReturnError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(
+ V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncCrash) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_3::ErrorStatus> {
+ mockPreparedModel->simulateCrash();
+ return V1_3::ErrorStatus::NONE;
+ };
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(ret));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFenced) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecuteFencedCallbackReturn(V1_3::ErrorStatus::NONE,
+ kNoTiming, kNoTiming)));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+ ASSERT_NE(callback, nullptr);
+
+ // get results from callback
+ const auto callbackResult = callback();
+ ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code
+ << ": " << callbackResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedCallbackError) {
+ // setup call
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteFencedCallbackReturn(V1_3::ErrorStatus::GENERAL_FAILURE,
+ kNoTiming, kNoTiming)));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code << ": "
+ << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+ ASSERT_NE(callback, nullptr);
+
+ // verify callback failure
+ const auto callbackResult = callback();
+ ASSERT_FALSE(callbackResult.has_value());
+ EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, {}, nullptr)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
index 008e4e4..0cc78d4 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
@@ -38,7 +38,7 @@
namespace aidl::android::hardware::neuralnetworks::utils {
// Class that adapts aidl_hal::IBurst to nn::IBurst.
-class Burst final : public nn::IBurst {
+class Burst final : public nn::IBurst, public std::enable_shared_from_this<Burst> {
struct PrivateConstructorTag {};
public:
@@ -100,6 +100,16 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ // See IBurst::createReusableExecution for information.
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const aidl_hal::Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+ bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
const std::shared_ptr<aidl_hal::IBurst> kBurst;
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
new file mode 100644
index 0000000..a77ea98
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const bool kMeasure;
+ const int64_t kLoopTimeoutDuration;
+};
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index abce6cc..4035764 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -18,6 +18,7 @@
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_PREPARED_MODEL_H
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
@@ -34,7 +35,8 @@
namespace aidl::android::hardware::neuralnetworks::utils {
// Class that adapts aidl_hal::IPreparedModel to nn::IPreparedModel.
-class PreparedModel final : public nn::IPreparedModel {
+class PreparedModel final : public nn::IPreparedModel,
+ public std::enable_shared_from_this<PreparedModel> {
struct PrivateConstructorTag {};
public:
@@ -55,10 +57,25 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const Request& request, bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ executeFencedInternal(const Request& request,
+ const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
+ int64_t deadline, int64_t loopTimeoutDuration,
+ int64_t timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
const std::shared_ptr<aidl_hal::IPreparedModel> kPreparedModel;
};
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index 0b475bc..87cd0e4 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -22,6 +22,7 @@
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IBurst.h>
+#include <nnapi/IExecution.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -35,6 +36,39 @@
namespace aidl::android::hardware::neuralnetworks::utils {
namespace {
+class BurstExecution final : public nn::IExecution,
+ public std::enable_shared_from_this<BurstExecution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
+ std::shared_ptr<const Burst> burst, Request request,
+ std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+ hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds);
+
+ BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> burst, Request request,
+ std::vector<int64_t> memoryIdentifierTokens, bool measure,
+ int64_t loopTimeoutDuration, hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const Burst> kBurst;
+ const Request kRequest;
+ const std::vector<int64_t> kMemoryIdentifierTokens;
+ const bool kMeasure;
+ const int64_t kLoopTimeoutDuration;
+ const hal::utils::RequestRelocation kRelocation;
+ const std::vector<Burst::OptionalCacheHold> kCacheHolds;
+};
+
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
const std::vector<OutputShape>& outputShapes, const Timing& timing) {
return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
@@ -139,17 +173,13 @@
const nn::Request& request, nn::MeasureTiming measure,
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const {
- // Ensure that at most one execution is in flight at any given time.
- const bool alreadyInFlight = mExecutionInFlight.test_and_set();
- if (alreadyInFlight) {
- return NN_ERROR() << "IBurst already has an execution in flight";
- }
- const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
-
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -159,9 +189,9 @@
std::vector<int64_t> memoryIdentifierTokens;
std::vector<OptionalCacheHold> holds;
- memoryIdentifierTokens.reserve(request.pools.size());
- holds.reserve(request.pools.size());
- for (const auto& memoryPool : request.pools) {
+ memoryIdentifierTokens.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
auto& [identifier, hold] = *cached;
@@ -172,12 +202,30 @@
}
memoryIdentifierTokens.push_back(-1);
}
- CHECK_EQ(request.pools.size(), memoryIdentifierTokens.size());
+ CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
+
+ return executeInternal(aidlRequest, memoryIdentifierTokens, aidlMeasure, aidlDeadline,
+ aidlLoopTimeoutDuration, relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
+ const Request& request, const std::vector<int64_t>& memoryIdentifierTokens, bool measure,
+ int64_t deadline, int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const {
+ // Ensure that at most one execution is in flight at any given time.
+ const bool alreadyInFlight = mExecutionInFlight.test_and_set();
+ if (alreadyInFlight) {
+ return NN_ERROR() << "IBurst already has an execution in flight";
+ }
+ const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
+
+ if (relocation.input) {
+ relocation.input->flush();
+ }
ExecutionResult executionResult;
- const auto ret =
- kBurst->executeSynchronously(aidlRequest, memoryIdentifierTokens, aidlMeasure,
- aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
+ const auto ret = kBurst->executeSynchronously(request, memoryIdentifierTokens, measure,
+ deadline, loopTimeoutDuration, &executionResult);
HANDLE_ASTATUS(ret) << "execute failed";
if (!executionResult.outputSufficientSize) {
auto canonicalOutputShapes =
@@ -188,10 +236,89 @@
auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto aidlRequest = NN_TRY(convert(requestInShared));
+ const auto aidlMeasure = NN_TRY(convert(measure));
+ const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
+
+ std::vector<int64_t> memoryIdentifierTokens;
+ std::vector<OptionalCacheHold> holds;
+ memoryIdentifierTokens.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
+ if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
+ if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
+ auto& [identifier, hold] = *cached;
+ memoryIdentifierTokens.push_back(identifier);
+ holds.push_back(std::move(hold));
+ continue;
+ }
+ }
+ memoryIdentifierTokens.push_back(-1);
+ }
+ CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
+
+ return BurstExecution::create(shared_from_this(), std::move(aidlRequest),
+ std::move(memoryIdentifierTokens), aidlMeasure,
+ aidlLoopTimeoutDuration, std::move(relocation), std::move(holds));
+}
+
+nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
+ std::shared_ptr<const Burst> burst, Request request,
+ std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+ hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds) {
+ if (burst == nullptr) {
+ return NN_ERROR() << "aidl::utils::BurstExecution::create must have non-null burst";
+ }
+
+ return std::make_shared<const BurstExecution>(
+ PrivateConstructorTag{}, std::move(burst), std::move(request),
+ std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, std::move(relocation),
+ std::move(cacheHolds));
+}
+
+BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/, std::shared_ptr<const Burst> burst,
+ Request request, std::vector<int64_t> memoryIdentifierTokens,
+ bool measure, int64_t loopTimeoutDuration,
+ hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds)
+ : kBurst(std::move(burst)),
+ kRequest(std::move(request)),
+ kMemoryIdentifierTokens(std::move(memoryIdentifierTokens)),
+ kMeasure(measure),
+ kLoopTimeoutDuration(loopTimeoutDuration),
+ kRelocation(std::move(relocation)),
+ kCacheHolds(std::move(cacheHolds)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
+ const nn::OptionalTimePoint& deadline) const {
+ const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
+ kLoopTimeoutDuration, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on burst object";
+}
+
} // namespace aidl::android::hardware::neuralnetworks::utils
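
On the burst path, createReusableExecution() resolves the memory identifier tokens and takes the cache holds up front, so each compute() only performs the in-flight check, the input/output flushes, and the executeSynchronously call; computeFenced() is deliberately rejected with GENERAL_FAILURE. A minimal caller-side sketch under assumptions (an nn::SharedBurst backed by this adapter; the helper name computeOnBurst is illustrative):

#include <nnapi/IBurst.h>
#include <nnapi/IExecution.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

namespace nn = ::android::nn;

// Illustrative helper (not part of the change): reuse one burst execution twice.
nn::GeneralResult<void> computeOnBurst(const nn::SharedBurst& burst, const nn::Request& request) {
    // Memory identifier tokens and cache holds are captured once, here.
    const auto execution = NN_TRY(burst->createReusableExecution(
            request, nn::MeasureTiming::NO, /*loopTimeoutDuration=*/{}));

    // Repeated compute() calls reuse the tokens instead of re-querying the memory cache.
    for (int i = 0; i < 2; ++i) {
        if (const auto result = execution->compute(/*deadline=*/{}); !result.has_value()) {
            return NN_ERROR() << "burst compute failed: " << result.error().message;
        }
    }

    // Note: computeFenced() on this execution would report GENERAL_FAILURE.
    return {};
}
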
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
new file mode 100644
index 0000000..2aee8a6
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "aidl::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation), measure,
+ loopTimeoutDuration);
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kMeasure(measure),
+ kLoopTimeoutDuration(loopTimeoutDuration) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& deadline) const {
+ const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
+ kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto aidlWaitFor = NN_TRY(convert(waitFor));
+ const auto aidlDeadline = NN_TRY(convert(deadline));
+ const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return kPreparedModel->executeFencedInternal(kRequest, aidlWaitFor, kMeasure, aidlDeadline,
+ kLoopTimeoutDuration,
+ aidlTimeoutDurationAfterFence, kRelocation);
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 003965b..18e7636 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -19,8 +19,11 @@
#include "Burst.h"
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
+#include "ProtectCallback.h"
#include "Utils.h"
+#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
@@ -74,18 +77,32 @@
const nn::OptionalDuration& loopTimeoutDuration) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
const auto aidlLoopTimeoutDuration =
NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+ return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
+ relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const Request& request, bool measure, int64_t deadline,
+ int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
ExecutionResult executionResult;
- const auto ret = kPreparedModel->executeSynchronously(
- aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
+ const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
+ loopTimeoutDuration, &executionResult);
HANDLE_ASTATUS(ret) << "executeSynchronously failed";
if (!executionResult.outputSufficientSize) {
auto canonicalOutputShapes =
@@ -96,9 +113,9 @@
auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
@@ -109,8 +126,10 @@
const nn::OptionalDuration& timeoutDurationAfterFence) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared =
- NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation));
const auto aidlRequest = NN_TRY(convert(requestInShared));
const auto aidlWaitFor = NN_TRY(convert(waitFor));
@@ -118,11 +137,25 @@
const auto aidlDeadline = NN_TRY(convert(deadline));
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return executeFencedInternal(aidlRequest, aidlWaitFor, aidlMeasure, aidlDeadline,
+ aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence,
+ relocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+PreparedModel::executeFencedInternal(const Request& request,
+ const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ int64_t timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
FencedExecutionResult result;
- const auto ret = kPreparedModel->executeFenced(aidlRequest, aidlWaitFor, aidlMeasure,
- aidlDeadline, aidlLoopTimeoutDuration,
- aidlTimeoutDurationAfterFence, &result);
+ const auto ret =
+ kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
+ timeoutDurationAfterFence, &result);
HANDLE_ASTATUS(ret) << "executeFenced failed";
auto resultSyncFence = nn::SyncFence::createAsSignaled();
@@ -137,12 +170,12 @@
// If executeFenced required the request memory to be moved into shared memory, block here until
// the fenced execution has completed and flush the memory back.
- if (maybeRequestInShared.has_value()) {
+ if (relocation.output) {
const auto state = resultSyncFence.syncWait({});
if (state != nn::SyncFence::FenceState::SIGNALED) {
return NN_ERROR() << "syncWait failed with " << state;
}
- NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
+ relocation.output->flush();
}
// Create callback which can be used to retrieve the execution error status and timings.
@@ -159,6 +192,23 @@
return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto aidlRequest = NN_TRY(convert(requestInShared));
+ auto aidlMeasure = NN_TRY(convert(measure));
+ auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
+ return Execution::create(shared_from_this(), std::move(aidlRequest), std::move(relocation),
+ aidlMeasure, aidlLoopTimeoutDuration);
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
std::shared_ptr<IBurst> burst;
const auto ret = kPreparedModel->configureExecutionBurst(&burst);
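
Worth noting about executeFencedInternal() above: when the outputs had to be relocated into shared memory, the adapter blocks on the returned sync fence and flushes the outputs back before returning, so callers that passed pointer-backed outputs effectively receive an already-signaled fence. Below is a caller-side sketch of consuming the (syncFence, callback) pair, with an illustrative helper name and simplified error handling; the callback is assumed to return the launched/fenced timing pair, as in the canonical ExecuteFencedInfoCallback.

#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <vector>

namespace nn = ::android::nn;

// Illustrative helper (not part of the change): launch a fenced execution and
// read back the timing reported by the info callback.
nn::GeneralResult<nn::Timing> runFenced(const nn::SharedPreparedModel& preparedModel,
                                        const nn::Request& request,
                                        const std::vector<nn::SyncFence>& waitFor) {
    auto [syncFence, infoCallback] = NN_TRY(preparedModel->executeFenced(
            request, waitFor, nn::MeasureTiming::YES, /*deadline=*/{},
            /*loopTimeoutDuration=*/{}, /*timeoutDurationAfterFence=*/{}));

    // Wait for completion; with pointer-backed outputs the adapter has already waited.
    if (syncFence.syncWait({}) != nn::SyncFence::FenceState::SIGNALED) {
        return NN_ERROR() << "fenced execution did not signal";
    }

    // The callback reports the execution's error status and timing information.
    const auto [timingLaunched, timingFenced] = NN_TRY(infoCallback());
    return timingLaunched;
}
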
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
index ff98a7d..8bb5c90 100644
--- a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -21,6 +21,7 @@
#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -253,6 +254,225 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
+TEST(PreparedModelTest, reusableExecuteSync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto mockExecutionResult = ExecutionResult{
+ .outputSufficientSize = true,
+ .outputShapes = {},
+ .timing = kNoTiming,
+ };
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncError) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeGeneralFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFenced) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::NONE), Invoke(makeStatusOk)));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeFencedExecutionResult(mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+ ASSERT_NE(callback, nullptr);
+
+ // get results from callback
+ const auto callbackResult = callback();
+ ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code
+ << ": " << callbackResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedCallbackError) {
+ // setup call
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::GENERAL_FAILURE),
+ Invoke(makeStatusOk)));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code << ": "
+ << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+ ASSERT_NE(callback, nullptr);
+
+ // verify callback failure
+ const auto callbackResult = callback();
+ ASSERT_FALSE(callbackResult.has_value());
+ EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedError) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedDeadObject) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 8fe6b90..702ee92 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -20,6 +20,7 @@
#include <cutils/native_handle.h>
#include <hidl/HidlSupport.h>
#include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
#include <nnapi/Types.h>
#include <functional>
#include <vector>
@@ -59,19 +60,70 @@
nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
+// Record a relocation mapping between pointer-based data and shared memory.
+// Only two specializations of this template may exist:
+// - RelocationInfo<const void*> for request inputs
+// - RelocationInfo<void*> for request outputs
+template <typename PointerType>
+struct RelocationInfo {
+ PointerType data;
+ size_t length;
+ size_t offset;
+};
+using InputRelocationInfo = RelocationInfo<const void*>;
+using OutputRelocationInfo = RelocationInfo<void*>;
+
+// Keep track of the relocation mapping between pointer-based data and the shared memory pool,
+// and provide a method to copy the data between pointers and the shared memory pool.
+// Only two specializations of this template may exist:
+// - RelocationTracker<InputRelocationInfo> for request inputs
+// - RelocationTracker<OutputRelocationInfo> for request outputs
+template <typename RelocationInfoType>
+class RelocationTracker {
+ public:
+ static nn::GeneralResult<std::unique_ptr<RelocationTracker>> create(
+ std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory) {
+ auto mapping = NN_TRY(map(memory));
+ return std::make_unique<RelocationTracker<RelocationInfoType>>(
+ std::move(relocationInfos), std::move(memory), std::move(mapping));
+ }
+
+ RelocationTracker(std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory,
+ nn::Mapping mapping)
+ : kRelocationInfos(std::move(relocationInfos)),
+ kMemory(std::move(memory)),
+ kMapping(std::move(mapping)) {}
+
+ // Specializations defined in CommonUtils.cpp.
+ // For InputRelocationTracker, this method will copy pointer data to the shared memory pool.
+ // For OutputRelocationTracker, this method will copy shared memory data to the pointers.
+ void flush() const;
+
+ private:
+ const std::vector<RelocationInfoType> kRelocationInfos;
+ const nn::SharedMemory kMemory;
+ const nn::Mapping kMapping;
+};
+using InputRelocationTracker = RelocationTracker<InputRelocationInfo>;
+using OutputRelocationTracker = RelocationTracker<OutputRelocationInfo>;
+
+struct RequestRelocation {
+ std::unique_ptr<InputRelocationTracker> input;
+ std::unique_ptr<OutputRelocationTracker> output;
+};
+
// Relocate pointer-based data to shared memory. If `request` has no
// Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If
// `request` has Request::Argument::LifeTime::POINTER data, the request is copied to
// `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function
-// returns with a reference to `*maybeRequestInSharedOut`.
-nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
- const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);
-
-// Undoes `flushDataFromPointerToShared` on a Request object. More specifically,
-// `unflushDataFromSharedToPointer` copies the output shared memory data from the transformed
-// Request object back to the output pointer-based memory in the original Request object.
-nn::GeneralResult<void> unflushDataFromSharedToPointer(
- const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared);
+// returns with a reference to `*maybeRequestInSharedOut`. `relocationOut` is set to track the
+// input and output relocations.
+//
+// Unlike `flushDataFromPointerToShared`, this method will not copy the input pointer data to the
+// shared memory pool. Use `relocationOut` to flush the input or output data after the call.
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
+ const nn::Request* request, uint32_t alignment, uint32_t padding,
+ std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut);
nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
size_t numberOfOperands, const std::vector<nn::Operation>& operations);
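
The relocation API declared above is intended to be driven by the execution path: convert the request once, flush inputs into the shared pool immediately before each computation, and flush outputs back afterwards. The following is a minimal caller-side sketch only, not code from this change; namespaces are abbreviated, the alignment/padding constants are placeholders, and the driver call is elided.

#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <optional>

// Placeholder values for illustration only; not taken from this change.
constexpr uint32_t kAlignment = 64;
constexpr uint32_t kPadding = 64;

nn::GeneralResult<void> runWithRelocation(const nn::Request& request) {
    std::optional<nn::Request> maybeRequestInShared;
    utils::RequestRelocation relocation;
    const nn::Request& requestInShared = NN_TRY(utils::convertRequestFromPointerToShared(
            &request, kAlignment, kPadding, &maybeRequestInShared, &relocation));

    // Copy POINTER inputs into the shared input pool right before the computation.
    if (relocation.input) relocation.input->flush();

    // ... hand requestInShared to the driver here (hypothetical call, elided) ...

    // Copy results from the shared output pool back into the caller's POINTER outputs.
    if (relocation.output) relocation.output->flush();
    return {};
}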
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
index 17b3fd9..e86edda 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
@@ -35,6 +35,10 @@
const nn::Request& request, nn::MeasureTiming measure,
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
};
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h
new file mode 100644
index 0000000..5b00221
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidExecution final : public nn::IExecution {
+ public:
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+};
+
+} // namespace android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
index 3e1dca7..de30aae 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -40,6 +40,10 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
index c92cc41..fde2486 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
@@ -51,7 +51,16 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
private:
+ bool isValidInternal() const EXCLUDES(mMutex);
+ nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const;
+
const Factory kMakeBurst;
mutable std::mutex mMutex;
mutable nn::SharedBurst mBurst GUARDED_BY(mMutex);
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h
new file mode 100644
index 0000000..d0084e8
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
+
+#include <android-base/thread_annotations.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class ResilientExecution final : public nn::IExecution,
+ public std::enable_shared_from_this<ResilientExecution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ using Factory = std::function<nn::GeneralResult<nn::SharedExecution>()>;
+
+ static nn::GeneralResult<std::shared_ptr<const ResilientExecution>> create(
+ Factory makeExecution);
+
+ ResilientExecution(PrivateConstructorTag tag, Factory makeExecution,
+ nn::SharedExecution execution);
+
+ nn::SharedExecution getExecution() const;
+ nn::GeneralResult<nn::SharedExecution> recover(const nn::IExecution* failingExecution) const;
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ bool isValidInternal() const EXCLUDES(mMutex);
+
+ const Factory kMakeExecution;
+ mutable std::mutex mMutex;
+ mutable nn::SharedExecution mExecution GUARDED_BY(mMutex);
+};
+
+} // namespace android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
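
ResilientExecution follows the same recovery pattern as ResilientPreparedModel and ResilientBurst: every call goes through protect(), and if the underlying execution reports DEAD_OBJECT, the stored factory re-creates it and the call is retried once. A hedged wiring sketch, mirroring the currently-disabled #if 0 path in ResilientPreparedModel further below; `preparedModel`, `request`, `measure`, and `loopTimeoutDuration` are assumed to be in scope and are not part of this change.

// Sketch only: shows the intended factory wiring.
ResilientExecution::Factory makeExecution = [preparedModel, request, measure,
                                             loopTimeoutDuration] {
    return preparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
};
auto resilient = ResilientExecution::create(std::move(makeExecution)).value();
// resilient->compute(deadline) re-creates the execution through the factory if the
// previous one returns nn::ErrorStatus::DEAD_OBJECT, then retries the call once.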
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index a6c1b19..86533ed 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -58,12 +58,19 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
private:
bool isValidInternal() const EXCLUDES(mMutex);
+ nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;
const Factory kMakePreparedModel;
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 4d26795..8e55bf0 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -200,10 +200,31 @@
return **maybeModelInSharedOut;
}
-nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
- const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut) {
+template <>
+void InputRelocationTracker::flush() const {
+ // Copy from pointers to shared memory.
+ uint8_t* memoryPtr = static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
+ for (const auto& [data, length, offset] : kRelocationInfos) {
+ std::memcpy(memoryPtr + offset, data, length);
+ }
+}
+
+template <>
+void OutputRelocationTracker::flush() const {
+ // Copy from shared memory to pointers.
+ const uint8_t* memoryPtr = static_cast<const uint8_t*>(
+ std::visit([](auto ptr) { return static_cast<const void*>(ptr); }, kMapping.pointer));
+ for (const auto& [data, length, offset] : kRelocationInfos) {
+ std::memcpy(data, memoryPtr + offset, length);
+ }
+}
+
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
+ const nn::Request* request, uint32_t alignment, uint32_t padding,
+ std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut) {
CHECK(request != nullptr);
CHECK(maybeRequestInSharedOut != nullptr);
+ CHECK(relocationOut != nullptr);
if (hasNoPointerData(*request)) {
return *request;
@@ -213,8 +234,11 @@
// to the caller through `maybeRequestInSharedOut` if the function succeeds.
nn::Request requestInShared = *request;
+ RequestRelocation relocation;
+
// Change input pointers to shared memory.
- nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size());
+ nn::MutableMemoryBuilder inputBuilder(requestInShared.pools.size());
+ std::vector<InputRelocationInfo> inputRelocationInfos;
for (auto& input : requestInShared.inputs) {
const auto& location = input.location;
if (input.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -225,17 +249,21 @@
const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
location.pointer);
CHECK(data != nullptr);
- input.location = inputBuilder.append(data, location.length);
+ input.location = inputBuilder.append(location.length, alignment, padding);
+ inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
}
// Allocate input memory.
if (!inputBuilder.empty()) {
auto memory = NN_TRY(inputBuilder.finish());
- requestInShared.pools.push_back(std::move(memory));
+ requestInShared.pools.push_back(memory);
+ relocation.input = NN_TRY(
+ InputRelocationTracker::create(std::move(inputRelocationInfos), std::move(memory)));
}
// Change output pointers to shared memory.
nn::MutableMemoryBuilder outputBuilder(requestInShared.pools.size());
+ std::vector<OutputRelocationInfo> outputRelocationInfos;
for (auto& output : requestInShared.outputs) {
const auto& location = output.location;
if (output.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -243,62 +271,25 @@
}
output.lifetime = nn::Request::Argument::LifeTime::POOL;
- output.location = outputBuilder.append(location.length);
+ void* data = std::get<void*>(location.pointer);
+ CHECK(data != nullptr);
+ output.location = outputBuilder.append(location.length, alignment, padding);
+ outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
}
// Allocate output memory.
if (!outputBuilder.empty()) {
auto memory = NN_TRY(outputBuilder.finish());
- requestInShared.pools.push_back(std::move(memory));
+ requestInShared.pools.push_back(memory);
+ relocation.output = NN_TRY(OutputRelocationTracker::create(std::move(outputRelocationInfos),
+ std::move(memory)));
}
*maybeRequestInSharedOut = requestInShared;
+ *relocationOut = std::move(relocation);
return **maybeRequestInSharedOut;
}
-nn::GeneralResult<void> unflushDataFromSharedToPointer(
- const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
- if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
- !std::holds_alternative<nn::SharedMemory>(maybeRequestInShared->pools.back())) {
- return {};
- }
- const auto& requestInShared = *maybeRequestInShared;
-
- // Map the memory.
- const auto& outputMemory = std::get<nn::SharedMemory>(requestInShared.pools.back());
- const auto [pointer, size, context] = NN_TRY(map(outputMemory));
- const uint8_t* constantPointer =
- std::visit([](const auto& o) { return static_cast<const uint8_t*>(o); }, pointer);
-
- // Flush each output pointer.
- CHECK_EQ(request.outputs.size(), requestInShared.outputs.size());
- for (size_t i = 0; i < request.outputs.size(); ++i) {
- const auto& location = request.outputs[i].location;
- const auto& locationInShared = requestInShared.outputs[i].location;
- if (!std::holds_alternative<void*>(location.pointer)) {
- continue;
- }
-
- // Get output pointer and size.
- void* data = std::get<void*>(location.pointer);
- CHECK(data != nullptr);
- const size_t length = location.length;
-
- // Get output pool location.
- CHECK(requestInShared.outputs[i].lifetime == nn::Request::Argument::LifeTime::POOL);
- const size_t index = locationInShared.poolIndex;
- const size_t offset = locationInShared.offset;
- const size_t outputPoolIndex = requestInShared.pools.size() - 1;
- CHECK(locationInShared.length == length);
- CHECK(index == outputPoolIndex);
-
- // Flush memory.
- std::memcpy(data, constantPointer + offset, length);
- }
-
- return {};
-}
-
nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
index 0c34f05..0191533 100644
--- a/neuralnetworks/utils/common/src/InvalidBurst.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -38,4 +38,10 @@
return NN_ERROR() << "InvalidBurst";
}
+nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution(
+ const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ return NN_ERROR() << "InvalidBurst";
+}
+
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidExecution.cpp b/neuralnetworks/utils/common/src/InvalidExecution.cpp
new file mode 100644
index 0000000..c4edd25
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidExecution.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidExecution.h"
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidExecution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return NN_ERROR() << "InvalidExecution";
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+InvalidExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR() << "InvalidExecution";
+}
+
+} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 9081e1f..8195462 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -42,6 +42,12 @@
return NN_ERROR() << "InvalidPreparedModel";
}
+nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
+ const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ return NN_ERROR() << "InvalidPreparedModel";
+}
+
nn::GeneralResult<nn::SharedBurst> InvalidPreparedModel::configureExecutionBurst() const {
return NN_ERROR() << "InvalidPreparedModel";
}
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
index 38ccc62..79cbe39 100644
--- a/neuralnetworks/utils/common/src/ResilientBurst.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -19,6 +19,7 @@
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <nnapi/IBurst.h>
+#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -29,6 +30,9 @@
#include <optional>
#include <utility>
+#include "InvalidExecution.h"
+#include "ResilientExecution.h"
+
namespace android::hardware::neuralnetworks::utils {
namespace {
@@ -46,11 +50,11 @@
// Attempt recovery and return if it fails.
auto maybeBurst = resilientBurst.recover(burst.get());
if (!maybeBurst.has_value()) {
- auto [resultErrorMessage, resultErrorCode, resultOutputShapes] = std::move(result).error();
- const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeBurst.error();
- return nn::error(resultErrorCode, std::move(resultOutputShapes))
- << resultErrorMessage << ", and failed to recover dead burst object with error "
- << recoveryErrorCode << ": " << recoveryErrorMessage;
+ const auto& [message, code] = maybeBurst.error();
+ std::ostringstream oss;
+ oss << ", and failed to recover dead burst object with error " << code << ": " << message;
+ result.error().message += oss.str();
+ return result;
}
burst = std::move(maybeBurst).value();
@@ -109,4 +113,35 @@
return protect(*this, fn);
}
+nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+#if 0
+ auto self = shared_from_this();
+ ResilientExecution::Factory makeExecution =
+ [burst = std::move(self), request, measure, loopTimeoutDuration] {
+ return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ };
+ return ResilientExecution::create(std::move(makeExecution));
+#else
+ return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+#endif
+}
+
+nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ if (!isValidInternal()) {
+ return std::make_shared<const InvalidExecution>();
+ }
+ const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) {
+ return burst.createReusableExecution(request, measure, loopTimeoutDuration);
+ };
+ return protect(*this, fn);
+}
+
+bool ResilientBurst::isValidInternal() const {
+ return true;
+}
+
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientExecution.cpp b/neuralnetworks/utils/common/src/ResilientExecution.cpp
new file mode 100644
index 0000000..46b404a
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientExecution.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientExecution.h"
+
+#include "InvalidBurst.h"
+#include "ResilientBurst.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+template <typename FnType>
+auto protect(const ResilientExecution& resilientExecution, const FnType& fn)
+ -> decltype(fn(*resilientExecution.getExecution())) {
+ auto execution = resilientExecution.getExecution();
+ auto result = fn(*execution);
+
+ // Immediately return if the execution is not dead.
+ if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+ return result;
+ }
+
+ // Attempt recovery and return if it fails.
+ auto maybeExecution = resilientExecution.recover(execution.get());
+ if (!maybeExecution.has_value()) {
+ const auto& [message, code] = maybeExecution.error();
+ std::ostringstream oss;
+ oss << ", and failed to recover dead prepared model with error " << code << ": " << message;
+ result.error().message += oss.str();
+ return result;
+ }
+ execution = std::move(maybeExecution).value();
+
+ return fn(*execution);
+}
+
+} // namespace
+
+nn::GeneralResult<std::shared_ptr<const ResilientExecution>> ResilientExecution::create(
+ Factory makeExecution) {
+ if (makeExecution == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+ << "utils::ResilientExecution::create must have non-empty makeExecution";
+ }
+ auto execution = NN_TRY(makeExecution());
+ CHECK(execution != nullptr);
+ return std::make_shared<ResilientExecution>(PrivateConstructorTag{}, std::move(makeExecution),
+ std::move(execution));
+}
+
+ResilientExecution::ResilientExecution(PrivateConstructorTag /*tag*/, Factory makeExecution,
+ nn::SharedExecution execution)
+ : kMakeExecution(std::move(makeExecution)), mExecution(std::move(execution)) {
+ CHECK(kMakeExecution != nullptr);
+ CHECK(mExecution != nullptr);
+}
+
+nn::SharedExecution ResilientExecution::getExecution() const {
+ std::lock_guard guard(mMutex);
+ return mExecution;
+}
+
+nn::GeneralResult<nn::SharedExecution> ResilientExecution::recover(
+ const nn::IExecution* failingExecution) const {
+ std::lock_guard guard(mMutex);
+
+ // Another caller updated the failing execution.
+ if (mExecution.get() != failingExecution) {
+ return mExecution;
+ }
+
+ mExecution = NN_TRY(kMakeExecution());
+ return mExecution;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ResilientExecution::compute(const nn::OptionalTimePoint& deadline) const {
+ const auto fn = [&deadline](const nn::IExecution& execution) {
+ return execution.compute(deadline);
+ };
+ return protect(*this, fn);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ResilientExecution::computeFenced(const std::vector<nn::SyncFence>& waitFor,
+ const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto fn = [&waitFor, &deadline,
+ &timeoutDurationAfterFence](const nn::IExecution& execution) {
+ return execution.computeFenced(waitFor, deadline, timeoutDurationAfterFence);
+ };
+ return protect(*this, fn);
+}
+
+bool ResilientExecution::isValidInternal() const {
+ return true;
+}
+
+} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 5dd5f99..1ae19bc 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -17,7 +17,9 @@
#include "ResilientPreparedModel.h"
#include "InvalidBurst.h"
+#include "InvalidExecution.h"
#include "ResilientBurst.h"
+#include "ResilientExecution.h"
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
@@ -127,6 +129,21 @@
return protect(*this, fn);
}
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+#if 0
+ auto self = shared_from_this();
+ ResilientExecution::Factory makeExecution =
+ [preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
+ return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ };
+ return ResilientExecution::create(std::move(makeExecution));
+#else
+ return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+#endif
+}
+
nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurst() const {
#if 0
auto self = shared_from_this();
@@ -140,6 +157,19 @@
#endif
}
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ if (!isValidInternal()) {
+ return std::make_shared<const InvalidExecution>();
+ }
+ const auto fn = [&request, measure,
+ &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
+ return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
+ };
+ return protect(*this, fn);
+}
+
std::any ResilientPreparedModel::getUnderlyingResource() const {
return getPreparedModel()->getUnderlyingResource();
}
diff --git a/neuralnetworks/utils/common/test/MockExecution.h b/neuralnetworks/utils/common/test/MockExecution.h
new file mode 100644
index 0000000..91e3428
--- /dev/null
+++ b/neuralnetworks/utils/common/test/MockExecution.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
+
+namespace android::nn {
+
+class MockExecution final : public IExecution {
+ public:
+ MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), compute,
+ (const OptionalTimePoint& deadline), (const, override));
+ MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), computeFenced,
+ (const std::vector<SyncFence>& waitFor, const OptionalTimePoint& deadline,
+ const OptionalDuration& timeoutDurationAfterFence),
+ (const, override));
+};
+
+} // namespace android::nn
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h
index c004861..c8ce006 100644
--- a/neuralnetworks/utils/common/test/MockPreparedModel.h
+++ b/neuralnetworks/utils/common/test/MockPreparedModel.h
@@ -35,6 +35,10 @@
const OptionalDuration& loopTimeoutDuration,
const OptionalDuration& timeoutDurationAfterFence),
(const, override));
+ MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution,
+ (const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration),
+ (const, override));
MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override));
MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override));
};
diff --git a/neuralnetworks/utils/common/test/ResilientExecution.cpp b/neuralnetworks/utils/common/test/ResilientExecution.cpp
new file mode 100644
index 0000000..c0737fb
--- /dev/null
+++ b/neuralnetworks/utils/common/test/ResilientExecution.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/ResilientExecution.h>
+#include <utility>
+#include "MockExecution.h"
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+using ::testing::_;
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+
+using SharedMockExecution = std::shared_ptr<const nn::MockExecution>;
+using MockExecutionFactory = ::testing::MockFunction<nn::GeneralResult<nn::SharedExecution>()>;
+
+SharedMockExecution createMockExecution() {
+ return std::make_shared<const nn::MockExecution>();
+}
+
+std::tuple<SharedMockExecution, std::unique_ptr<MockExecutionFactory>,
+ std::shared_ptr<const ResilientExecution>>
+setup() {
+ auto mockExecution = std::make_shared<const nn::MockExecution>();
+
+ auto mockExecutionFactory = std::make_unique<MockExecutionFactory>();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(mockExecution));
+
+ auto buffer = ResilientExecution::create(mockExecutionFactory->AsStdFunction()).value();
+ return std::make_tuple(std::move(mockExecution), std::move(mockExecutionFactory),
+ std::move(buffer));
+}
+
+constexpr auto makeError = [](nn::ErrorStatus status) {
+ return [status](const auto&... /*args*/) { return nn::error(status); };
+};
+const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
+const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
+
+const auto kNoExecutionError =
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
+const auto kNoFencedExecutionError =
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>(
+ std::make_pair(nn::SyncFence::createAsSignaled(), nullptr));
+
+} // namespace
+
+TEST(ResilientExecutionTest, invalidExecutionFactory) {
+ // setup call
+ const auto invalidExecutionFactory = ResilientExecution::Factory{};
+
+ // run test
+ const auto result = ResilientExecution::create(invalidExecutionFactory);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST(ResilientExecutionTest, executionFactoryFailure) {
+ // setup call
+ const auto invalidExecutionFactory = kReturnGeneralFailure;
+
+ // run test
+ const auto result = ResilientExecution::create(invalidExecutionFactory);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, getExecution) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+
+ // run test
+ const auto result = execution->getExecution();
+
+ // verify result
+ EXPECT_TRUE(result == mockExecution);
+}
+
+TEST(ResilientExecutionTest, compute) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeError) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, computeDeadObjectFailedRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(ResilientExecutionTest, computeDeadObjectSuccessfulRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*recoveredMockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeFenced) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _))
+ .Times(1)
+ .WillOnce(Return(kNoFencedExecutionError));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeFencedError) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, computeFencedDeadObjectFailedRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(ResilientExecutionTest, computeFencedDeadObjectSuccessfulRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*recoveredMockExecution, computeFenced(_, _, _))
+ .Times(1)
+ .WillOnce(Return(kNoFencedExecutionError));
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, recover) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+ // run test
+ const auto result = execution->recover(mockExecution.get());
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ EXPECT_TRUE(result.value() == recoveredMockExecution);
+}
+
+TEST(ResilientExecutionTest, recoverFailure) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->recover(mockExecution.get());
+
+ // verify result
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(ResilientExecutionTest, someoneElseRecovered) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+ execution->recover(mockExecution.get());
+
+ // run test
+ const auto result = execution->recover(mockExecution.get());
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ EXPECT_TRUE(result.value() == recoveredMockExecution);
+}
+
+} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
index 6d86e10..d396ca8 100644
--- a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
@@ -55,6 +55,7 @@
const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
+const auto kNoCreateReusableExecutionError = nn::GeneralResult<nn::SharedExecution>{};
const auto kNoExecutionError =
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
const auto kNoFencedExecutionError =
@@ -231,6 +232,36 @@
<< "Failed with " << result.error().code << ": " << result.error().message;
}
+TEST(ResilientPreparedModelTest, createReusableExecution) {
+ // setup call
+ const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+ .Times(1)
+ .WillOnce(Return(kNoCreateReusableExecutionError));
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientPreparedModelTest, createReusableExecutionError) {
+ // setup call
+ const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+ .Times(1)
+ .WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
TEST(ResilientPreparedModelTest, getUnderlyingResource) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
diff --git a/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl b/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl
index 12d2042..66c8c8c 100644
--- a/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl
+++ b/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl
@@ -25,7 +25,7 @@
*/
int id;
/**
- * Time since boot in milliseconds
+ * Time of data capture in milliseconds since boot (CLOCK_BOOTTIME clock)
*/
long timestampMs;
/**
@@ -38,4 +38,3 @@
*/
EnergyConsumerAttribution[] attribution;
}
-
diff --git a/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl b/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl
index d3e8f46..31fbaa8 100644
--- a/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl
+++ b/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl
@@ -23,7 +23,7 @@
*/
int id;
/**
- * Approximate time of data capture in millseconds since boot
+ * Time of data capture in milliseconds since boot (CLOCK_BOOTTIME clock)
*/
long timestampMs;
/**
@@ -35,4 +35,3 @@
*/
long energyUWs;
}
-
diff --git a/radio/1.0/vts/OWNERS b/radio/1.0/vts/OWNERS
index 2384317..9310f8e 100644
--- a/radio/1.0/vts/OWNERS
+++ b/radio/1.0/vts/OWNERS
@@ -1,8 +1,7 @@
# Telephony team
amitmahajan@google.com
-sanketpadawe@google.com
shuoq@google.com
+jackyu@google.com
# VTS team
-yuexima@google.com
-yim@google.com
+dshi@google.com
diff --git a/radio/1.0/vts/functional/vts_test_util.cpp b/radio/1.0/vts/functional/vts_test_util.cpp
index fc37201..5b31acc 100644
--- a/radio/1.0/vts/functional/vts_test_util.cpp
+++ b/radio/1.0/vts/functional/vts_test_util.cpp
@@ -83,6 +83,13 @@
return hasFeature;
}
+bool isSsSsEnabled() {
+ // Do not use checkSubstringInCommandOutput("getprop persist.radio.multisim.config", "")
+ // until b/148904287 is fixed. We need exact matching instead of partial matching, because
+ // by definition the empty string "" is a substring of any string.
+ return !isDsDsEnabled() && !isTsTsEnabled();
+}
+
bool isDsDsEnabled() {
return testing::checkSubstringInCommandOutput("getprop persist.radio.multisim.config", "dsds");
}
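
Once b/148904287 is resolved, an exact-match check could replace the negation above. The following is a sketch only, assuming the test may read the property directly through libbase instead of parsing `getprop` output; it keeps the same semantics as isSsSsEnabled() (neither DSDS nor TSTS).

#include <android-base/properties.h>

// Sketch: exact comparison avoids the pitfall that the empty string "" is a
// substring of every string, so substring matching cannot detect "no value".
bool isSsSsEnabledExactMatch() {
    const std::string multiSimConfig =
            android::base::GetProperty("persist.radio.multisim.config", /*default_value=*/"");
    return multiSimConfig != "dsds" && multiSimConfig != "tsts";
}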
diff --git a/radio/1.0/vts/functional/vts_test_util.h b/radio/1.0/vts/functional/vts_test_util.h
index eeb1d29..fa338a3 100644
--- a/radio/1.0/vts/functional/vts_test_util.h
+++ b/radio/1.0/vts/functional/vts_test_util.h
@@ -80,12 +80,17 @@
bool deviceSupportsFeature(const char* feature);
/*
- * Check if device is in DSDS.
+ * Check if device is in SsSs (Single SIM Single Standby).
+ */
+bool isSsSsEnabled();
+
+/*
+ * Check if device is in DSDS (Dual SIM Dual Standby).
*/
bool isDsDsEnabled();
/*
- * Check if device is in TSTS.
+ * Check if device is in TSTS (Triple SIM Triple Standby).
*/
bool isTsTsEnabled();
diff --git a/radio/1.1/vts/OWNERS b/radio/1.1/vts/OWNERS
index 2384317..a07c917 100644
--- a/radio/1.1/vts/OWNERS
+++ b/radio/1.1/vts/OWNERS
@@ -1,8 +1 @@
-# Telephony team
-amitmahajan@google.com
-sanketpadawe@google.com
-shuoq@google.com
-
-# VTS team
-yuexima@google.com
-yim@google.com
+include ../../1.0/vts/OWNERS
diff --git a/radio/1.2/vts/OWNERS b/radio/1.2/vts/OWNERS
index 245d9d4..a07c917 100644
--- a/radio/1.2/vts/OWNERS
+++ b/radio/1.2/vts/OWNERS
@@ -1,9 +1 @@
-# Telephony team
-amitmahajan@google.com
-sanketpadawe@google.com
-shuoq@google.com
-sasindran@google.com
-
-# VTS team
-yuexima@google.com
-yim@google.com
+include ../../1.0/vts/OWNERS
diff --git a/radio/1.3/vts/OWNERS b/radio/1.3/vts/OWNERS
index d642064..a07c917 100644
--- a/radio/1.3/vts/OWNERS
+++ b/radio/1.3/vts/OWNERS
@@ -1,10 +1 @@
-# Telephony team
-amitmahajan@google.com
-sanketpadawe@google.com
-shuoq@google.com
-sasindran@google.com
-nazaninb@google.com
-
-# VTS team
-yuexima@google.com
-yim@google.com
+include ../../1.0/vts/OWNERS
diff --git a/radio/1.4/vts/OWNERS b/radio/1.4/vts/OWNERS
index fd69f36..a07c917 100644
--- a/radio/1.4/vts/OWNERS
+++ b/radio/1.4/vts/OWNERS
@@ -1,8 +1 @@
-# Telephony team
-amitmahajan@google.com
-shuoq@google.com
-sasindran@google.com
-
-# VTS team
-yuexima@google.com
-yim@google.com
\ No newline at end of file
+include ../../1.0/vts/OWNERS
diff --git a/radio/1.5/vts/OWNERS b/radio/1.5/vts/OWNERS
index 3629a6c..a07c917 100644
--- a/radio/1.5/vts/OWNERS
+++ b/radio/1.5/vts/OWNERS
@@ -1,10 +1 @@
-# Telephony team
-refuhoo@google.com
-amitmahajan@google.com
-jackyu@google.com
-fionaxu@google.com
-# more to add
-
-# VTS team
-yuexima@google.com
-dshi@google.com
\ No newline at end of file
+include ../../1.0/vts/OWNERS
diff --git a/radio/1.6/vts/OWNERS b/radio/1.6/vts/OWNERS
index 3629a6c..a07c917 100644
--- a/radio/1.6/vts/OWNERS
+++ b/radio/1.6/vts/OWNERS
@@ -1,10 +1 @@
-# Telephony team
-refuhoo@google.com
-amitmahajan@google.com
-jackyu@google.com
-fionaxu@google.com
-# more to add
-
-# VTS team
-yuexima@google.com
-dshi@google.com
\ No newline at end of file
+include ../../1.0/vts/OWNERS
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
index 037d747..5af007e 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
@@ -512,6 +512,8 @@
::android::hardware::radio::V1_6::RadioError::NONE,
::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS}));
}
+
+ sleep(1);
}
/*
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp b/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
index 4222441..8c99d92 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
@@ -45,10 +45,8 @@
::android::hardware::radio::V1_0::RadioError::NONE) {
static const std::regex kOperatorNumericRe("^[0-9]{5,6}$");
for (OperatorInfo info : radioRsp_v1_6->networkInfos) {
- if (info.operatorNumeric != nullptr) {
- ASSERT_TRUE(
- std::regex_match(std::string(info.operatorNumeric), kOperatorNumericRe));
- }
+ ASSERT_TRUE(
+ std::regex_match(std::string(info.operatorNumeric), kOperatorNumericRe));
}
}
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_test.cpp b/radio/1.6/vts/functional/radio_hidl_hal_test.cpp
index 5d514a0..00f4468 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_test.cpp
@@ -16,8 +16,39 @@
#include <radio_hidl_hal_utils_v1_6.h>
+bool isServiceValidForDeviceConfiguration(hidl_string& serviceName) {
+ if (isSsSsEnabled()) {
+ // Device is configured as SSSS.
+ if (serviceName != RADIO_SERVICE_SLOT1_NAME) {
+ ALOGI("%s instance is not valid for SSSS device.", serviceName.c_str());
+ return false;
+ }
+ } else if (isDsDsEnabled()) {
+ // Device is configured as DSDS.
+ if (serviceName != RADIO_SERVICE_SLOT1_NAME && serviceName != RADIO_SERVICE_SLOT2_NAME) {
+ ALOGI("%s instance is not valid for DSDS device.", serviceName.c_str());
+ return false;
+ }
+ } else if (isTsTsEnabled()) {
+ // Device is configured as TSTS.
+ if (serviceName != RADIO_SERVICE_SLOT1_NAME && serviceName != RADIO_SERVICE_SLOT2_NAME &&
+ serviceName != RADIO_SERVICE_SLOT3_NAME) {
+ ALOGI("%s instance is not valid for TSTS device.", serviceName.c_str());
+ return false;
+ }
+ }
+ return true;
+}
+
void RadioHidlTest_v1_6::SetUp() {
- radio_v1_6 = android::hardware::radio::V1_6::IRadio::getService(GetParam());
+ hidl_string serviceName = GetParam();
+
+ if (!isServiceValidForDeviceConfiguration(serviceName)) {
+ ALOGI("Skipped the test due to device configuration.");
+ GTEST_SKIP();
+ }
+
+ radio_v1_6 = android::hardware::radio::V1_6::IRadio::getService(serviceName);
ASSERT_NE(nullptr, radio_v1_6.get());
radioRsp_v1_6 = new (std::nothrow) RadioResponse_v1_6(*this);
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h b/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
index 3185f98..54c2977 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
+++ b/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
@@ -48,7 +48,9 @@
#define MODEM_EMERGENCY_CALL_ESTABLISH_TIME 3
#define MODEM_EMERGENCY_CALL_DISCONNECT_TIME 3
-#define RADIO_SERVICE_NAME "slot1"
+#define RADIO_SERVICE_SLOT1_NAME "slot1" // HAL instance name for SIM slot 1 or single SIM device
+#define RADIO_SERVICE_SLOT2_NAME "slot2" // HAL instance name for SIM slot 2 on dual SIM device
+#define RADIO_SERVICE_SLOT3_NAME "slot3" // HAL instance name for SIM slot 3 on triple SIM device
class RadioHidlTest_v1_6;
extern ::android::hardware::radio::V1_5::CardStatus cardStatus;
diff --git a/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl b/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl
index c6f89bd..b6af813 100644
--- a/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl
+++ b/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl
@@ -275,8 +275,7 @@
*
* o Tag::RSA_PUBLIC_EXPONENT specifies the RSA public exponent value. If omitted, generateKey
* must return ErrorCode::INVALID_ARGUMENT. The values 3 and 65537 must be supported. It is
- * recommended to support all prime values up to 2^64. If provided with a non-prime value,
- * generateKey must return ErrorCode::INVALID_ARGUMENT.
+ * recommended to support all prime values up to 2^64.
*
* The following parameters are not necessary to generate a usable RSA key, but generateKey must
* not return an error if they are omitted:
diff --git a/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl b/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl
index 1e101ab..8fbc91a 100644
--- a/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl
+++ b/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl
@@ -505,10 +505,10 @@
/**
* Tag::APPLICATION_ID. When provided to generateKey or importKey, this tag specifies data
- * that is necessary during all uses of the key. In particular, calls to exportKey() must
- * provide the same value to the clientId parameter, and calls to begin() must provide this
- * tag and the same associated data as part of the inParams set. If the correct data is not
- * provided, the method must return ErrorCode::INVALID_KEY_BLOB.
+ * that is necessary during all uses of the key. In particular, calls to exportKey() and
+ * getKeyCharacteristics() must provide the same value to the clientId parameter, and calls to
+ * begin() must provide this tag and the same associated data as part of the inParams set. If
+ * the correct data is not provided, the method must return ErrorCode::INVALID_KEY_BLOB.
*
* The content of this tag must be bound to the key cryptographically, meaning it must not be
* possible for an adversary who has access to all of the secure world secrets but does not have
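To make the Tag::APPLICATION_ID contract above concrete, a small sketch in the style of the KeyMint VTS helpers that appear later in this change (the TAG_APPLICATION_ID builder overload and the literal value are assumptions for illustration, not part of the change):

// Sketch: bind a key to an application id at generation time, then present the same
// value on every use. exportKey() and getKeyCharacteristics() take it as clientId;
// begin() takes it via TAG_APPLICATION_ID in inParams. A wrong or missing value must
// fail with ErrorCode::INVALID_KEY_BLOB.
const string app_id = "example-app-id";  // hypothetical value
ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
                                             .EcdsaSigningKey(EcCurve::P_256)
                                             .Digest(Digest::SHA_2_256)
                                             .Authorization(TAG_APPLICATION_ID, app_id)
                                             .SetDefaultValidity()));
EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::SIGN,
                               AuthorizationSetBuilder()
                                       .Digest(Digest::SHA_2_256)
                                       .Authorization(TAG_APPLICATION_ID, app_id)));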
@@ -573,8 +573,8 @@
* Tag::OS_VERSION specifies the system OS version with which the key may be used. This tag is
* never sent to the IKeyMintDevice, but is added to the hardware-enforced authorization list
* by the TA. Any attempt to use a key with a Tag::OS_VERSION value different from the
- * currently-running OS version must cause begin() or exportKey() to return
- * ErrorCode::KEY_REQUIRES_UPGRADE. See upgradeKey() for details.
+ * currently-running OS version must cause begin(), getKeyCharacteristics() or exportKey() to
+ * return ErrorCode::KEY_REQUIRES_UPGRADE. See upgradeKey() for details.
*
* The value of the tag is an integer of the form MMmmss, where MM is the major version number,
* mm is the minor version number, and ss is the sub-minor version number. For example, for a
@@ -596,8 +596,9 @@
* Tag::OS_PATCHLEVEL specifies the system security patch level with which the key may be used.
* This tag is never sent to the keyMint TA, but is added to the hardware-enforced
* authorization list by the TA. Any attempt to use a key with a Tag::OS_PATCHLEVEL value
- * different from the currently-running system patchlevel must cause begin() or
- * exportKey() to return ErrorCode::KEY_REQUIRES_UPGRADE. See upgradeKey() for details.
+ * different from the currently-running system patchlevel must cause begin(),
+ * getKeyCharacteristics() or exportKey() to return ErrorCode::KEY_REQUIRES_UPGRADE. See
+ * upgradeKey() for details.
*
* The value of the tag is an integer of the form YYYYMM, where YYYY is the four-digit year of
* the last update and MM is the two-digit month of the last update. For example, for a key
@@ -789,8 +790,9 @@
* Tag::VENDOR_PATCHLEVEL specifies the vendor image security patch level with which the key may
* be used. This tag is never sent to the keyMint TA, but is added to the hardware-enforced
* authorization list by the TA. Any attempt to use a key with a Tag::VENDOR_PATCHLEVEL value
- * different from the currently-running system patchlevel must cause begin() or
- * exportKey() to return ErrorCode::KEY_REQUIRES_UPGRADE. See upgradeKey() for details.
+ * different from the currently-running system patchlevel must cause begin(),
+ * getKeyCharacteristics() or exportKey() to return ErrorCode::KEY_REQUIRES_UPGRADE. See
+ * upgradeKey() for details.
*
* The value of the tag is an integer of the form YYYYMMDD, where YYYY is the four-digit year of
* the last update, MM is the two-digit month and DD is the two-digit day of the last
@@ -811,8 +813,8 @@
* key may be used. This tag is never sent to the keyMint TA, but is added to the
* hardware-enforced authorization list by the TA. Any attempt to use a key with a
* Tag::BOOT_PATCHLEVEL value different from the currently-running system patchlevel must
- * cause begin() or exportKey() to return ErrorCode::KEY_REQUIRES_UPGRADE. See upgradeKey() for
- * details.
+ * cause begin(), getKeyCharacteristics() or exportKey() to return
+ * ErrorCode::KEY_REQUIRES_UPGRADE. See upgradeKey() for details.
*
* The value of the tag is an integer of the form YYYYMMDD, where YYYY is the four-digit year of
* the last update, MM is the two-digit month and DD is the two-digit day of the last
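The integer encodings described above (MMmmss for OS_VERSION, YYYYMM for OS_PATCHLEVEL, YYYYMMDD for VENDOR_PATCHLEVEL and BOOT_PATCHLEVEL) are plain decimal packing; a standalone sketch with illustrative values:

#include <cstdint>

// OS_VERSION is MMmmss: e.g. version 11.0.2 encodes as 110002.
constexpr uint32_t osVersion(uint32_t major, uint32_t minor, uint32_t sub) {
    return major * 10000 + minor * 100 + sub;
}
// OS_PATCHLEVEL is YYYYMM: e.g. the 2021-03 patch level encodes as 202103.
constexpr uint32_t osPatchlevel(uint32_t year, uint32_t month) {
    return year * 100 + month;
}
// VENDOR_PATCHLEVEL and BOOT_PATCHLEVEL are YYYYMMDD: e.g. 2021-03-05 encodes as 20210305.
constexpr uint32_t datePatchlevel(uint32_t year, uint32_t month, uint32_t day) {
    return (year * 100 + month) * 100 + day;
}

static_assert(osVersion(11, 0, 2) == 110002, "MMmmss");
static_assert(osPatchlevel(2021, 3) == 202103, "YYYYMM");
static_assert(datePatchlevel(2021, 3, 5) == 20210305, "YYYYMMDD");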
diff --git a/security/keymint/aidl/vts/functional/AttestKeyTest.cpp b/security/keymint/aidl/vts/functional/AttestKeyTest.cpp
index afb2193..881354d 100644
--- a/security/keymint/aidl/vts/functional/AttestKeyTest.cpp
+++ b/security/keymint/aidl/vts/functional/AttestKeyTest.cpp
@@ -56,6 +56,7 @@
{} /* attestation signing key */, &attest_key.keyBlob,
&attest_key_characteristics, &attest_key_cert_chain));
+ ASSERT_GT(attest_key_cert_chain.size(), 0);
EXPECT_EQ(attest_key_cert_chain.size(), 1);
EXPECT_TRUE(IsSelfSigned(attest_key_cert_chain)) << "Failed on size " << size;
@@ -124,16 +125,18 @@
EXPECT_EQ(attested_key_cert_chain.size(), 2);
/*
- * Use attestation key to sign EC key
+ * Use attestation key to sign EC key. Specify a CREATION_DATETIME for this one.
*/
attested_key_characteristics.resize(0);
attested_key_cert_chain.resize(0);
+ uint64_t timestamp = 1619621648000;
EXPECT_EQ(ErrorCode::OK,
GenerateKey(AuthorizationSetBuilder()
.EcdsaSigningKey(EcCurve::P_256)
.Authorization(TAG_NO_AUTH_REQUIRED)
.AttestationChallenge("foo")
.AttestationApplicationId("bar")
+ .Authorization(TAG_CREATION_DATETIME, timestamp)
.SetDefaultValidity(),
attest_key, &attested_key_blob, &attested_key_characteristics,
&attested_key_cert_chain));
@@ -143,6 +146,12 @@
hw_enforced = HwEnforcedAuthorizations(attested_key_characteristics);
sw_enforced = SwEnforcedAuthorizations(attested_key_characteristics);
+ // The client-specified CREATION_DATETIME should be in sw_enforced.
+ // Its presence will also trigger verify_attestation_record() to check that it
+ // is in the attestation extension with a matching value.
+ EXPECT_TRUE(sw_enforced.Contains(TAG_CREATION_DATETIME, timestamp))
+ << "expected CREATION_TIMESTAMP in sw_enforced:" << sw_enforced
+ << " not in hw_enforced:" << hw_enforced;
EXPECT_TRUE(verify_attestation_record("foo", "bar", sw_enforced, hw_enforced, SecLevel(),
attested_key_cert_chain[0].encodedCertificate));
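For reference, Tag::CREATION_DATETIME is expressed in milliseconds since the Unix epoch, so the fixed 1619621648000 above falls on 28 April 2021 (UTC); a live value could be produced like this (sketch only):

#include <chrono>
#include <cstdint>

// Sketch: current time as a CREATION_DATETIME value (milliseconds since the Unix epoch).
uint64_t creationDatetimeMillis() {
    using namespace std::chrono;
    return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}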
@@ -479,6 +488,53 @@
}
}
+TEST_P(AttestKeyTest, MissingChallenge) {
+ for (auto size : ValidKeySizes(Algorithm::RSA)) {
+ /*
+ * Create attestation key.
+ */
+ AttestationKey attest_key;
+ vector<KeyCharacteristics> attest_key_characteristics;
+ vector<Certificate> attest_key_cert_chain;
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .RsaSigningKey(size, 65537)
+ .AttestKey()
+ .SetDefaultValidity(),
+ {} /* attestation signing key */, &attest_key.keyBlob,
+ &attest_key_characteristics, &attest_key_cert_chain));
+
+ EXPECT_EQ(attest_key_cert_chain.size(), 1);
+ EXPECT_TRUE(IsSelfSigned(attest_key_cert_chain)) << "Failed on size " << size;
+
+ /*
+ * Use attestation key to sign RSA / ECDSA key but forget to provide a challenge
+ */
+ attest_key.issuerSubjectName = make_name_from_str("Android Keystore Key");
+ vector<uint8_t> attested_key_blob;
+ vector<KeyCharacteristics> attested_key_characteristics;
+ vector<Certificate> attested_key_cert_chain;
+ EXPECT_EQ(ErrorCode::INVALID_ARGUMENT,
+ GenerateKey(AuthorizationSetBuilder()
+ .RsaSigningKey(2048, 65537)
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AttestationApplicationId("bar")
+ .SetDefaultValidity(),
+ attest_key, &attested_key_blob, &attested_key_characteristics,
+ &attested_key_cert_chain));
+
+ EXPECT_EQ(ErrorCode::INVALID_ARGUMENT,
+ GenerateKey(AuthorizationSetBuilder()
+ .EcdsaSigningKey(EcCurve::P_256)
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AttestationApplicationId("bar")
+ .SetDefaultValidity(),
+ attest_key, &attested_key_blob, &attested_key_characteristics,
+ &attested_key_cert_chain));
+
+ CheckedDeleteKey(&attest_key.keyBlob);
+ }
+}
+
TEST_P(AttestKeyTest, AllEcCurves) {
for (auto curve : ValidCurves()) {
/*
@@ -494,6 +550,7 @@
{} /* attestation signing key */, &attest_key.keyBlob,
&attest_key_characteristics, &attest_key_cert_chain));
+ ASSERT_GT(attest_key_cert_chain.size(), 0);
EXPECT_EQ(attest_key_cert_chain.size(), 1);
EXPECT_TRUE(IsSelfSigned(attest_key_cert_chain)) << "Failed on curve " << curve;
@@ -577,6 +634,7 @@
{} /* attestation signing key */, &non_attest_key.keyBlob,
&non_attest_key_characteristics, &non_attest_key_cert_chain));
+ ASSERT_GT(non_attest_key_cert_chain.size(), 0);
EXPECT_EQ(non_attest_key_cert_chain.size(), 1);
EXPECT_TRUE(IsSelfSigned(non_attest_key_cert_chain));
diff --git a/security/keymint/aidl/vts/functional/DeviceUniqueAttestationTest.cpp b/security/keymint/aidl/vts/functional/DeviceUniqueAttestationTest.cpp
index 7009c6e..6f0ee4e 100644
--- a/security/keymint/aidl/vts/functional/DeviceUniqueAttestationTest.cpp
+++ b/security/keymint/aidl/vts/functional/DeviceUniqueAttestationTest.cpp
@@ -70,13 +70,12 @@
.Digest(Digest::SHA_2_256)
.Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)
.Authorization(TAG_INCLUDE_UNIQUE_ID)
- .Authorization(TAG_NO_AUTH_REQUIRED)
.AttestationChallenge("challenge")
.AttestationApplicationId("foo")
.Authorization(TAG_DEVICE_UNIQUE_ATTESTATION),
&key_blob, &key_characteristics);
- ASSERT_TRUE(result == ErrorCode::UNSUPPORTED_TAG);
+ ASSERT_EQ(result, ErrorCode::INVALID_ARGUMENT);
}
/*
@@ -102,7 +101,7 @@
.Authorization(TAG_DEVICE_UNIQUE_ATTESTATION),
&key_blob, &key_characteristics);
- ASSERT_TRUE(result == ErrorCode::UNSUPPORTED_TAG);
+ ASSERT_EQ(result, ErrorCode::INVALID_ARGUMENT);
}
/*
@@ -124,7 +123,6 @@
.Digest(Digest::SHA_2_256)
.Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)
.Authorization(TAG_INCLUDE_UNIQUE_ID)
- .Authorization(TAG_NO_AUTH_REQUIRED)
.AttestationChallenge("challenge")
.AttestationApplicationId("foo")
.Authorization(TAG_DEVICE_UNIQUE_ATTESTATION),
diff --git a/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp b/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp
index f0dfff1..4789204 100644
--- a/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp
+++ b/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp
@@ -167,6 +167,7 @@
securityLevel_ = info.securityLevel;
name_.assign(info.keyMintName.begin(), info.keyMintName.end());
author_.assign(info.keyMintAuthorName.begin(), info.keyMintAuthorName.end());
+ timestamp_token_required_ = info.timestampTokenRequired;
os_version_ = getOsVersion();
os_patch_level_ = getOsPatchlevel();
@@ -273,7 +274,8 @@
ErrorCode KeyMintAidlTestBase::ImportWrappedKey(string wrapped_key, string wrapping_key,
const AuthorizationSet& wrapping_key_desc,
string masking_key,
- const AuthorizationSet& unwrapping_params) {
+ const AuthorizationSet& unwrapping_params,
+ int64_t password_sid, int64_t biometric_sid) {
EXPECT_EQ(ErrorCode::OK, ImportKey(wrapping_key_desc, KeyFormat::PKCS8, wrapping_key));
key_characteristics_.clear();
@@ -282,8 +284,7 @@
Status result = keymint_->importWrappedKey(
vector<uint8_t>(wrapped_key.begin(), wrapped_key.end()), key_blob_,
vector<uint8_t>(masking_key.begin(), masking_key.end()),
- unwrapping_params.vector_data(), 0 /* passwordSid */, 0 /* biometricSid */,
- &creationResult);
+ unwrapping_params.vector_data(), password_sid, biometric_sid, &creationResult);
if (result.isOk()) {
EXPECT_PRED2(KeyCharacteristicsBasicallyValid, SecLevel(),
@@ -332,6 +333,11 @@
return GetReturnErrorCode(result);
}
+ErrorCode KeyMintAidlTestBase::DestroyAttestationIds() {
+ Status result = keymint_->destroyAttestationIds();
+ return GetReturnErrorCode(result);
+}
+
void KeyMintAidlTestBase::CheckedDeleteKey(vector<uint8_t>* key_blob, bool keep_key_blob) {
ErrorCode result = DeleteKey(key_blob, keep_key_blob);
EXPECT_TRUE(result == ErrorCode::OK || result == ErrorCode::UNIMPLEMENTED) << result << endl;
@@ -654,6 +660,18 @@
return ciphertext;
}
+string KeyMintAidlTestBase::EncryptMessage(const string& message, BlockMode block_mode,
+ PaddingMode padding, uint8_t mac_length_bits) {
+ SCOPED_TRACE("EncryptMessage");
+ auto params = AuthorizationSetBuilder()
+ .BlockMode(block_mode)
+ .Padding(padding)
+ .Authorization(TAG_MAC_LENGTH, mac_length_bits);
+ AuthorizationSet out_params;
+ string ciphertext = EncryptMessage(message, params, &out_params);
+ return ciphertext;
+}
+
string KeyMintAidlTestBase::DecryptMessage(const vector<uint8_t>& key_blob,
const string& ciphertext,
const AuthorizationSet& params) {
diff --git a/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.h b/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.h
index 88998d5..cb38938 100644
--- a/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.h
+++ b/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.h
@@ -95,13 +95,22 @@
ErrorCode ImportWrappedKey(string wrapped_key, string wrapping_key,
const AuthorizationSet& wrapping_key_desc, string masking_key,
- const AuthorizationSet& unwrapping_params);
+ const AuthorizationSet& unwrapping_params, int64_t password_sid,
+ int64_t biometric_sid);
+ ErrorCode ImportWrappedKey(string wrapped_key, string wrapping_key,
+ const AuthorizationSet& wrapping_key_desc, string masking_key,
+ const AuthorizationSet& unwrapping_params) {
+ return ImportWrappedKey(wrapped_key, wrapping_key, wrapping_key_desc, masking_key,
+ unwrapping_params, 0 /* password_sid */, 0 /* biometric_sid */);
+ }
ErrorCode DeleteKey(vector<uint8_t>* key_blob, bool keep_key_blob = false);
ErrorCode DeleteKey(bool keep_key_blob = false);
ErrorCode DeleteAllKeys();
+ ErrorCode DestroyAttestationIds();
+
void CheckedDeleteKey(vector<uint8_t>* key_blob, bool keep_key_blob = false);
void CheckedDeleteKey();
@@ -166,6 +175,8 @@
const vector<uint8_t>& iv_in);
string EncryptMessage(const string& message, BlockMode block_mode, PaddingMode padding,
uint8_t mac_length_bits, const vector<uint8_t>& iv_in);
+ string EncryptMessage(const string& message, BlockMode block_mode, PaddingMode padding,
+ uint8_t mac_length_bits);
string DecryptMessage(const vector<uint8_t>& key_blob, const string& ciphertext,
const AuthorizationSet& params);
@@ -268,6 +279,7 @@
uint32_t os_version_;
uint32_t os_patch_level_;
uint32_t vendor_patch_level_;
+ bool timestamp_token_required_;
SecurityLevel securityLevel_;
string name_;
diff --git a/security/keymint/aidl/vts/functional/KeyMintTest.cpp b/security/keymint/aidl/vts/functional/KeyMintTest.cpp
index f9a99aa..cd7d603 100644
--- a/security/keymint/aidl/vts/functional/KeyMintTest.cpp
+++ b/security/keymint/aidl/vts/functional/KeyMintTest.cpp
@@ -115,109 +115,296 @@
return b;
}
-string rsa_key =
- hex2str("30820275020100300d06092a864886f70d01010105000482025f3082025b"
- "02010002818100c6095409047d8634812d5a218176e45c41d60a75b13901"
- "f234226cffe776521c5a77b9e389417b71c0b6a44d13afe4e4a2805d46c9"
- "da2935adb1ff0c1f24ea06e62b20d776430a4d435157233c6f916783c30e"
- "310fcbd89b85c2d56771169785ac12bca244abda72bfb19fc44d27c81e1d"
- "92de284f4061edfd99280745ea6d2502030100010281801be0f04d9cae37"
- "18691f035338308e91564b55899ffb5084d2460e6630257e05b3ceab0297"
- "2dfabcd6ce5f6ee2589eb67911ed0fac16e43a444b8c861e544a05933657"
- "72f8baf6b22fc9e3c5f1024b063ac080a7b2234cf8aee8f6c47bbf4fd3ac"
- "e7240290bef16c0b3f7f3cdd64ce3ab5912cf6e32f39ab188358afcccd80"
- "81024100e4b49ef50f765d3b24dde01aceaaf130f2c76670a91a61ae08af"
- "497b4a82be6dee8fcdd5e3f7ba1cfb1f0c926b88f88c92bfab137fba2285"
- "227b83c342ff7c55024100ddabb5839c4c7f6bf3d4183231f005b31aa58a"
- "ffdda5c79e4cce217f6bc930dbe563d480706c24e9ebfcab28a6cdefd324"
- "b77e1bf7251b709092c24ff501fd91024023d4340eda3445d8cd26c14411"
- "da6fdca63c1ccd4b80a98ad52b78cc8ad8beb2842c1d280405bc2f6c1bea"
- "214a1d742ab996b35b63a82a5e470fa88dbf823cdd02401b7b57449ad30d"
- "1518249a5f56bb98294d4b6ac12ffc86940497a5a5837a6cf946262b4945"
- "26d328c11e1126380fde04c24f916dec250892db09a6d77cdba351024077"
- "62cd8f4d050da56bd591adb515d24d7ccd32cca0d05f866d583514bd7324"
- "d5f33645e8ed8b4a1cb3cc4a1d67987399f2a09f5b3fb68c88d5e5d90ac3"
- "3492d6");
+string rsa_key = hex2str(
+ // RFC 5208 s5
+ "30820275" // SEQUENCE length 0x275 (PrivateKeyInfo) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "300d" // SEQUENCE length 0x0d (AlgorithmIdentifier) {
+ "0609" // OBJECT IDENTIFIER length 9 (algorithm)
+ "2a864886f70d010101" // 1.2.840.113549.1.1.1 (rsaEncryption)
+ "0500" // NULL (parameters)
+ // } end SEQUENCE (AlgorithmIdentifier)
+ "0482025f" // OCTET STRING length 0x25f (privateKey) holding...
+ // RFC 8017 A.1.2
+ "3082025b" // SEQUENCE length 0x25b (RSAPrivateKey) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "028181" // INTEGER length 0x81 value (modulus) ...
+ "00c6095409047d8634812d5a218176e4"
+ "5c41d60a75b13901f234226cffe77652"
+ "1c5a77b9e389417b71c0b6a44d13afe4"
+ "e4a2805d46c9da2935adb1ff0c1f24ea"
+ "06e62b20d776430a4d435157233c6f91"
+ "6783c30e310fcbd89b85c2d567711697"
+ "85ac12bca244abda72bfb19fc44d27c8"
+ "1e1d92de284f4061edfd99280745ea6d"
+ "25"
+ "0203010001" // INTEGER length 3 value 0x10001 (publicExponent)
+ "028180" // INTEGER length 0x80 (privateExponent) value...
+ "1be0f04d9cae3718691f035338308e91"
+ "564b55899ffb5084d2460e6630257e05"
+ "b3ceab02972dfabcd6ce5f6ee2589eb6"
+ "7911ed0fac16e43a444b8c861e544a05"
+ "93365772f8baf6b22fc9e3c5f1024b06"
+ "3ac080a7b2234cf8aee8f6c47bbf4fd3"
+ "ace7240290bef16c0b3f7f3cdd64ce3a"
+ "b5912cf6e32f39ab188358afcccd8081"
+ "0241" // INTEGER length 0x41 (prime1)
+ "00e4b49ef50f765d3b24dde01aceaaf1"
+ "30f2c76670a91a61ae08af497b4a82be"
+ "6dee8fcdd5e3f7ba1cfb1f0c926b88f8"
+ "8c92bfab137fba2285227b83c342ff7c"
+ "55"
+ "0241" // INTEGER length 0x41 (prime2)
+ "00ddabb5839c4c7f6bf3d4183231f005"
+ "b31aa58affdda5c79e4cce217f6bc930"
+ "dbe563d480706c24e9ebfcab28a6cdef"
+ "d324b77e1bf7251b709092c24ff501fd"
+ "91"
+ "0240" // INTEGER length 0x40 (exponent1)
+ "23d4340eda3445d8cd26c14411da6fdc"
+ "a63c1ccd4b80a98ad52b78cc8ad8beb2"
+ "842c1d280405bc2f6c1bea214a1d742a"
+ "b996b35b63a82a5e470fa88dbf823cdd"
+ "0240" // INTEGER length 0x40 (exponent2)
+ "1b7b57449ad30d1518249a5f56bb9829"
+ "4d4b6ac12ffc86940497a5a5837a6cf9"
+ "46262b494526d328c11e1126380fde04"
+ "c24f916dec250892db09a6d77cdba351"
+ "0240" // INTEGER length 0x40 (coefficient)
+ "7762cd8f4d050da56bd591adb515d24d"
+ "7ccd32cca0d05f866d583514bd7324d5"
+ "f33645e8ed8b4a1cb3cc4a1d67987399"
+ "f2a09f5b3fb68c88d5e5d90ac33492d6"
+ // } end SEQUENCE (RSAPrivateKey)
+ // } end SEQUENCE (PrivateKeyInfo)
+);
/*
* DER-encoded PKCS#8 format RSA key. Generated using:
*
* openssl genrsa 2048 | openssl pkcs8 -topk8 -nocrypt -outform der | hexdump -e '30/1 "%02X" "\n"'
*/
-string rsa_2048_key =
- hex2str("308204BD020100300D06092A864886F70D0101010500048204A7308204A3"
- "0201000282010100BEBC342B56D443B1299F9A6A7056E80A897E318476A5"
- "A18029E63B2ED739A61791D339F58DC763D9D14911F2EDEC383DEE11F631"
- "9B44510E7A3ECD9B79B97382E49500ACF8117DC89CAF0E621F77756554A2"
- "FD4664BFE7AB8B59AB48340DBFA27B93B5A81F6ECDEB02D0759307128DF3"
- "E3BAD4055C8B840216DFAA5700670E6C5126F0962FCB70FF308F25049164"
- "CCF76CC2DA66A7DD9A81A714C2809D69186133D29D84568E892B6FFBF319"
- "9BDB14383EE224407F190358F111A949552ABA6714227D1BD7F6B20DD0CB"
- "88F9467B719339F33BFF35B3870B3F62204E4286B0948EA348B524544B5F"
- "9838F29EE643B079EEF8A713B220D7806924CDF7295070C5020301000102"
- "82010069F377F35F2F584EF075353CCD1CA99738DB3DBC7C7FF35F9366CE"
- "176DFD1B135AB10030344ABF5FBECF1D4659FDEF1C0FC430834BE1BE3911"
- "951377BB3D563A2EA9CA8F4AD9C48A8CE6FD516A735C662686C7B4B3C09A"
- "7B8354133E6F93F790D59EAEB92E84C9A4339302CCE28FDF04CCCAFA7DE3"
- "F3A827D4F6F7D38E68B0EC6AB706645BF074A4E4090D06FB163124365FD5"
- "EE7A20D350E9958CC30D91326E1B292E9EF5DB408EC42DAF737D20149704"
- "D0A678A0FB5B5446863B099228A352D604BA8091A164D01D5AB05397C71E"
- "AD20BE2A08FC528FE442817809C787FEE4AB97F97B9130D022153EDC6EB6"
- "CBE7B0F8E3473F2E901209B5DB10F93604DB0102818100E83C0998214941"
- "EA4F9293F1B77E2E99E6CF305FAF358238E126124FEAF2EB9724B2EA7B78"
- "E6032343821A80E55D1D88FB12D220C3F41A56142FEC85796D1917F1E8C7"
- "74F142B67D3D6E7B7E6B4383E94DB5929089DBB346D5BDAB40CC2D96EE04"
- "09475E175C63BF78CFD744136740838127EA723FF3FE7FA368C1311B4A4E"
- "0502818100D240FCC0F5D7715CDE21CB2DC86EA146132EA3B06F61FF2AF5"
- "4BF38473F59DADCCE32B5F4CC32DD0BA6F509347B4B5B1B58C39F95E4798"
- "CCBB43E83D0119ACF532F359CA743C85199F0286610E200997D731291717"
- "9AC9B67558773212EC961E8BCE7A3CC809BC5486A96E4B0E6AF394D94E06"
- "6A0900B7B70E82A44FB30053C102818100AD15DA1CBD6A492B66851BA8C3"
- "16D38AB700E2CFDDD926A658003513C54BAA152B30021D667D20078F500F"
- "8AD3E7F3945D74A891ED1A28EAD0FEEAEC8C14A8E834CF46A13D1378C99D"
- "18940823CFDD27EC5810D59339E0C34198AC638E09C87CBB1B634A9864AE"
- "9F4D5EB2D53514F67B4CAEC048C8AB849A02E397618F3271350281801FA2"
- "C1A5331880A92D8F3E281C617108BF38244F16E352E69ED417C7153F9EC3"
- "18F211839C643DCF8B4DD67CE2AC312E95178D5D952F06B1BF779F491692"
- "4B70F582A23F11304E02A5E7565AE22A35E74FECC8B6FDC93F92A1A37703"
- "E4CF0E63783BD02EB716A7ECBBFA606B10B74D01579522E7EF84D91FC522"
- "292108D902C1028180796FE3825F9DCC85DF22D58690065D93898ACD65C0"
- "87BEA8DA3A63BF4549B795E2CD0E3BE08CDEBD9FCF1720D9CDC5070D74F4"
- "0DED8E1102C52152A31B6165F83A6722AECFCC35A493D7634664B888A08D"
- "3EB034F12EA28BFEE346E205D334827F778B16ED40872BD29FCB36536B6E"
- "93FFB06778696B4A9D81BB0A9423E63DE5");
+string rsa_2048_key = hex2str(
+ // RFC 5208 s5
+ "308204BD" // SEQUENCE length 0x4bd (PrivateKeyInfo) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "300D" // SEQUENCE length 0x0d (AlgorithmIdentifier) {
+ "0609" // OBJECT IDENTIFIER length 9 (algorithm)
+ "2A864886F70D010101" // 1.2.840.113549.1.1.1 (rsaEncryption)
+ "0500" // NULL (parameters)
+ // } end SEQUENCE (AlgorithmIdentifier)
+ "048204A7" // OCTET STRING length 0x25f (privateKey) holding...
+ // RFC 8017 A.1.2
+ "308204A3" // SEQUENCE length 0x4a3 (RSAPrivateKey) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "02820101" // INTEGER length 0x101 value (modulus) ...
+ "00BEBC342B56D443B1299F9A6A7056E8"
+ "0A897E318476A5A18029E63B2ED739A6"
+ "1791D339F58DC763D9D14911F2EDEC38"
+ "3DEE11F6319B44510E7A3ECD9B79B973"
+ "82E49500ACF8117DC89CAF0E621F7775"
+ "6554A2FD4664BFE7AB8B59AB48340DBF"
+ "A27B93B5A81F6ECDEB02D0759307128D"
+ "F3E3BAD4055C8B840216DFAA5700670E"
+ "6C5126F0962FCB70FF308F25049164CC"
+ "F76CC2DA66A7DD9A81A714C2809D6918"
+ "6133D29D84568E892B6FFBF3199BDB14"
+ "383EE224407F190358F111A949552ABA"
+ "6714227D1BD7F6B20DD0CB88F9467B71"
+ "9339F33BFF35B3870B3F62204E4286B0"
+ "948EA348B524544B5F9838F29EE643B0"
+ "79EEF8A713B220D7806924CDF7295070"
+ "C5"
+ "0203010001" // INTEGER length 3 value 0x10001 (publicExponent)
+ "02820100" // INTEGER length 0x100 (privateExponent) value...
+ "69F377F35F2F584EF075353CCD1CA997"
+ "38DB3DBC7C7FF35F9366CE176DFD1B13"
+ "5AB10030344ABF5FBECF1D4659FDEF1C"
+ "0FC430834BE1BE3911951377BB3D563A"
+ "2EA9CA8F4AD9C48A8CE6FD516A735C66"
+ "2686C7B4B3C09A7B8354133E6F93F790"
+ "D59EAEB92E84C9A4339302CCE28FDF04"
+ "CCCAFA7DE3F3A827D4F6F7D38E68B0EC"
+ "6AB706645BF074A4E4090D06FB163124"
+ "365FD5EE7A20D350E9958CC30D91326E"
+ "1B292E9EF5DB408EC42DAF737D201497"
+ "04D0A678A0FB5B5446863B099228A352"
+ "D604BA8091A164D01D5AB05397C71EAD"
+ "20BE2A08FC528FE442817809C787FEE4"
+ "AB97F97B9130D022153EDC6EB6CBE7B0"
+ "F8E3473F2E901209B5DB10F93604DB01"
+ "028181" // INTEGER length 0x81 (prime1)
+ "00E83C0998214941EA4F9293F1B77E2E"
+ "99E6CF305FAF358238E126124FEAF2EB"
+ "9724B2EA7B78E6032343821A80E55D1D"
+ "88FB12D220C3F41A56142FEC85796D19"
+ "17F1E8C774F142B67D3D6E7B7E6B4383"
+ "E94DB5929089DBB346D5BDAB40CC2D96"
+ "EE0409475E175C63BF78CFD744136740"
+ "838127EA723FF3FE7FA368C1311B4A4E"
+ "05"
+ "028181" // INTEGER length 0x81 (prime2)
+ "00D240FCC0F5D7715CDE21CB2DC86EA1"
+ "46132EA3B06F61FF2AF54BF38473F59D"
+ "ADCCE32B5F4CC32DD0BA6F509347B4B5"
+ "B1B58C39F95E4798CCBB43E83D0119AC"
+ "F532F359CA743C85199F0286610E2009"
+ "97D7312917179AC9B67558773212EC96"
+ "1E8BCE7A3CC809BC5486A96E4B0E6AF3"
+ "94D94E066A0900B7B70E82A44FB30053"
+ "C1"
+ "028181" // INTEGER length 0x81 (exponent1)
+ "00AD15DA1CBD6A492B66851BA8C316D3"
+ "8AB700E2CFDDD926A658003513C54BAA"
+ "152B30021D667D20078F500F8AD3E7F3"
+ "945D74A891ED1A28EAD0FEEAEC8C14A8"
+ "E834CF46A13D1378C99D18940823CFDD"
+ "27EC5810D59339E0C34198AC638E09C8"
+ "7CBB1B634A9864AE9F4D5EB2D53514F6"
+ "7B4CAEC048C8AB849A02E397618F3271"
+ "35"
+ "028180" // INTEGER length 0x80 (exponent2)
+ "1FA2C1A5331880A92D8F3E281C617108"
+ "BF38244F16E352E69ED417C7153F9EC3"
+ "18F211839C643DCF8B4DD67CE2AC312E"
+ "95178D5D952F06B1BF779F4916924B70"
+ "F582A23F11304E02A5E7565AE22A35E7"
+ "4FECC8B6FDC93F92A1A37703E4CF0E63"
+ "783BD02EB716A7ECBBFA606B10B74D01"
+ "579522E7EF84D91FC522292108D902C1"
+ "028180" // INTEGER length 0x80 (coefficient)
+ "796FE3825F9DCC85DF22D58690065D93"
+ "898ACD65C087BEA8DA3A63BF4549B795"
+ "E2CD0E3BE08CDEBD9FCF1720D9CDC507"
+ "0D74F40DED8E1102C52152A31B6165F8"
+ "3A6722AECFCC35A493D7634664B888A0"
+ "8D3EB034F12EA28BFEE346E205D33482"
+ "7F778B16ED40872BD29FCB36536B6E93"
+ "FFB06778696B4A9D81BB0A9423E63DE5"
+ // } end SEQUENCE (RSAPrivateKey)
+ // } end SEQUENCE (PrivateKeyInfo)
+);
-string ec_256_key =
- hex2str("308187020100301306072a8648ce3d020106082a8648ce3d030107046d30"
- "6b0201010420737c2ecd7b8d1940bf2930aa9b4ed3ff941eed09366bc032"
- "99986481f3a4d859a14403420004bf85d7720d07c25461683bc648b4778a"
- "9a14dd8a024e3bdd8c7ddd9ab2b528bbc7aa1b51f14ebbbb0bd0ce21bcc4"
- "1c6eb00083cf3376d11fd44949e0b2183bfe");
+string ec_256_key = hex2str(
+ // RFC 5208 s5
+ "308187" // SEQUENCE length 0x87 (PrivateKeyInfo) {
+ "020100" // INTEGER length 1 value 0 (version)
+ "3013" // SEQUENCE length 0x13 (AlgorithmIdentifier) {
+ "0607" // OBJECT IDENTIFIER length 7 (algorithm)
+ "2a8648ce3d0201" // 1.2.840.10045.2.1 (ecPublicKey)
+ "0608" // OBJECT IDENTIFIER length 8 (param)
+ "2a8648ce3d030107" // 1.2.840.10045.3.1.7 (secp256r1)
+ // } end SEQUENCE (AlgorithmIdentifier)
+ "046d" // OCTET STRING length 0x6d (privateKey) holding...
+ "306b" // SEQUENCE length 0x6b (ECPrivateKey)
+ "020101" // INTEGER length 1 value 1 (version)
+ "0420" // OCTET STRING length 0x20 (privateKey)
+ "737c2ecd7b8d1940bf2930aa9b4ed3ff"
+ "941eed09366bc03299986481f3a4d859"
+ "a144" // TAG [1] len 0x44 (publicKey) {
+ "03420004bf85d7720d07c25461683bc6"
+ "48b4778a9a14dd8a024e3bdd8c7ddd9a"
+ "b2b528bbc7aa1b51f14ebbbb0bd0ce21"
+ "bcc41c6eb00083cf3376d11fd44949e0"
+ "b2183bfe"
+ // } end SEQUENCE (ECPrivateKey)
+ // } end SEQUENCE (PrivateKeyInfo)
+);
-string ec_521_key =
- hex2str("3081EE020100301006072A8648CE3D020106052B810400230481D63081D3"
- "02010104420011458C586DB5DAA92AFAB03F4FE46AA9D9C3CE9A9B7A006A"
- "8384BEC4C78E8E9D18D7D08B5BCFA0E53C75B064AD51C449BAE0258D54B9"
- "4B1E885DED08ED4FB25CE9A1818903818600040149EC11C6DF0FA122C6A9"
- "AFD9754A4FA9513A627CA329E349535A5629875A8ADFBE27DCB932C05198"
- "6377108D054C28C6F39B6F2C9AF81802F9F326B842FF2E5F3C00AB7635CF"
- "B36157FC0882D574A10D839C1A0C049DC5E0D775E2EE50671A208431BB45"
- "E78E70BEFE930DB34818EE4D5C26259F5C6B8E28A652950F9F88D7B4B2C9"
- "D9");
+string ec_521_key = hex2str(
+ // RFC 5208 s5
+ "3081EE" // SEQUENCE length 0xee (PrivateKeyInfo) {
+ "020100" // INTEGER length 1 value 0 (version)
+ "3010" // SEQUENCE length 0x10 (AlgorithmIdentifier) {
+ "0607" // OBJECT IDENTIFIER length 7 (algorithm)
+ "2A8648CE3D0201" // 1.2.840.10045.2.1 (ecPublicKey)
+ "0605" // OBJECT IDENTIFIER length 5 (param)
+ "2B81040023" // 1.3.132.0.35 (secp521r1)
+ // } end SEQUENCE (AlgorithmIdentifier)
+ "0481D6" // OCTET STRING length 0xd6 (privateKey) holding...
+ "3081D3" // SEQUENCE length 0xd3 (ECPrivateKey)
+ "020101" // INTEGER length 1 value 1 (version)
+ "0442" // OCTET STRING length 0x42 (privateKey)
+ "0011458C586DB5DAA92AFAB03F4FE46A"
+ "A9D9C3CE9A9B7A006A8384BEC4C78E8E"
+ "9D18D7D08B5BCFA0E53C75B064AD51C4"
+ "49BAE0258D54B94B1E885DED08ED4FB2"
+ "5CE9"
+ "A18189" // TAG [1] len 0x89 (publicKey) {
+ "03818600040149EC11C6DF0FA122C6A9"
+ "AFD9754A4FA9513A627CA329E349535A"
+ "5629875A8ADFBE27DCB932C051986377"
+ "108D054C28C6F39B6F2C9AF81802F9F3"
+ "26B842FF2E5F3C00AB7635CFB36157FC"
+ "0882D574A10D839C1A0C049DC5E0D775"
+ "E2EE50671A208431BB45E78E70BEFE93"
+ "0DB34818EE4D5C26259F5C6B8E28A652"
+ "950F9F88D7B4B2C9D9"
+ // } end SEQUENCE (ECPrivateKey)
+ // } end SEQUENCE (PrivateKeyInfo)
+);
-string ec_256_key_rfc5915 =
- hex2str("308193020100301306072a8648ce3d020106082a8648ce3d030107047930"
- "770201010420782370a8c8ce5537baadd04dcff079c8158cfa9c67b818b3"
- "8e8d21c9fa750c1da00a06082a8648ce3d030107a14403420004e2cc561e"
- "e701da0ad0ef0d176bb0c919d42e79c393fdc1bd6c4010d85cf2cf8e68c9"
- "05464666f98dad4f01573ba81078b3428570a439ba3229fbc026c550682f");
+string ec_256_key_rfc5915 = hex2str(
+ // RFC 5208 s5
+ "308193" // SEQUENCE length 0x93 (PrivateKeyInfo) {
+ "020100" // INTEGER length 1 value 0 (version)
+ "3013" // SEQUENCE length 0x13 (AlgorithmIdentifier) {
+ "0607" // OBJECT IDENTIFIER length 7 (algorithm)
+ "2a8648ce3d0201" // 1.2.840.10045.2.1 (ecPublicKey)
+ "0608" // OBJECT IDENTIFIER length 8 (param)
+ "2a8648ce3d030107" // 1.2.840.10045.3.1.7 (secp256r1)
+ // } end SEQUENCE (AlgorithmIdentifier)
+ "0479" // OCTET STRING length 0x79 (privateKey) holding...
+ // RFC 5915 s3
+ "3077" // SEQUENCE length 0x77 (ECPrivateKey)
+ "020101" // INTEGER length 1 value 1 (version)
+ "0420" // OCTET STRING length 0x42 (privateKey)
+ "782370a8c8ce5537baadd04dcff079c8"
+ "158cfa9c67b818b38e8d21c9fa750c1d"
+ "a00a" // TAG [0] length 0xa (parameters)
+ "0608" // OBJECT IDENTIFIER length 8
+ "2a8648ce3d030107" // 1.2.840.10045.3.1.7 (secp256r1)
+ // } end TAG [0]
+ "a144" // TAG [1] length 0x44 (publicKey) {
+ "0342" // BIT STRING length 0x42
+ "00" // no pad bits
+ "04e2cc561ee701da0ad0ef0d176bb0c9"
+ "19d42e79c393fdc1bd6c4010d85cf2cf"
+ "8e68c905464666f98dad4f01573ba810"
+ "78b3428570a439ba3229fbc026c55068"
+ "2f"
+ // } end SEQUENCE (ECPrivateKey)
+ // } end SEQUENCE (PrivateKeyInfo)
+);
-string ec_256_key_sec1 =
- hex2str("308187020100301306072a8648ce3d020106082a8648ce3d030107046d30"
- "6b0201010420782370a8c8ce5537baadd04dcff079c8158cfa9c67b818b3"
- "8e8d21c9fa750c1da14403420004e2cc561ee701da0ad0ef0d176bb0c919"
- "d42e79c393fdc1bd6c4010d85cf2cf8e68c905464666f98dad4f01573ba8"
- "1078b3428570a439ba3229fbc026c550682f");
+string ec_256_key_sec1 = hex2str(
+ // RFC 5208 s5
+ "308187" // SEQUENCE length 0x87 (PrivateKeyInfo) {
+ "020100" // INTEGER length 1 value 0 (version)
+ "3013" // SEQUENCE length 0x13 (AlgorithmIdentifier) {
+ "0607" // OBJECT IDENTIFIER length 7 (algorithm)
+ "2a8648ce3d0201" // 1.2.840.10045.2.1 (ecPublicKey)
+ "0608" // OBJECT IDENTIFIER length 8 (param)
+ "2a8648ce3d030107" // 1.2.840.10045.3.1.7 (secp256r1)
+ // } end SEQUENCE (AlgorithmIdentifier)
+ "046d" // OCTET STRING length 0x6d (privateKey) holding...
+ // SEC1-v2 C.4
+ "306b" // SEQUENCE length 0x6b (ECPrivateKey)
+ "020101" // INTEGER length 1 value 0x01 (version)
+ "0420" // OCTET STRING length 0x20 (privateKey)
+ "782370a8c8ce5537baadd04dcff079c8"
+ "158cfa9c67b818b38e8d21c9fa750c1d"
+ "a144" // TAG [1] length 0x44 (publicKey) {
+ "0342" // BIT STRING length 0x42
+ "00" // no pad bits
+ "04e2cc561ee701da0ad0ef0d176bb0c9"
+ "19d42e79c393fdc1bd6c4010d85cf2cf"
+ "8e68c905464666f98dad4f01573ba810"
+ "78b3428570a439ba3229fbc026c55068"
+ "2f"
+ // } end TAG [1] (publicKey)
+ // } end SEQUENCE (PrivateKeyInfo)
+);
struct RSA_Delete {
void operator()(RSA* p) { RSA_free(p); }
@@ -324,6 +511,10 @@
EXPECT_FALSE(auths.Contains(TAG_APPLICATION_DATA));
EXPECT_FALSE(auths.Contains(TAG_AUTH_TIMEOUT, 301U));
+ // None of the tests specify CREATION_DATETIME so check that the KeyMint implementation
+ // never adds it.
+ EXPECT_FALSE(auths.Contains(TAG_CREATION_DATETIME));
+
// Check OS details match the original hardware info.
auto os_ver = auths.GetTagValue(TAG_OS_VERSION);
EXPECT_TRUE(os_ver);
@@ -443,9 +634,8 @@
for (auto padding_mode : InvalidPaddingModes(Algorithm::AES, block_mode)) {
SCOPED_TRACE(testing::Message()
<< "AES-" << key_size << "-" << block_mode << "-" << padding_mode);
- vector<uint8_t> key_blob;
- vector<KeyCharacteristics> key_characteristics;
auto builder = AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
.AesEncryptionKey(key_size)
.BlockMode(block_mode)
.Padding(padding_mode)
@@ -454,14 +644,18 @@
builder.Authorization(TAG_MIN_MAC_LENGTH, 128);
}
- auto result = GenerateKey(builder, &key_blob, &key_characteristics);
+ auto result = GenerateKey(builder);
if (result == ErrorCode::OK) {
// Key creation was OK but has generated a key that cannot be used.
auto params =
AuthorizationSetBuilder().BlockMode(block_mode).Padding(padding_mode);
+ if (block_mode == BlockMode::GCM) {
+ params.Authorization(TAG_MAC_LENGTH, 128);
+ }
auto result = Begin(KeyPurpose::ENCRYPT, params);
EXPECT_TRUE(result == ErrorCode::INCOMPATIBLE_PADDING_MODE ||
- result == ErrorCode::INVALID_KEY_BLOB);
+ result == ErrorCode::INVALID_KEY_BLOB)
+ << "unexpected result: " << result;
} else {
// The KeyMint implementation detected that the generated key
// is unusable.
@@ -499,6 +693,34 @@
}
/*
+ * NewKeyGenerationTest.AesGcmMinMacOutOfRange
+ *
+ * Verifies that specifying an invalid min MAC size for AES key generation returns
+ * UNSUPPORTED_MIN_MAC_LENGTH.
+ */
+TEST_P(NewKeyGenerationTest, AesGcmMinMacOutOfRange) {
+ for (size_t min_mac_len : {88, 136}) {
+ for (auto key_size : ValidKeySizes(Algorithm::AES)) {
+ BlockMode block_mode = BlockMode::GCM;
+ for (auto padding_mode : ValidPaddingModes(Algorithm::AES, block_mode)) {
+ SCOPED_TRACE(testing::Message()
+ << "AES-" << key_size << "-" << block_mode << "-" << padding_mode);
+ vector<uint8_t> key_blob;
+ vector<KeyCharacteristics> key_characteristics;
+ auto builder = AuthorizationSetBuilder()
+ .AesEncryptionKey(key_size)
+ .BlockMode(block_mode)
+ .Padding(padding_mode)
+ .Authorization(TAG_MIN_MAC_LENGTH, min_mac_len)
+ .SetDefaultValidity();
+ EXPECT_EQ(ErrorCode::UNSUPPORTED_MIN_MAC_LENGTH,
+ GenerateKey(builder, &key_blob, &key_characteristics));
+ }
+ }
+ }
+}
+
+/*
* NewKeyGenerationTest.TripleDes
*
* Verifies that keymint can generate all required 3DES key sizes, and that the resulting keys
@@ -660,8 +882,8 @@
/*
* NewKeyGenerationTest.RsaWithAttestation
*
- * Verifies that keymint can generate all required RSA key sizes, and that the resulting keys
- * have correct characteristics.
+ * Verifies that keymint can generate all required RSA key sizes with attestation, and that the
+ * resulting keys have correct characteristics.
*/
TEST_P(NewKeyGenerationTest, RsaWithAttestation) {
auto challenge = "hello";
@@ -1112,6 +1334,20 @@
}
/*
+ * NewKeyGenerationTest.RsaMissingParams
+ *
+ * Verifies that RSA key generation succeeds when only the key size and public exponent are given.
+ */
+TEST_P(NewKeyGenerationTest, RsaMissingParams) {
+ for (auto key_size : ValidKeySizes(Algorithm::RSA)) {
+ ASSERT_EQ(ErrorCode::OK,
+ GenerateKey(
+ AuthorizationSetBuilder().RsaKey(key_size, 65537).SetDefaultValidity()));
+ CheckedDeleteKey();
+ }
+}
+
+/*
* NewKeyGenerationTest.Ecdsa
*
* Verifies that keymint can generate all required EC key sizes, and that the resulting keys
@@ -1452,7 +1688,7 @@
}
/*
- * NewKeyGenerationTest.EcdsaInvalidCurves
+ * NewKeyGenerationTest.EcdsaAllValidCurves
*
- * Verifies that keymint does not support any curve designated as unsupported.
+ * Verifies that keymint supports key generation for all of the required EC curves.
*/
@@ -1603,6 +1839,16 @@
CheckedDeleteKey();
}
}
+ if (SecLevel() == SecurityLevel::STRONGBOX) {
+ // STRONGBOX devices must not support HMAC keys larger than 512 bits.
+ size_t key_size = 520;
+ EXPECT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
+ GenerateKey(AuthorizationSetBuilder()
+ .HmacKey(key_size)
+ .Digest(Digest::SHA_2_256)
+ .Authorization(TAG_MIN_MAC_LENGTH, 256)))
+ << "HMAC key size " << key_size << " unexpectedly valid";
+ }
}
/*
@@ -1636,6 +1882,15 @@
CheckedDeleteKey();
}
}
+
+ // Minimum MAC length must be no more than 512 bits.
+ size_t min_mac_length = 520;
+ EXPECT_EQ(ErrorCode::UNSUPPORTED_MIN_MAC_LENGTH,
+ GenerateKey(AuthorizationSetBuilder()
+ .HmacKey(128)
+ .Digest(Digest::SHA_2_256)
+ .Authorization(TAG_MIN_MAC_LENGTH, min_mac_length)))
+ << "HMAC min mac length " << min_mac_length << " invalid.";
}
/*
@@ -1986,6 +2241,38 @@
}
/*
+ * SigningOperationsTest.RsaNonUniqueParams
+ *
+ * Verifies that an operation with multiple padding modes is rejected.
+ */
+TEST_P(SigningOperationsTest, RsaNonUniqueParams) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .RsaSigningKey(2048, 65537)
+ .Digest(Digest::NONE)
+ .Digest(Digest::SHA1)
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .Padding(PaddingMode::NONE)
+ .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)
+ .SetDefaultValidity()));
+
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_PADDING_MODE,
+ Begin(KeyPurpose::SIGN, AuthorizationSetBuilder()
+ .Digest(Digest::NONE)
+ .Padding(PaddingMode::NONE)
+ .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)));
+
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_DIGEST,
+ Begin(KeyPurpose::SIGN, AuthorizationSetBuilder()
+ .Digest(Digest::NONE)
+ .Digest(Digest::SHA1)
+ .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)));
+
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_DIGEST,
+ Begin(KeyPurpose::SIGN,
+ AuthorizationSetBuilder().Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)));
+}
+
+/*
* SigningOperationsTest.RsaUnsupportedPadding
*
* Verifies that RSA operations fail with the correct error (but key gen succeeds) when used
@@ -2002,6 +2289,20 @@
ErrorCode::UNSUPPORTED_PADDING_MODE,
Begin(KeyPurpose::SIGN,
AuthorizationSetBuilder().Digest(Digest::SHA_2_256).Padding(PaddingMode::PKCS7)));
+ CheckedDeleteKey();
+
+ ASSERT_EQ(ErrorCode::OK,
+ GenerateKey(
+ AuthorizationSetBuilder()
+ .RsaSigningKey(2048, 65537)
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .Digest(Digest::SHA_2_256 /* supported digest */)
+ .Padding(PaddingMode::RSA_OAEP) /* padding mode for encryption only */
+ .SetDefaultValidity()));
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_PADDING_MODE,
+ Begin(KeyPurpose::SIGN, AuthorizationSetBuilder()
+ .Digest(Digest::SHA_2_256)
+ .Padding(PaddingMode::RSA_OAEP)));
}
/*
@@ -2204,6 +2505,23 @@
}
/*
+ * SigningOperationsTest.EcdsaIncompatibleDigest
+ *
+ * Verifies that using an EC key requires a compatible digest.
+ */
+TEST_P(SigningOperationsTest, EcdsaIncompatibleDigest) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .EcdsaSigningKey(256)
+ .Digest(Digest::NONE)
+ .Digest(Digest::SHA1)
+ .SetDefaultValidity()));
+ EXPECT_EQ(ErrorCode::INCOMPATIBLE_DIGEST,
+ Begin(KeyPurpose::SIGN, AuthorizationSetBuilder().Digest(Digest::SHA_2_256)));
+ AbortIfNeeded();
+}
+
+/*
* SigningOperationsTest.AesEcbSign
*
* Verifies that attempts to use AES keys to sign fail in the correct way.
@@ -2264,6 +2582,26 @@
}
/*
+ * SigningOperationsTest.HmacSha256InvalidMacLength
+ *
+ * Verifies that HMAC fails in the correct way when asked to generate a MAC whose length is
+ * not a multiple of 8.
+ */
+TEST_P(SigningOperationsTest, HmacSha256InvalidMacLength) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .HmacKey(128)
+ .Digest(Digest::SHA_2_256)
+ .Authorization(TAG_MIN_MAC_LENGTH, 160)));
+ AuthorizationSet output_params;
+ EXPECT_EQ(ErrorCode::UNSUPPORTED_MAC_LENGTH, Begin(KeyPurpose::SIGN, key_blob_,
+ AuthorizationSetBuilder()
+ .Digest(Digest::SHA_2_256)
+ .Authorization(TAG_MAC_LENGTH, 161),
+ &output_params));
+}
+
+/*
* SigningOperationsTest.HmacSha256TooSmallMacLength
*
* Verifies that HMAC fails in the correct way when asked to generate a MAC smaller than the
@@ -2682,6 +3020,48 @@
}
/*
+ * ImportKeyTest.RsaSuccessWithoutParams
+ *
+ * Verifies that importing and using an RSA key pair works correctly even when the key size and
+ * public exponent are omitted and are instead derived from the imported key material.
+ */
+TEST_P(ImportKeyTest, RsaSuccessWithoutParams) {
+ uint32_t key_size;
+ string key;
+
+ if (SecLevel() == SecurityLevel::STRONGBOX) {
+ key_size = 2048;
+ key = rsa_2048_key;
+ } else {
+ key_size = 1024;
+ key = rsa_key;
+ }
+
+ ASSERT_EQ(ErrorCode::OK, ImportKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .SigningKey()
+ .Authorization(TAG_ALGORITHM, Algorithm::RSA)
+ .Digest(Digest::SHA_2_256)
+ .Padding(PaddingMode::RSA_PSS)
+ .SetDefaultValidity(),
+ KeyFormat::PKCS8, key));
+
+ // Key size and public exponent are determined from the imported key material.
+ CheckCryptoParam(TAG_KEY_SIZE, key_size);
+ CheckCryptoParam(TAG_RSA_PUBLIC_EXPONENT, 65537U);
+
+ CheckCryptoParam(TAG_ALGORITHM, Algorithm::RSA);
+ CheckCryptoParam(TAG_DIGEST, Digest::SHA_2_256);
+ CheckCryptoParam(TAG_PADDING, PaddingMode::RSA_PSS);
+ CheckOrigin();
+
+ string message(1024 / 8, 'a');
+ auto params = AuthorizationSetBuilder().Digest(Digest::SHA_2_256).Padding(PaddingMode::RSA_PSS);
+ string signature = SignMessage(message, params);
+ VerifyMessage(message, signature, params);
+}
+
+/*
* ImportKeyTest.RsaKeySizeMismatch
*
* Verifies that importing an RSA key pair with a size that doesn't match the key fails in the
@@ -2884,14 +3264,35 @@
string key = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
uint32_t bitlen = key.size() * 8;
for (uint32_t key_size : {bitlen - 1, bitlen + 1, bitlen - 8, bitlen + 8}) {
- ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
- ImportKey(AuthorizationSetBuilder()
+ // Explicit key size doesn't match that of the provided key.
+ auto result = ImportKey(AuthorizationSetBuilder()
.Authorization(TAG_NO_AUTH_REQUIRED)
.AesEncryptionKey(key_size)
.EcbMode()
.Padding(PaddingMode::PKCS7),
- KeyFormat::RAW, key));
+ KeyFormat::RAW, key);
+ ASSERT_TRUE(result == ErrorCode::IMPORT_PARAMETER_MISMATCH ||
+ result == ErrorCode::UNSUPPORTED_KEY_SIZE)
+ << "unexpected result: " << result;
}
+
+ // Explicit key size matches that of the provided key, but it's not a valid size.
+ string long_key = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
+ ImportKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(long_key.size() * 8)
+ .EcbMode()
+ .Padding(PaddingMode::PKCS7),
+ KeyFormat::RAW, long_key));
+ string short_key = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
+ ImportKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(short_key.size() * 8)
+ .EcbMode()
+ .Padding(PaddingMode::PKCS7),
+ KeyFormat::RAW, short_key));
}
/*
@@ -2930,14 +3331,34 @@
string key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f7358");
uint32_t bitlen = key.size() * 8;
for (uint32_t key_size : {bitlen - 1, bitlen + 1, bitlen - 8, bitlen + 8}) {
- ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
- ImportKey(AuthorizationSetBuilder()
+ // Explicit key size doesn't match that of the provided key.
+ auto result = ImportKey(AuthorizationSetBuilder()
.Authorization(TAG_NO_AUTH_REQUIRED)
.TripleDesEncryptionKey(key_size)
.EcbMode()
.Padding(PaddingMode::PKCS7),
- KeyFormat::RAW, key));
+ KeyFormat::RAW, key);
+ ASSERT_TRUE(result == ErrorCode::IMPORT_PARAMETER_MISMATCH ||
+ result == ErrorCode::UNSUPPORTED_KEY_SIZE)
+ << "unexpected result: " << result;
}
+ // Explicit key size matches that of the provided key, but it's not a valid size.
+ string long_key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f7358");
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
+ ImportKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .TripleDesEncryptionKey(long_key.size() * 8)
+ .EcbMode()
+ .Padding(PaddingMode::PKCS7),
+ KeyFormat::RAW, long_key));
+ string short_key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f7358");
+ ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
+ ImportKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .TripleDesEncryptionKey(short_key.size() * 8)
+ .EcbMode()
+ .Padding(PaddingMode::PKCS7),
+ KeyFormat::RAW, short_key));
}
/*
@@ -2967,57 +3388,230 @@
INSTANTIATE_KEYMINT_AIDL_TEST(ImportKeyTest);
auto wrapped_key = hex2str(
- "3082017902010004820100934bf94e2aa28a3f83c9f79297250262fbe3276b5a1c91159bbfa3ef8957aac8"
- "4b59b30b455a79c2973480823d8b3863c3deef4a8e243590268d80e18751a0e130f67ce6a1ace9f79b95e0"
- "97474febc981195b1d13a69086c0863f66a7b7fdb48792227b1ac5e2489febdf087ab5486483033a6f001c"
- "a5d1ec1e27f5c30f4cec2642074a39ae68aee552e196627a8e3d867e67a8c01b11e75f13cca0a97ab668b5"
- "0cda07a8ecb7cd8e3dd7009c9636534f6f239cffe1fc8daa466f78b676c7119efb96bce4e69ca2a25d0b34"
- "ed9c3ff999b801597d5220e307eaa5bee507fb94d1fa69f9e519b2de315bac92c36f2ea1fa1df4478c0dde"
- "deae8c70e0233cd098040cd796b02c370f1fa4cc0124f1302e0201033029a1083106020100020101a20302"
- "0120a30402020100a4053103020101a6053103020140bf83770205000420ccd540855f833a5e1480bfd2d3"
- "6faf3aeee15df5beabe2691bc82dde2a7aa910041064c9f689c60ff6223ab6e6999e0eb6e5");
+ // IKeyMintDevice.aidl
+ "30820179" // SEQUENCE length 0x179 (SecureKeyWrapper) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "04820100" // OCTET STRING length 0x100 (encryptedTransportKey)
+ "934bf94e2aa28a3f83c9f79297250262"
+ "fbe3276b5a1c91159bbfa3ef8957aac8"
+ "4b59b30b455a79c2973480823d8b3863"
+ "c3deef4a8e243590268d80e18751a0e1"
+ "30f67ce6a1ace9f79b95e097474febc9"
+ "81195b1d13a69086c0863f66a7b7fdb4"
+ "8792227b1ac5e2489febdf087ab54864"
+ "83033a6f001ca5d1ec1e27f5c30f4cec"
+ "2642074a39ae68aee552e196627a8e3d"
+ "867e67a8c01b11e75f13cca0a97ab668"
+ "b50cda07a8ecb7cd8e3dd7009c963653"
+ "4f6f239cffe1fc8daa466f78b676c711"
+ "9efb96bce4e69ca2a25d0b34ed9c3ff9"
+ "99b801597d5220e307eaa5bee507fb94"
+ "d1fa69f9e519b2de315bac92c36f2ea1"
+ "fa1df4478c0ddedeae8c70e0233cd098"
+ "040c" // OCTET STRING length 0x0c (initializationVector)
+ "d796b02c370f1fa4cc0124f1"
+ "302e" // SEQUENCE length 0x2e (KeyDescription) {
+ "020103" // INTEGER length 1 value 0x03 (keyFormat = RAW)
+ "3029" // SEQUENCE length 0x29 (AuthorizationList) {
+ "a108" // [1] context-specific constructed tag=1 length 0x08 { (purpose)
+ "3106" // SET length 0x06
+ "020100" // INTEGER length 1 value 0x00 (Encrypt)
+ "020101" // INTEGER length 1 value 0x01 (Decrypt)
+ // } end SET
+ // } end [1]
+ "a203" // [2] context-specific constructed tag=2 length 0x02 { (algorithm)
+ "020120" // INTEGER length 1 value 0x20 (AES)
+ // } end [2]
+ "a304" // [3] context-specific constructed tag=3 length 0x04 { (keySize)
+ "02020100" // INTEGER length 2 value 0x100
+ // } end [3]
+ "a405" // [4] context-specific constructed tag=4 length 0x05 { (blockMode)
+ "3103" // SET length 0x03 {
+ "020101" // INTEGER length 1 value 0x01 (ECB)
+ // } end SET
+ // } end [4]
+ "a605" // [6] context-specific constructed tag=6 length 0x05 { (padding)
+ "3103" // SET length 0x03 {
+ "020140" // INTEGER length 1 value 0x40 (PKCS7)
+ // } end SET
+ // } end [6]
+ "bf837702" // [503] context-specific constructed tag=503=0x1F7 length 0x02 {
+ // (noAuthRequired)
+ "0500" // NULL
+ // } end [503]
+ // } end SEQUENCE (AuthorizationList)
+ // } end SEQUENCE (KeyDescription)
+ "0420" // OCTET STRING length 0x20 (encryptedKey)
+ "ccd540855f833a5e1480bfd2d36faf3a"
+ "eee15df5beabe2691bc82dde2a7aa910"
+ "0410" // OCTET STRING length 0x10 (tag)
+ "64c9f689c60ff6223ab6e6999e0eb6e5"
+ // } SEQUENCE (SecureKeyWrapper)
+);
auto wrapped_key_masked = hex2str(
- "3082017902010004820100aad93ed5924f283b4bb5526fbe7a1412f9d9749ec30db9062b29e574a8546f33"
- "c88732452f5b8e6a391ee76c39ed1712c61d8df6213dec1cffbc17a8c6d04c7b30893d8daa9b2015213e21"
- "946821553207f8f9931c4caba23ed3bee28b36947e47f10e0a5c3dc51c988a628daad3e5e1f4005e79c2d5"
- "a96c284b4b8d7e4948f331e5b85dd5a236f85579f3ea1d1b848487470bdb0ab4f81a12bee42c99fe0df4be"
- "e3759453e69ad1d68a809ce06b949f7694a990429b2fe81e066ff43e56a21602db70757922a4bcc23ab89f"
- "1e35da77586775f423e519c2ea394caf48a28d0c8020f1dcf6b3a68ec246f615ae96dae9a079b1f6eb9590"
- "33c1af5c125fd94168040c6d9721d08589581ab49204a3302e0201033029a1083106020100020101a20302"
- "0120a30402020100a4053103020101a6053103020140bf83770205000420a61c6e247e25b3e6e69aa78eb0"
- "3c2d4ac20d1f99a9a024a76f35c8e2cab9b68d04102560c70109ae67c030f00b98b512a670");
+ // IKeyMintDevice.aidl
+ "30820179" // SEQUENCE length 0x179 (SecureKeyWrapper) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "04820100" // OCTET STRING length 0x100 (encryptedTransportKey)
+ "aad93ed5924f283b4bb5526fbe7a1412"
+ "f9d9749ec30db9062b29e574a8546f33"
+ "c88732452f5b8e6a391ee76c39ed1712"
+ "c61d8df6213dec1cffbc17a8c6d04c7b"
+ "30893d8daa9b2015213e219468215532"
+ "07f8f9931c4caba23ed3bee28b36947e"
+ "47f10e0a5c3dc51c988a628daad3e5e1"
+ "f4005e79c2d5a96c284b4b8d7e4948f3"
+ "31e5b85dd5a236f85579f3ea1d1b8484"
+ "87470bdb0ab4f81a12bee42c99fe0df4"
+ "bee3759453e69ad1d68a809ce06b949f"
+ "7694a990429b2fe81e066ff43e56a216"
+ "02db70757922a4bcc23ab89f1e35da77"
+ "586775f423e519c2ea394caf48a28d0c"
+ "8020f1dcf6b3a68ec246f615ae96dae9"
+ "a079b1f6eb959033c1af5c125fd94168"
+ "040c" // OCTET STRING length 0x0c (initializationVector)
+ "6d9721d08589581ab49204a3"
+ "302e" // SEQUENCE length 0x2e (KeyDescription) {
+ "020103" // INTEGER length 1 value 0x03 (keyFormat = RAW)
+ "3029" // SEQUENCE length 0x29 (AuthorizationList) {
+ "a108" // [1] context-specific constructed tag=1 length 0x08 { (purpose)
+ "3106" // SET length 0x06
+ "020100" // INTEGER length 1 value 0x00 (Encrypt)
+ "020101" // INTEGER length 1 value 0x01 (Decrypt)
+ // } end SET
+ // } end [1]
+ "a203" // [2] context-specific constructed tag=2 length 0x02 { (algorithm)
+ "020120" // INTEGER length 1 value 0x20 (AES)
+ // } end [2]
+ "a304" // [3] context-specific constructed tag=3 length 0x04 { (keySize)
+ "02020100" // INTEGER length 2 value 0x100
+ // } end [3]
+ "a405" // [4] context-specific constructed tag=4 length 0x05 { (blockMode
+ "3103" // SET length 0x03 {
+ "020101" // INTEGER length 1 value 0x01 (ECB)
+ // } end SET
+ // } end [4]
+ "a605" // [6] context-specific constructed tag=6 length 0x05 { (padding)
+ "3103" // SET length 0x03 {
+ "020140" // INTEGER length 1 value 0x40 (PKCS7)
+ // } end SET
+ // } end [6]
+ "bf837702" // [503] context-specific constructed tag=503=0x1F7 length 0x02 {
+ // (noAuthRequired)
+ "0500" // NULL
+ // } end [503]
+ // } end SEQUENCE (AuthorizationList)
+ // } end SEQUENCE (KeyDescription)
+ "0420" // OCTET STRING length 0x20 (encryptedKey)
+ "a61c6e247e25b3e6e69aa78eb03c2d4a"
+ "c20d1f99a9a024a76f35c8e2cab9b68d"
+ "0410" // OCTET STRING length 0x10 (tag)
+ "2560c70109ae67c030f00b98b512a670"
+ // } SEQUENCE (SecureKeyWrapper)
+);
auto wrapping_key = hex2str(
- "308204be020100300d06092a864886f70d0101010500048204a8308204a40201000282010100aec367931d"
- "8900ce56b0067f7d70e1fc653f3f34d194c1fed50018fb43db937b06e673a837313d56b1c725150a3fef86"
- "acbddc41bb759c2854eae32d35841efb5c18d82bc90a1cb5c1d55adf245b02911f0b7cda88c421ff0ebafe"
- "7c0d23be312d7bd5921ffaea1347c157406fef718f682643e4e5d33c6703d61c0cf7ac0bf4645c11f5c137"
- "4c3886427411c449796792e0bef75dec858a2123c36753e02a95a96d7c454b504de385a642e0dfc3e60ac3"
- "a7ee4991d0d48b0172a95f9536f02ba13cecccb92b727db5c27e5b2f5cec09600b286af5cf14c42024c61d"
- "dfe71c2a8d7458f185234cb00e01d282f10f8fc6721d2aed3f4833cca2bd8fa62821dd5502030100010282"
- "0100431447b6251908112b1ee76f99f3711a52b6630960046c2de70de188d833f8b8b91e4d785caeeeaf4f"
- "0f74414e2cda40641f7fe24f14c67a88959bdb27766df9e710b630a03adc683b5d2c43080e52bee71e9eae"
- "b6de297a5fea1072070d181c822bccff087d63c940ba8a45f670feb29fb4484d1c95e6d2579ba02aae0a00"
- "900c3ebf490e3d2cd7ee8d0e20c536e4dc5a5097272888cddd7e91f228b1c4d7474c55b8fcd618c4a957bb"
- "ddd5ad7407cc312d8d98a5caf7e08f4a0d6b45bb41c652659d5a5ba05b663737a8696281865ba20fbdd7f8"
- "51e6c56e8cbe0ddbbf24dc03b2d2cb4c3d540fb0af52e034a2d06698b128e5f101e3b51a34f8d8b4f86181"
- "02818100de392e18d682c829266cc3454e1d6166242f32d9a1d10577753e904ea7d08bff841be5bac82a16"
- "4c5970007047b8c517db8f8f84e37bd5988561bdf503d4dc2bdb38f885434ae42c355f725c9a60f91f0788"
- "e1f1a97223b524b5357fdf72e2f696bab7d78e32bf92ba8e1864eab1229e91346130748a6e3c124f9149d7"
- "1c743502818100c95387c0f9d35f137b57d0d65c397c5e21cc251e47008ed62a542409c8b6b6ac7f8967b3"
- "863ca645fcce49582a9aa17349db6c4a95affdae0dae612e1afac99ed39a2d934c880440aed8832f984316"
- "3a47f27f392199dc1202f9a0f9bd08308007cb1e4e7f58309366a7de25f7c3c9b880677c068e1be936e812"
- "88815252a8a102818057ff8ca1895080b2cae486ef0adfd791fb0235c0b8b36cd6c136e52e4085f4ea5a06"
- "3212a4f105a3764743e53281988aba073f6e0027298e1c4378556e0efca0e14ece1af76ad0b030f27af6f0"
- "ab35fb73a060d8b1a0e142fa2647e93b32e36d8282ae0a4de50ab7afe85500a16f43a64719d6e2b9439823"
- "719cd08bcd03178102818100ba73b0bb28e3f81e9bd1c568713b101241acc607976c4ddccc90e65b6556ca"
- "31516058f92b6e09f3b160ff0e374ec40d78ae4d4979fde6ac06a1a400c61dd31254186af30b22c10582a8"
- "a43e34fe949c5f3b9755bae7baa7b7b7a6bd03b38cef55c86885fc6c1978b9cee7ef33da507c9df6b9277c"
- "ff1e6aaa5d57aca528466102818100c931617c77829dfb1270502be9195c8f2830885f57dba869536811e6"
- "864236d0c4736a0008a145af36b8357a7c3d139966d04c4e00934ea1aede3bb6b8ec841dc95e3f579751e2"
- "bfdfe27ae778983f959356210723287b0affcc9f727044d48c373f1babde0724fa17a4fd4da0902c7c9b9b"
- "f27ba61be6ad02dfddda8f4e6822");
+ // RFC 5208 s5
+ "308204be" // SEQUENCE length 0x4be (PrivateKeyInfo) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "300d" // SEQUENCE length 0x0d (AlgorithmIdentifier) {
+ "0609" // OBJECT IDENTIFIER length 0x09 (algorithm)
+ "2a864886f70d010101" // 1.2.840.113549.1.1.1 (RSAES-PKCS1-v1_5 encryption scheme)
+ "0500" // NULL (parameters)
+ // } SEQUENCE (AlgorithmIdentifier)
+ "048204a8" // OCTET STRING len 0x4a8 (privateKey), which contains...
+ // RFC 8017 A.1.2
+ "308204a4" // SEQUENCE len 0x4a4 (RSAPrivateKey) {
+ "020100" // INTEGER length 1 value 0x00 (version)
+ "02820101" // INTEGER length 0x0101 (modulus) value...
+ "00aec367931d8900ce56b0067f7d70e1" // 0x10
+ "fc653f3f34d194c1fed50018fb43db93" // 0x20
+ "7b06e673a837313d56b1c725150a3fef" // 0x30
+ "86acbddc41bb759c2854eae32d35841e" // 0x40
+ "fb5c18d82bc90a1cb5c1d55adf245b02" // 0x50
+ "911f0b7cda88c421ff0ebafe7c0d23be" // 0x60
+ "312d7bd5921ffaea1347c157406fef71" // 0x70
+ "8f682643e4e5d33c6703d61c0cf7ac0b" // 0x80
+ "f4645c11f5c1374c3886427411c44979" // 0x90
+ "6792e0bef75dec858a2123c36753e02a" // 0xa0
+ "95a96d7c454b504de385a642e0dfc3e6" // 0xb0
+ "0ac3a7ee4991d0d48b0172a95f9536f0" // 0xc0
+ "2ba13cecccb92b727db5c27e5b2f5cec" // 0xd0
+ "09600b286af5cf14c42024c61ddfe71c" // 0xe0
+ "2a8d7458f185234cb00e01d282f10f8f" // 0xf0
+ "c6721d2aed3f4833cca2bd8fa62821dd" // 0x100
+ "55" // 0x101
+ "0203010001" // INTEGER length 3 value 0x10001 (publicExponent)
+ "02820100" // INTEGER length 0x100 (privateExponent) value...
+ "431447b6251908112b1ee76f99f3711a" // 0x10
+ "52b6630960046c2de70de188d833f8b8" // 0x20
+ "b91e4d785caeeeaf4f0f74414e2cda40" // 0x30
+ "641f7fe24f14c67a88959bdb27766df9" // 0x40
+ "e710b630a03adc683b5d2c43080e52be" // 0x50
+ "e71e9eaeb6de297a5fea1072070d181c" // 0x60
+ "822bccff087d63c940ba8a45f670feb2" // 0x70
+ "9fb4484d1c95e6d2579ba02aae0a0090" // 0x80
+ "0c3ebf490e3d2cd7ee8d0e20c536e4dc" // 0x90
+ "5a5097272888cddd7e91f228b1c4d747" // 0xa0
+ "4c55b8fcd618c4a957bbddd5ad7407cc" // 0xb0
+ "312d8d98a5caf7e08f4a0d6b45bb41c6" // 0xc0
+ "52659d5a5ba05b663737a8696281865b" // 0xd0
+ "a20fbdd7f851e6c56e8cbe0ddbbf24dc" // 0xe0
+ "03b2d2cb4c3d540fb0af52e034a2d066" // 0xf0
+ "98b128e5f101e3b51a34f8d8b4f86181" // 0x100
+ "028181" // INTEGER length 0x81 (prime1) value...
+ "00de392e18d682c829266cc3454e1d61" // 0x10
+ "66242f32d9a1d10577753e904ea7d08b" // 0x20
+ "ff841be5bac82a164c5970007047b8c5" // 0x30
+ "17db8f8f84e37bd5988561bdf503d4dc" // 0x40
+ "2bdb38f885434ae42c355f725c9a60f9" // 0x50
+ "1f0788e1f1a97223b524b5357fdf72e2" // 0x60
+ "f696bab7d78e32bf92ba8e1864eab122" // 0x70
+ "9e91346130748a6e3c124f9149d71c74" // 0x80
+ "35"
+ "028181" // INTEGER length 0x81 (prime2) value...
+ "00c95387c0f9d35f137b57d0d65c397c" // 0x10
+ "5e21cc251e47008ed62a542409c8b6b6" // 0x20
+ "ac7f8967b3863ca645fcce49582a9aa1" // 0x30
+ "7349db6c4a95affdae0dae612e1afac9" // 0x40
+ "9ed39a2d934c880440aed8832f984316" // 0x50
+ "3a47f27f392199dc1202f9a0f9bd0830" // 0x60
+ "8007cb1e4e7f58309366a7de25f7c3c9" // 0x70
+ "b880677c068e1be936e81288815252a8" // 0x80
+ "a1"
+ "028180" // INTEGER length 0x80 (exponent1) value...
+ "57ff8ca1895080b2cae486ef0adfd791" // 0x10
+ "fb0235c0b8b36cd6c136e52e4085f4ea" // 0x20
+ "5a063212a4f105a3764743e53281988a" // 0x30
+ "ba073f6e0027298e1c4378556e0efca0" // 0x40
+ "e14ece1af76ad0b030f27af6f0ab35fb" // 0x50
+ "73a060d8b1a0e142fa2647e93b32e36d" // 0x60
+ "8282ae0a4de50ab7afe85500a16f43a6" // 0x70
+ "4719d6e2b9439823719cd08bcd031781" // 0x80
+ "028181" // INTEGER length 0x81 (exponent2) value...
+ "00ba73b0bb28e3f81e9bd1c568713b10" // 0x10
+ "1241acc607976c4ddccc90e65b6556ca" // 0x20
+ "31516058f92b6e09f3b160ff0e374ec4" // 0x30
+ "0d78ae4d4979fde6ac06a1a400c61dd3" // 0x40
+ "1254186af30b22c10582a8a43e34fe94" // 0x50
+ "9c5f3b9755bae7baa7b7b7a6bd03b38c" // 0x60
+ "ef55c86885fc6c1978b9cee7ef33da50" // 0x70
+ "7c9df6b9277cff1e6aaa5d57aca52846" // 0x80
+ "61"
+ "028181" // INTEGER length 0x81 (coefficient) value...
+ "00c931617c77829dfb1270502be9195c" // 0x10
+ "8f2830885f57dba869536811e6864236" // 0x20
+ "d0c4736a0008a145af36b8357a7c3d13" // 0x30
+ "9966d04c4e00934ea1aede3bb6b8ec84" // 0x40
+ "1dc95e3f579751e2bfdfe27ae778983f" // 0x50
+ "959356210723287b0affcc9f727044d4" // 0x60
+ "8c373f1babde0724fa17a4fd4da0902c" // 0x70
+ "7c9b9bf27ba61be6ad02dfddda8f4e68" // 0x80
+ "22"
+ // } SEQUENCE (RSAPrivateKey)
+ // } SEQUENCE (PrivateKeyInfo)
+);
string zero_masking_key =
hex2str("0000000000000000000000000000000000000000000000000000000000000000");
@@ -3046,6 +3640,36 @@
EXPECT_EQ(message, plaintext);
}
+/*
+ * ImportWrappedKeyTest.SuccessSidsIgnored
+ *
+ * Verifies that password_sid and biometric_sid are ignored on import if the authorizations don't
+ * include Tag:USER_SECURE_ID.
+ */
+TEST_P(ImportWrappedKeyTest, SuccessSidsIgnored) {
+ auto wrapping_key_desc = AuthorizationSetBuilder()
+ .RsaEncryptionKey(2048, 65537)
+ .Digest(Digest::SHA_2_256)
+ .Padding(PaddingMode::RSA_OAEP)
+ .Authorization(TAG_PURPOSE, KeyPurpose::WRAP_KEY)
+ .SetDefaultValidity();
+
+ int64_t password_sid = 42;
+ int64_t biometric_sid = 24;
+ ASSERT_EQ(ErrorCode::OK,
+ ImportWrappedKey(wrapped_key, wrapping_key, wrapping_key_desc, zero_masking_key,
+ AuthorizationSetBuilder()
+ .Digest(Digest::SHA_2_256)
+ .Padding(PaddingMode::RSA_OAEP),
+ password_sid, biometric_sid));
+
+ string message = "Hello World!";
+ auto params = AuthorizationSetBuilder().BlockMode(BlockMode::ECB).Padding(PaddingMode::PKCS7);
+ string ciphertext = EncryptMessage(message, params);
+ string plaintext = DecryptMessage(ciphertext, params);
+ EXPECT_EQ(message, plaintext);
+}
+
TEST_P(ImportWrappedKeyTest, SuccessMasked) {
auto wrapping_key_desc = AuthorizationSetBuilder()
.RsaEncryptionKey(2048, 65537)
@@ -3092,6 +3716,36 @@
.Padding(PaddingMode::RSA_OAEP)));
}
+TEST_P(ImportWrappedKeyTest, WrongPaddingMode) {
+ auto wrapping_key_desc = AuthorizationSetBuilder()
+ .RsaEncryptionKey(2048, 65537)
+ .Digest(Digest::SHA_2_256)
+ .Padding(PaddingMode::RSA_PSS)
+ .Authorization(TAG_PURPOSE, KeyPurpose::WRAP_KEY)
+ .SetDefaultValidity();
+
+ ASSERT_EQ(ErrorCode::INCOMPATIBLE_PADDING_MODE,
+ ImportWrappedKey(wrapped_key, wrapping_key, wrapping_key_desc, zero_masking_key,
+ AuthorizationSetBuilder()
+ .Digest(Digest::SHA_2_256)
+ .Padding(PaddingMode::RSA_OAEP)));
+}
+
+TEST_P(ImportWrappedKeyTest, WrongDigest) {
+ auto wrapping_key_desc = AuthorizationSetBuilder()
+ .RsaEncryptionKey(2048, 65537)
+ .Digest(Digest::SHA_2_512)
+ .Padding(PaddingMode::RSA_OAEP)
+ .Authorization(TAG_PURPOSE, KeyPurpose::WRAP_KEY)
+ .SetDefaultValidity();
+
+ ASSERT_EQ(ErrorCode::INCOMPATIBLE_DIGEST,
+ ImportWrappedKey(wrapped_key, wrapping_key, wrapping_key_desc, zero_masking_key,
+ AuthorizationSetBuilder()
+ .Digest(Digest::SHA_2_256)
+ .Padding(PaddingMode::RSA_OAEP)));
+}
+
INSTANTIATE_KEYMINT_AIDL_TEST(ImportWrappedKeyTest);
typedef KeyMintAidlTestBase EncryptionOperationsTest;
@@ -3102,22 +3756,26 @@
* Verifies that raw RSA encryption works.
*/
TEST_P(EncryptionOperationsTest, RsaNoPaddingSuccess) {
- ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
- .Authorization(TAG_NO_AUTH_REQUIRED)
- .RsaEncryptionKey(2048, 65537)
- .Padding(PaddingMode::NONE)
- .SetDefaultValidity()));
+ for (uint64_t exponent : {3, 65537}) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .RsaEncryptionKey(2048, exponent)
+ .Padding(PaddingMode::NONE)
+ .SetDefaultValidity()));
- string message = string(2048 / 8, 'a');
- auto params = AuthorizationSetBuilder().Padding(PaddingMode::NONE);
- string ciphertext1 = EncryptMessage(message, params);
- EXPECT_EQ(2048U / 8, ciphertext1.size());
+ string message = string(2048 / 8, 'a');
+ auto params = AuthorizationSetBuilder().Padding(PaddingMode::NONE);
+ string ciphertext1 = EncryptMessage(message, params);
+ EXPECT_EQ(2048U / 8, ciphertext1.size());
- string ciphertext2 = EncryptMessage(message, params);
- EXPECT_EQ(2048U / 8, ciphertext2.size());
+ string ciphertext2 = EncryptMessage(message, params);
+ EXPECT_EQ(2048U / 8, ciphertext2.size());
- // Unpadded RSA is deterministic
- EXPECT_EQ(ciphertext1, ciphertext2);
+ // Unpadded RSA is deterministic
+ EXPECT_EQ(ciphertext1, ciphertext2);
+
+ CheckedDeleteKey();
+ }
}
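The "Unpadded RSA is deterministic" check above holds because raw RSA encryption is the pure function c = m^e mod n, with no randomness or padding involved. A toy standalone C++ sketch (textbook parameters, entirely unrelated to the 2048-bit keys generated by the test) showing the same property:

// Toy illustration only: raw RSA encryption is deterministic.
#include <cassert>
#include <cstdint>

static uint64_t PowMod(uint64_t base, uint64_t exp, uint64_t mod) {
    uint64_t result = 1;
    base %= mod;
    while (exp > 0) {
        if (exp & 1) result = (result * base) % mod;
        base = (base * base) % mod;
        exp >>= 1;
    }
    return result;
}

int main() {
    // Classic textbook parameters: p = 61, q = 53, n = 3233, e = 17, message m = 65.
    const uint64_t n = 3233, e = 17, m = 65;
    uint64_t c1 = PowMod(m, e, n);
    uint64_t c2 = PowMod(m, e, n);
    assert(c1 == 2790);  // the well-known textbook ciphertext for these parameters
    assert(c1 == c2);    // same message, same key => same ciphertext, as the test expects
    return 0;
}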
/*
@@ -3244,13 +3902,30 @@
.Padding(PaddingMode::RSA_OAEP)
.Digest(Digest::NONE)
.SetDefaultValidity()));
- string message = "Hello World!";
auto params = AuthorizationSetBuilder().Padding(PaddingMode::RSA_OAEP).Digest(Digest::NONE);
EXPECT_EQ(ErrorCode::INCOMPATIBLE_DIGEST, Begin(KeyPurpose::ENCRYPT, params));
}
/*
+ * EncryptionOperationsTest.RsaOaepInvalidPadding
+ *
+ * Verifies that RSA-OAEP encryption operations fail in the correct way when asked to operate
+ * with a padding value that is only suitable for signing/verifying.
+ */
+TEST_P(EncryptionOperationsTest, RsaOaepInvalidPadding) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .RsaEncryptionKey(2048, 65537)
+ .Padding(PaddingMode::RSA_PSS)
+ .Digest(Digest::NONE)
+ .SetDefaultValidity()));
+
+ auto params = AuthorizationSetBuilder().Padding(PaddingMode::RSA_PSS).Digest(Digest::NONE);
+ EXPECT_EQ(ErrorCode::UNSUPPORTED_PADDING_MODE, Begin(KeyPurpose::ENCRYPT, params));
+}
+
+/*
* EncryptionOperationsTest.RsaOaepDecryptWithWrongDigest
*
* Verifies that RSA-OAEP encryption operations fail in the correct way when asked to decrypt
@@ -3450,7 +4125,7 @@
/*
* EncryptionOperationsTest.RsaPkcs1TooLarge
*
- * Verifies that RSA PKCS encryption fails in the correct way when the mssage is too large.
+ * Verifies that RSA PKCS encryption fails in the correct way when the message is too large.
*/
TEST_P(EncryptionOperationsTest, RsaPkcs1TooLarge) {
ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
@@ -3535,6 +4210,48 @@
}
/*
+ * EncryptionOperationsTest.AesEcbUnknownTag
+ *
+ * Verifies that AES ECB operations ignore unknown tags.
+ */
+TEST_P(EncryptionOperationsTest, AesEcbUnknownTag) {
+ int32_t unknown_tag_value = ((7 << 28) /* TagType:BOOL */ | 150);
+ Tag unknown_tag = static_cast<Tag>(unknown_tag_value);
+ KeyParameter unknown_param;
+ unknown_param.tag = unknown_tag;
+
+ vector<KeyCharacteristics> key_characteristics;
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::ECB)
+ .Padding(PaddingMode::NONE)
+ .Authorization(unknown_param),
+ &key_blob_, &key_characteristics));
+ ASSERT_GT(key_blob_.size(), 0U);
+
+ // Unknown tags should not be returned in key characteristics.
+ AuthorizationSet hw_enforced = HwEnforcedAuthorizations(key_characteristics);
+ AuthorizationSet sw_enforced = SwEnforcedAuthorizations(key_characteristics);
+ EXPECT_EQ(hw_enforced.find(unknown_tag), -1);
+ EXPECT_EQ(sw_enforced.find(unknown_tag), -1);
+
+ // Encrypt without mentioning the unknown parameter.
+ auto params = AuthorizationSetBuilder().BlockMode(BlockMode::ECB).Padding(PaddingMode::NONE);
+ string message = "12345678901234567890123456789012";
+ string ciphertext = EncryptMessage(message, params);
+ EXPECT_EQ(message.size(), ciphertext.size());
+
+ // Decrypt including the unknown parameter.
+ auto decrypt_params = AuthorizationSetBuilder()
+ .BlockMode(BlockMode::ECB)
+ .Padding(PaddingMode::NONE)
+ .Authorization(unknown_param);
+ string plaintext = DecryptMessage(ciphertext, decrypt_params);
+ EXPECT_EQ(message, plaintext);
+}
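The (7 << 28) | 150 expression above relies on KeyMint tag values packing the tag type into the top four bits and the tag number into the remaining bits. A small standalone sketch of that decomposition (illustrative; the shift and mask are inferred from the expression in the test rather than quoted from a KeyMint header):

#include <cstdint>

constexpr uint32_t kTagTypeShift = 28;
constexpr uint32_t kTagNumberMask = 0x0fffffff;

constexpr uint32_t TagType(uint32_t tag) { return tag >> kTagTypeShift; }
constexpr uint32_t TagNumber(uint32_t tag) { return tag & kTagNumberMask; }

// The unknown tag used by the test: type 7 (BOOL) with an unassigned tag number, 150.
constexpr uint32_t kUnknownTag = (7u << 28) | 150;
static_assert(TagType(kUnknownTag) == 7, "type bits live in bits 31..28");
static_assert(TagNumber(kUnknownTag) == 150, "tag number lives in bits 27..0");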
+
+/*
* EncryptionOperationsTest.AesWrongMode
*
* Verifies that AES encryption fails in the correct way when an unauthorized mode is specified.
@@ -3545,11 +4262,8 @@
.AesEncryptionKey(128)
.Authorization(TAG_BLOCK_MODE, BlockMode::CBC)
.Padding(PaddingMode::NONE)));
-
ASSERT_GT(key_blob_.size(), 0U);
- // Two-block message.
- string message = "12345678901234567890123456789012";
EXPECT_EQ(
ErrorCode::INCOMPATIBLE_BLOCK_MODE,
Begin(KeyPurpose::ENCRYPT,
@@ -3557,6 +4271,55 @@
}
/*
+ * EncryptionOperationsTest.AesWrongPadding
+ *
+ * Verifies that AES encryption fails in the correct way when an unauthorized padding is specified.
+ */
+TEST_P(EncryptionOperationsTest, AesWrongPadding) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::CBC)
+ .Padding(PaddingMode::NONE)));
+ ASSERT_GT(key_blob_.size(), 0U);
+
+ EXPECT_EQ(
+ ErrorCode::INCOMPATIBLE_PADDING_MODE,
+ Begin(KeyPurpose::ENCRYPT,
+ AuthorizationSetBuilder().BlockMode(BlockMode::CBC).Padding(PaddingMode::PKCS7)));
+}
+
+/*
+ * EncryptionOperationsTest.AesInvalidParams
+ *
+ * Verifies that AES encryption fails in the correct way when duplicate parameters are specified.
+ */
+TEST_P(EncryptionOperationsTest, AesInvalidParams) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::CBC)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::ECB)
+ .Padding(PaddingMode::NONE)
+ .Padding(PaddingMode::PKCS7)));
+ ASSERT_GT(key_blob_.size(), 0U);
+
+ auto result = Begin(KeyPurpose::ENCRYPT, AuthorizationSetBuilder()
+ .BlockMode(BlockMode::CBC)
+ .BlockMode(BlockMode::ECB)
+ .Padding(PaddingMode::NONE));
+ EXPECT_TRUE(result == ErrorCode::INCOMPATIBLE_BLOCK_MODE ||
+ result == ErrorCode::UNSUPPORTED_BLOCK_MODE);
+
+ result = Begin(KeyPurpose::ENCRYPT, AuthorizationSetBuilder()
+ .BlockMode(BlockMode::ECB)
+ .Padding(PaddingMode::NONE)
+ .Padding(PaddingMode::PKCS7));
+ EXPECT_TRUE(result == ErrorCode::INCOMPATIBLE_PADDING_MODE ||
+ result == ErrorCode::UNSUPPORTED_PADDING_MODE);
+}
+
+/*
* EncryptionOperationsTest.AesWrongPurpose
*
* Verifies that AES encryption fails in the correct way when an unauthorized purpose is
@@ -3597,25 +4360,30 @@
}
/*
- * EncryptionOperationsTest.AesEcbNoPaddingWrongInputSize
+ * EncryptionOperationsTest.AesEcbCbcNoPaddingWrongInputSize
*
* Verifies that AES encryption fails in the correct way when provided an input that is not a
* multiple of the block size and no padding is specified.
*/
-TEST_P(EncryptionOperationsTest, AesEcbNoPaddingWrongInputSize) {
- ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
- .Authorization(TAG_NO_AUTH_REQUIRED)
- .AesEncryptionKey(128)
- .Authorization(TAG_BLOCK_MODE, BlockMode::ECB)
- .Padding(PaddingMode::NONE)));
- // Message is slightly shorter than two blocks.
- string message(16 * 2 - 1, 'a');
+TEST_P(EncryptionOperationsTest, AesEcbCbcNoPaddingWrongInputSize) {
+ for (BlockMode blockMode : {BlockMode::ECB, BlockMode::CBC}) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .Authorization(TAG_BLOCK_MODE, blockMode)
+ .Padding(PaddingMode::NONE)));
+ // Message is slightly shorter than two blocks.
+ string message(16 * 2 - 1, 'a');
- auto params = AuthorizationSetBuilder().BlockMode(BlockMode::ECB).Padding(PaddingMode::NONE);
- EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, params));
- string ciphertext;
- EXPECT_EQ(ErrorCode::INVALID_INPUT_LENGTH, Finish(message, &ciphertext));
- EXPECT_EQ(0U, ciphertext.size());
+ auto params = AuthorizationSetBuilder().BlockMode(blockMode).Padding(PaddingMode::NONE);
+ AuthorizationSet out_params;
+ EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, params, &out_params));
+ string ciphertext;
+ EXPECT_EQ(ErrorCode::INVALID_INPUT_LENGTH, Finish(message, &ciphertext));
+ EXPECT_EQ(0U, ciphertext.size());
+
+ CheckedDeleteKey();
+ }
}
/*
@@ -4175,6 +4943,31 @@
}
/*
+ * EncryptionOperationsTest.AesGcmDifferentAutoNonces
+ *
+ * Verifies that encrypting the same data with KeyMint generated nonces produces different outputs.
+ */
+TEST_P(EncryptionOperationsTest, AesGcmDifferentAutoNonces) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::GCM)
+ .Padding(PaddingMode::NONE)
+ .Authorization(TAG_MIN_MAC_LENGTH, 128)));
+
+ string aad = "foobar";
+ string message = "123456789012345678901234567890123456";
+
+ string ciphertext1 = EncryptMessage(message, BlockMode::GCM, PaddingMode::NONE, 128);
+ string ciphertext2 = EncryptMessage(message, BlockMode::GCM, PaddingMode::NONE, 128);
+ string ciphertext3 = EncryptMessage(message, BlockMode::GCM, PaddingMode::NONE, 128);
+
+ ASSERT_NE(ciphertext1, ciphertext2);
+ ASSERT_NE(ciphertext1, ciphertext3);
+ ASSERT_NE(ciphertext2, ciphertext3);
+}
+
+/*
* EncryptionOperationsTest.AesGcmTooShortTag
*
* Verifies that AES GCM mode fails correctly when a too-short tag length is specified.
@@ -4402,6 +5195,9 @@
EXPECT_EQ(ErrorCode::OK, Update(message, &ciphertext));
EXPECT_EQ(ErrorCode::INVALID_TAG, UpdateAad("foo"));
+ // The failure should have already cancelled the operation.
+ EXPECT_EQ(ErrorCode::INVALID_OPERATION_HANDLE, Abort());
+
op_ = {};
}
@@ -4768,6 +5564,25 @@
}
/*
+ * EncryptionOperationsTest.TripleDesInvalidCallerIv
+ *
+ * Validates that keymint fails correctly when the user supplies an incorrect-size IV.
+ */
+TEST_P(EncryptionOperationsTest, TripleDesInvalidCallerIv) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .TripleDesEncryptionKey(168)
+ .BlockMode(BlockMode::CBC)
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .Authorization(TAG_CALLER_NONCE)
+ .Padding(PaddingMode::NONE)));
+ auto params = AuthorizationSetBuilder()
+ .BlockMode(BlockMode::CBC)
+ .Padding(PaddingMode::NONE)
+ .Authorization(TAG_NONCE, AidlBuf("abcdefg"));
+ EXPECT_EQ(ErrorCode::INVALID_NONCE, Begin(KeyPurpose::ENCRYPT, params));
+}
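For contrast, a hedged sketch of the accepting case (not part of this change, and reusing the harness helpers visible in the test above): DES/3DES has an 8-byte block, so the same key setup should accept an 8-byte caller-supplied nonce where the 7-byte "abcdefg" is rejected with INVALID_NONCE.

    // Hypothetical counterpart inside the same test fixture, not taken from the patch.
    auto valid_params = AuthorizationSetBuilder()
                                .BlockMode(BlockMode::CBC)
                                .Padding(PaddingMode::NONE)
                                .Authorization(TAG_NONCE, AidlBuf("abcdefgh"));  // one 8-byte block
    EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, valid_params));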
+
+/*
* EncryptionOperationsTest.TripleDesCallerIv
*
* Validates that 3DES keys can allow caller-specified IVs, and use them correctly.
@@ -4805,7 +5620,7 @@
/*
* EncryptionOperationsTest, TripleDesCallerNonceProhibited.
*
- * Verifies that 3DES keys without TAG_CALLER_NONCE do not allow caller-specified IVS.
+ * Verifies that 3DES keys without TAG_CALLER_NONCE do not allow caller-specified IVs.
*/
TEST_P(EncryptionOperationsTest, TripleDesCallerNonceProhibited) {
ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
@@ -4853,25 +5668,29 @@
}
/*
- * EncryptionOperationsTest.TripleDesCbcNoPaddingWrongInputSize
+ * EncryptionOperationsTest.TripleDesEcbCbcNoPaddingWrongInputSize
*
* Verifies that unpadded CBC operations reject inputs that are not a multiple of block size.
*/
-TEST_P(EncryptionOperationsTest, TripleDesCbcNoPaddingWrongInputSize) {
- ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
- .TripleDesEncryptionKey(168)
- .BlockMode(BlockMode::CBC)
- .Authorization(TAG_NO_AUTH_REQUIRED)
- .Padding(PaddingMode::NONE)));
- // Message is slightly shorter than two blocks.
- string message = "123456789012345";
+TEST_P(EncryptionOperationsTest, TripleDesEcbCbcNoPaddingWrongInputSize) {
+ for (BlockMode blockMode : {BlockMode::ECB, BlockMode::CBC}) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .TripleDesEncryptionKey(168)
+ .BlockMode(blockMode)
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .Padding(PaddingMode::NONE)));
+ // Message is slightly shorter than two blocks.
+ string message = "123456789012345";
- auto begin_params =
- AuthorizationSetBuilder().BlockMode(BlockMode::CBC).Padding(PaddingMode::NONE);
- AuthorizationSet output_params;
- EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, begin_params, &output_params));
- string ciphertext;
- EXPECT_EQ(ErrorCode::INVALID_INPUT_LENGTH, Finish(message, "", &ciphertext));
+ auto begin_params =
+ AuthorizationSetBuilder().BlockMode(blockMode).Padding(PaddingMode::NONE);
+ AuthorizationSet output_params;
+ EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, begin_params, &output_params));
+ string ciphertext;
+ EXPECT_EQ(ErrorCode::INVALID_INPUT_LENGTH, Finish(message, "", &ciphertext));
+
+ CheckedDeleteKey();
+ }
}
/*
@@ -5444,6 +6263,28 @@
INSTANTIATE_KEYMINT_AIDL_TEST(KeyDeletionTest);
+typedef KeyMintAidlTestBase KeyUpgradeTest;
+
+/**
+ * KeyUpgradeTest.UpgradeInvalidKey
+ *
+ * This test checks that the HAL rejects invalid key blobs.
+ */
+TEST_P(KeyUpgradeTest, UpgradeInvalidKey) {
+ AidlBuf key_blob = AidlBuf("just some garbage data which is not a valid key blob");
+
+ std::vector<uint8_t> new_blob;
+ Status result = keymint_->upgradeKey(key_blob,
+ AuthorizationSetBuilder()
+ .Authorization(TAG_APPLICATION_ID, "clientid")
+ .Authorization(TAG_APPLICATION_DATA, "appdata")
+ .vector_data(),
+ &new_blob);
+ ASSERT_EQ(ErrorCode::INVALID_KEY_BLOB, GetReturnErrorCode(result));
+}
+
+INSTANTIATE_KEYMINT_AIDL_TEST(KeyUpgradeTest);
+
using UpgradeKeyTest = KeyMintAidlTestBase;
/*
@@ -5667,6 +6508,17 @@
INSTANTIATE_KEYMINT_AIDL_TEST(KeyAgreementTest);
+using DestroyAttestationIdsTest = KeyMintAidlTestBase;
+
+// This is a problematic test, as it can render the device under test permanently unusable.
+// Re-enable and run at your own risk.
+TEST_P(DestroyAttestationIdsTest, DISABLED_DestroyTest) {
+ auto result = DestroyAttestationIds();
+ EXPECT_TRUE(result == ErrorCode::OK || result == ErrorCode::UNIMPLEMENTED);
+}
+
+INSTANTIATE_KEYMINT_AIDL_TEST(DestroyAttestationIdsTest);
+
using EarlyBootKeyTest = KeyMintAidlTestBase;
TEST_P(EarlyBootKeyTest, CreateEarlyBootKeys) {
@@ -5679,7 +6531,7 @@
CheckedDeleteKey(&ecdsaKeyData.blob);
}
-// This is a more comprenhensive test, but it can only be run on a machine which is still in early
+// This is a more comprehensive test, but it can only be run on a machine which is still in early
// boot stage, which no proper Android device is by the time we can run VTS. To use this,
// un-disable it and modify vold to remove the call to earlyBootEnded(). Running the test will end
// early boot, so you'll have to reboot between runs.
@@ -5747,7 +6599,7 @@
EXPECT_EQ(ErrorCode::OK, UseEcdsaKey(ecdsaKeyData.blob));
ErrorCode rc = GetReturnErrorCode(
- keyMint().deviceLocked(false /* passwordOnly */, {} /* verificationToken */));
+ keyMint().deviceLocked(false /* passwordOnly */, {} /* timestampToken */));
ASSERT_EQ(ErrorCode::OK, rc);
EXPECT_EQ(ErrorCode::DEVICE_LOCKED, UseAesKey(aesKeyData.blob));
EXPECT_EQ(ErrorCode::DEVICE_LOCKED, UseHmacKey(hmacKeyData.blob));
diff --git a/sensors/1.0/ISensors.hal b/sensors/1.0/ISensors.hal
index 8d41de2..0e172ef 100644
--- a/sensors/1.0/ISensors.hal
+++ b/sensors/1.0/ISensors.hal
@@ -103,7 +103,7 @@
* Flush adds a FLUSH_COMPLETE metadata event to the end of the "batch mode"
* FIFO for the specified sensor and flushes the FIFO. If the FIFO is empty
* or if the sensor doesn't support batching (FIFO size zero), return
- * SUCCESS and add a trivial FLUSH_COMPLETE event added to the event stream.
+ * OK and add a trivial FLUSH_COMPLETE event to the event stream.
* This applies to all sensors other than one-shot sensors. If the sensor
* is a one-shot sensor, flush must return BAD_VALUE and not generate any
* flush complete metadata. If the sensor is not active at the time flush()
diff --git a/sensors/1.0/types.hal b/sensors/1.0/types.hal
index cbbe92f..ac7be06 100644
--- a/sensors/1.0/types.hal
+++ b/sensors/1.0/types.hal
@@ -1130,7 +1130,7 @@
/**
* High performance mode hint. Device is able to use up more power and take
- * more reources to improve throughput and latency in high performance mode.
+ * more resources to improve throughput and latency in high performance mode.
* One possible use case is virtual reality, when sensor latency need to be
* carefully controlled.
* int32_t: 1 or 0, denote if device is in/out of high performance mode,
diff --git a/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp b/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
index 56bc9cf..1f579ba 100644
--- a/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
+++ b/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
@@ -190,7 +190,7 @@
});
}
-// Test if sensor list returned is valid
+// Test if sensor hal can switch to different operation modes
TEST_P(SensorsHidlTest, SetOperationMode) {
std::vector<SensorInfo> sensorList = getSensorsList();
@@ -208,7 +208,7 @@
ASSERT_EQ(Result::OK, S()->setOperationMode(OperationMode::NORMAL));
}
-// Test if sensor list returned is valid
+// Test if sensor hal can receive injected events in loopback mode
TEST_P(SensorsHidlTest, InjectSensorEventData) {
std::vector<SensorInfo> sensorList = getSensorsList();
std::vector<SensorInfo> sensorSupportInjection;
diff --git a/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h b/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h
index a8e1996..af14009 100644
--- a/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h
+++ b/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h
@@ -425,7 +425,10 @@
return;
}
- batchingPeriodInNs = std::min(batchingPeriodInNs, maxBatchingTestTimeNs);
+ if (batchingPeriodInNs > maxBatchingTestTimeNs) {
+ batchingPeriodInNs = maxBatchingTestTimeNs;
+ minFifoCount = (uint32_t)(batchingPeriodInNs / minSamplingPeriodInNs);
+ }
ALOGI("Test batching for %d ms", (int)(batchingPeriodInNs / 1000 / 1000));
@@ -448,7 +451,7 @@
false /*change collection*/);
// 0.8 + 0.2 times the batching period
- usleep(batchingPeriodInNs / 1000 * 8 / 10);
+ usleep(batchingPeriodInNs / 1000 * 2 / 10);
ASSERT_EQ(flush(handle), Result::OK);
// plus some time for the event to deliver
diff --git a/soundtrigger/2.4/Android.bp b/soundtrigger/2.4/Android.bp
new file mode 100644
index 0000000..44befc3
--- /dev/null
+++ b/soundtrigger/2.4/Android.bp
@@ -0,0 +1,30 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+hidl_interface {
+ name: "android.hardware.soundtrigger@2.4",
+ root: "android.hardware",
+ srcs: [
+ "ISoundTriggerHw.hal",
+ "ISoundTriggerHwCallback.hal",
+ "ISoundTriggerHwGlobalCallback.hal",
+ ],
+ interfaces: [
+ "android.hardware.audio.common@2.0",
+ "android.hardware.soundtrigger@2.0",
+ "android.hardware.soundtrigger@2.1",
+ "android.hardware.soundtrigger@2.2",
+ "android.hardware.soundtrigger@2.3",
+ "android.hidl.base@1.0",
+ "android.hidl.safe_union@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/soundtrigger/2.4/ISoundTriggerHw.hal b/soundtrigger/2.4/ISoundTriggerHw.hal
new file mode 100644
index 0000000..fd39303
--- /dev/null
+++ b/soundtrigger/2.4/ISoundTriggerHw.hal
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.soundtrigger@2.4;
+
+import @2.0::SoundModelHandle;
+import @2.1::ISoundTriggerHw.SoundModel;
+import @2.1::ISoundTriggerHw.PhraseSoundModel;
+import @2.3::ISoundTriggerHw;
+import @2.3::RecognitionConfig;
+import ISoundTriggerHwCallback;
+import ISoundTriggerHwGlobalCallback;
+
+/**
+ * SoundTrigger HAL interface. Used for hardware recognition of hotwords
+ * and other sounds.
+ *
+ * Important notes about the threading model:
+ * ==========================================
+ * Both this interface and the corresponding callback interface use a synchronized calling
+ * convention. This model comes with some advantages, but also with some risks of deadlocks if the
+ * implementation does not handle this correctly. Please consider the following:
+ * - After stopRecognition() returns no more recognition events for that model may be sent. This
+ * implies that any queues holding such events must be flushed before the call returns and that
+ * may imply that callbacks from the HAL to the client are done while stopRecognition() is blocked.
+ * This is OK, and supported by the framework.
+ * - Similarly, the same relationship applies between unloadModel() and subsequent callbacks to
+ * modelUnloaded().
+ * - Other than these two cases, calls into the HAL *MAY NOT* block on callbacks from the HAL, or
+ * else deadlock conditions may result, which may have to be handled by rebooting the HAL process,
+ * causing service outages.
+ *
+ * Similarly, it is expected that a single call to startRecognition() generates at most one event
+ * (the model automatically becomes stopped when the event occurs, until explicitly started again)
+ * and that after a modelUnloaded() event no more events would be sent regarding the model.
+ * Note that a getModelState() call may generate a recognition event, but this event DOES NOT modify
+ * the model state - the model remains started.
+ *
+ * The HAL is expected to correctly handle a stopRecognition() call even after it sent an event
+ * indicating that recognition is stopped and an unloadModel() call even after it sent an event
+ * indicating that it has been unloaded. This is required in order to prevent race conditions
+ * between these calls. This also implies that model handles should generally not be reused until
+ * explicitly unloaded. To avoid the rare possibility of running out of handles, the framework will
+ * call unloadModel() on models that have been preemptively unloaded by the HAL.
+ *
+ * Due to the asynchronous nature of recognition events and preemptive model unloading, the HAL must
+ * correctly handle requests that would have been valid before an event has been delivered, but
+ * became moot as a result of the event. Namely:
+ * - stopRecognition() may be called on a model that has already delivered an event and became
+ * inactive as a result. The HAL must return a successful return code in this case.
+ * - Furthermore, if a model is preemptively unloaded after it triggers (typically, this would
+ * happen when it is first aborted and immediately preemptively unloaded), stopRecognition() may
+ * be called on it. The HAL must return a successful return code in this case.
+ * - startRecognition() may be called on a model that has been preemptively unloaded. In this case,
+ * the HAL must return -EBUSY to indicate that the operation is temporarily unsuccessful.
+ * - unloadSoundModel() may be called on a model that has been preemptively unloaded. The HAL must
+ * return a successful return code in this case.
+ *
+ * Important notes about resource constraints and concurrency
+ * =========================================================
+ * Up until this version, the framework would enforce concurrency constraints expressed by the
+ * Properties presented by the soundtrigger instance. These include constraints on the maximum
+ * amount of models that can be loaded at the same time and on running recognition while capturing
+ * from the microphone.
+ * This version changes the approach for how these constraints are modeled, both offering the HAL
+ * implementation more flexibility and simplifying the framework's job in enforcing these
+ * limitations. Note that there is no change in how the framework behaves with earlier versions;
+ * everything described below only applies to this version and onward.
+ * The way this is achieved is as follows:
+ * - The framework will no longer enforce constraints on concurrent loading of models, as expressed
+ * in the Properties.maxSoundModels field (this property is merely a hint at this point and may be
+ * deprecated in the future).
+ * - The framework will no longer enforce constraints on concurrency of audio recording and
+ * soundtrigger operation, as expressed in the Properties.concurrentCapture field (this property
+ * is merely a hint at this point and may be deprecated in the future).
+ * - The HAL implementation is free to reject starting of any model at any time by having the
+ * respective start*() method return -EBUSY.
+ * - The HAL implementation is free to reject loading of any model at any time by having the
+ * respective load*() method return -EBUSY.
+ * - The HAL implementation is free to preemptively stop a previously started model at its own
+ * discretion (for example, if a higher priority use-case which cannot coexist with detection
+ * has been requested). The HAL must notify the framework of the preemption by sending a
+ * recognition event with an `ABORT` status. The implementation must NOT attempt to restart the
+ * recognition automatically when conditions change.
+ * - The HAL implementation is free to preemptively unload a previously loaded model at its own
+ * discretion (for example, if a higher-priority model is being loaded and the two cannot
+ * coexist). When doing so, it must first abort the detection if active (as per above) and then
+ * notify the framework of the unload using the newly added modelUnloaded callback.
+ * - When conditions change such that a model that previously failed to load or start, or that had
+ * previously been preemptively stopped or unloaded, might now succeed, the HAL must notify the
+ * framework via the newly added onResourcesAvailable() callback on the global callback interface.
+ * This callback is not a guarantee that any operation would now succeed, but merely a hint that
+ * retrying something that had previously failed now MAY succeed.
+ * Until this callback arrives, the framework may assume that any operation that had previously
+ * failed or aborted would still fail if retried, so the implementation should not forget to
+ * deliver it. There are no guarantees regarding how the framework may respond to this event and
+ * the order in which it may choose to reload/restart its models. Typically, as a result of this
+ * event the framework will make a single attempt per model to bring this model to its desired
+ * state (loaded, started).
+ */
+interface ISoundTriggerHw extends @2.3::ISoundTriggerHw {
+ /**
+ * This will get called at most once per attachment to the service.
+ *
+ * All events not tied to a specific model should go through this callback.
+ */
+ registerGlobalCallback(ISoundTriggerHwGlobalCallback callback);
+
+ /**
+ * Load a sound model. Once loaded, recognition of this model can be
+ * started and stopped.
+ * The implementation returns a unique handle used by other functions
+ * (unloadSoundModel(), startRecognition*(), etc.).
+ *
+ * Must have the exact same semantics as loadSoundModel from ISoundTriggerHw@2.3 except that the
+ * return values have changed and that there is no cookie provided (the implementation may pass
+ * any value to the callback, as it is ignored).
+ *
+ * @param soundModel A SoundModel structure describing the sound model
+ * to load.
+ * @param callback The callback interface on which the soundModelCallback*()
+ * method must be called upon completion and modelUnloaded() upon preempted unload.
+ * @return retval Operation completion status: 0 in case of success,
+ * -EBUSY in case the operation is temporarily unavailable (but possible in general).
+ * @return modelHandle A unique handle assigned by the HAL for use by the
+ * framework when controlling activity for this sound model.
+ */
+ loadSoundModel_2_4(SoundModel soundModel, ISoundTriggerHwCallback callback)
+ generates (int32_t retval, SoundModelHandle modelHandle);
+
+ /**
+ * Load a key phrase sound model. Once loaded, recognition of this model can
+ * be started and stopped. Only one active recognition per model at a time.
+ * The SoundTrigger service must handle concurrent recognition requests by
+ * different users/applications on the same model.
+ * The implementation returns a unique handle used by other functions
+ * (unloadSoundModel(), startRecognition*(), etc.).
+ *
+ * Must have the exact same semantics as loadPhraseSoundModel from ISoundTriggerHw@2.3 except
+ * that the return values have changed and that there is no cookie provided (the implementation
+ * may pass any value to the callback, as it is ignored).
+ *
+ * @param soundModel A PhraseSoundModel structure describing the sound model
+ * to load.
+ * @param callback The callback interface on which the soundModelCallback*()
+ * method must be called upon completion and modelUnloaded() upon preempted unload.
+ * @return retval Operation completion status: 0 in case of success,
+ * -EBUSY in case the operation is temporarily unavailable (but possible in general).
+ * @return modelHandle A unique handle assigned by the HAL for use by the
+ * framework when controlling activity for this sound model.
+ */
+ loadPhraseSoundModel_2_4(PhraseSoundModel soundModel, ISoundTriggerHwCallback callback)
+ generates (int32_t retval, SoundModelHandle modelHandle);
+
+ /**
+ * Start recognition on a given model. Only one recognition active
+ * at a time per model. Once recognition succeeds or fails, the callback
+ * associated with the model handle is called.
+ *
+ * Must have the exact same semantics as startRecognition from ISoundTriggerHw@2.3 except that
+ * there are different expectations of the return value and that there is no cookie provided
+ * (the implementation may pass any value to the callback, as it is ignored).
+ *
+ * @param modelHandle the handle of the sound model to use for recognition
+ * @param config A RecognitionConfig structure containing attributes of the
+ * recognition to perform
+ * @param callback The callback interface on which the recognitionCallback()
+ * method must be called upon recognition.
+ * @return retval Operation completion status: 0 in case of success,
+ * -EBUSY in case the operation is temporarily unavailable (but possible in general), or in
+ * case the model has been preemptively unloaded.
+ */
+ startRecognition_2_4(SoundModelHandle modelHandle, RecognitionConfig config)
+ generates (int32_t retval);
+};
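The resource-constraint rules above reduce to a simple client-side contract: after any load*/start* call fails with -EBUSY, retrying is pointless until the global onResourcesAvailable() callback (declared in ISoundTriggerHwGlobalCallback.hal below) arrives. A minimal, framework-agnostic C++ sketch of that gating (illustrative only; the real SoundTriggerMiddlewareService logic is more involved):

#include <mutex>

// Tracks whether retrying a previously -EBUSY-rejected load/start may be worthwhile.
class RetryGate {
  public:
    // Call when any load*/start* HAL method returns -EBUSY.
    void OnBusyFailure() {
        std::lock_guard<std::mutex> lock(mLock);
        mRetryMayHelp = false;
    }
    // Call from the ISoundTriggerHwGlobalCallback::onResourcesAvailable() handler.
    void OnResourcesAvailable() {
        std::lock_guard<std::mutex> lock(mLock);
        mRetryMayHelp = true;
    }
    // While this returns false, a retry is guaranteed to fail again (per the interface
    // comments above); when it returns true, a retry may succeed but is not guaranteed to.
    bool ShouldRetry() const {
        std::lock_guard<std::mutex> lock(mLock);
        return mRetryMayHelp;
    }

  private:
    mutable std::mutex mLock;
    bool mRetryMayHelp = true;
};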
diff --git a/soundtrigger/2.4/ISoundTriggerHwCallback.hal b/soundtrigger/2.4/ISoundTriggerHwCallback.hal
new file mode 100644
index 0000000..594deb0
--- /dev/null
+++ b/soundtrigger/2.4/ISoundTriggerHwCallback.hal
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.soundtrigger@2.4;
+
+import @2.0::SoundModelHandle;
+import @2.1::ISoundTriggerHwCallback;
+
+/**
+ * SoundTrigger HAL per-model Callback interface.
+ */
+interface ISoundTriggerHwCallback extends @2.1::ISoundTriggerHwCallback {
+ /**
+ * Callback method called by the HAL when a model has been unloaded at the HAL implementation's
+ * discretion. Only a stopped model may be unloaded.
+ * This event is NOT sent as part of an unload sequence initiated by the client.
+ *
+ * @param model The model handle.
+ */
+ modelUnloaded(SoundModelHandle model);
+};
diff --git a/soundtrigger/2.4/ISoundTriggerHwGlobalCallback.hal b/soundtrigger/2.4/ISoundTriggerHwGlobalCallback.hal
new file mode 100644
index 0000000..2f1a977
--- /dev/null
+++ b/soundtrigger/2.4/ISoundTriggerHwGlobalCallback.hal
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.soundtrigger@2.4;
+
+/**
+ * SoundTrigger HAL callback interface for events not associated with a particular model.
+ */
+interface ISoundTriggerHwGlobalCallback {
+ /**
+ * Callback method called by the HAL whenever internal resources have become available, such
+ * that a call that would previously have failed with an -EBUSY status may now succeed.
+ * There is no guarantee that any call would succeed following this event. It is merely a hint
+ * to the client that it may retry.
+ * Conversely, any call that has failed previously with -EBUSY is guaranteed to fail again if
+ * retried, until this callback is delivered.
+ */
+ onResourcesAvailable();
+};
diff --git a/soundtrigger/2.4/cli/Android.bp b/soundtrigger/2.4/cli/Android.bp
new file mode 100644
index 0000000..8d0979b
--- /dev/null
+++ b/soundtrigger/2.4/cli/Android.bp
@@ -0,0 +1,17 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+java_binary {
+ name: "sthal_cli_2.4",
+ wrapper: "sthal_cli_2.4",
+ srcs: ["java/**/*.java"],
+ static_libs: [
+ "android.hardware.soundtrigger-V2.4-java",
+ ],
+}
diff --git a/soundtrigger/2.4/cli/OWNERS b/soundtrigger/2.4/cli/OWNERS
new file mode 100644
index 0000000..e21b66e
--- /dev/null
+++ b/soundtrigger/2.4/cli/OWNERS
@@ -0,0 +1 @@
+include /media/java/android/media/soundtrigger_middleware/OWNERS
diff --git a/soundtrigger/2.4/cli/java/android/hardware/soundtrigger/V2_4/cli/SthalCli.java b/soundtrigger/2.4/cli/java/android/hardware/soundtrigger/V2_4/cli/SthalCli.java
new file mode 100644
index 0000000..4931105
--- /dev/null
+++ b/soundtrigger/2.4/cli/java/android/hardware/soundtrigger/V2_4/cli/SthalCli.java
@@ -0,0 +1,401 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.hardware.soundtrigger.V2_4.cli;
+
+import android.hardware.soundtrigger.V2_0.PhraseRecognitionExtra;
+import android.hardware.soundtrigger.V2_0.RecognitionMode;
+import android.hardware.soundtrigger.V2_0.SoundModelType;
+import android.hardware.soundtrigger.V2_3.OptionalModelParameterRange;
+import android.hardware.soundtrigger.V2_4.ISoundTriggerHw;
+import android.hardware.soundtrigger.V2_4.ISoundTriggerHwCallback;
+import android.hardware.soundtrigger.V2_4.ISoundTriggerHwGlobalCallback;
+import android.os.HidlMemoryUtil;
+import android.os.HwBinder;
+import android.os.RemoteException;
+import android.os.SystemProperties;
+
+import java.util.Scanner;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * This is a quick-and-dirty sound trigger HAL console mock.
+ *
+ * It would only work on userdebug builds.
+ *
+ * When this app is started, it will initially:
+ * - Register a ISoundTriggerHw HAL with an instance name "mock".
+ * - Set a sysprop that tells SoundTriggerMiddlewareService to try to connect to the mock instance
+ * rather than the default one.
+ * - Reboot the real (default) HAL.
+ *
+ * In response to that, SoundTriggerMiddlewareService is going to connect to the mock HAL and resume
+ * normal operation.
+ *
+ * Our mock HAL will print to stdout every call it receives as well as expose a basic set of
+ * operations for sending event callbacks to the client. This allows us to simulate the framework's
+ * behavior in response to different HAL behaviors.
+ */
+public class SthalCli {
+ private static SoundTriggerImpl mService;
+ private static final Scanner scanner = new Scanner(System.in);
+
+ public static void main(String[] args) {
+ try {
+ System.out.println("Registering mock STHAL");
+ HwBinder.setTrebleTestingOverride(true);
+ mService = new SoundTriggerImpl();
+ mService.registerAsService("mock");
+
+ System.out.println("Rebooting STHAL");
+ SystemProperties.set("debug.soundtrigger_middleware.use_mock_hal", "2");
+ SystemProperties.set("sys.audio.restart.hal", "1");
+
+ while (processCommand()) ;
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ cleanup();
+ }
+ }
+
+ private static void cleanup() {
+ System.out.println("Cleaning up.");
+ SystemProperties.set("debug.soundtrigger_middleware.use_mock_hal", null);
+ HwBinder.setTrebleTestingOverride(false);
+ }
+
+ private static boolean processCommand() {
+ String line = scanner.nextLine();
+ String[] tokens = line.split("\\s+");
+ if (tokens.length < 1) {
+ return false;
+ }
+ switch (tokens[0]) {
+ case "q":
+ return false;
+
+ case "a":
+ mService.sendOnResourcesAvailable();
+ return true;
+
+ case "u":
+ mService.sendModelUnloaded(Integer.parseInt(tokens[1]));
+ return true;
+
+ case "r":
+ mService.sendRecognitionEvent(Integer.parseInt(tokens[1]),
+ Integer.parseInt(tokens[2]));
+ return true;
+
+ case "p":
+ mService.sendPhraseRecognitionEvent(Integer.parseInt(tokens[1]),
+ Integer.parseInt(tokens[2]));
+ return true;
+
+ case "d":
+ mService.dumpModels();
+ return true;
+
+ case "h":
+ System.out.print("Available commands:\n" + "h - help\n" + "q - quit\n"
+ + "a - send onResourcesAvailable event\n"
+ + "u <model> - send modelUnloaded event\n"
+ + "r <model> <status> - send recognitionEvent\n"
+ + "p <model> <status> - send phraseRecognitionEvent\n"
+ + "d - dump models\n");
+
+ default:
+ return true;
+ }
+ }
+
+ private static class SoundTriggerImpl extends ISoundTriggerHw.Stub {
+ static class Model {
+ final ISoundTriggerHwCallback callback;
+ final SoundModel model;
+ final PhraseSoundModel phraseModel;
+ public android.hardware.soundtrigger.V2_3.RecognitionConfig config = null;
+
+ Model(ISoundTriggerHwCallback callback, SoundModel model) {
+ this.callback = callback;
+ this.model = model;
+ this.phraseModel = null;
+ }
+
+ Model(ISoundTriggerHwCallback callback, PhraseSoundModel model) {
+ this.callback = callback;
+ this.model = null;
+ this.phraseModel = model;
+ }
+ }
+
+ private ISoundTriggerHwGlobalCallback mGlobalCallback;
+ private final ConcurrentMap<Integer, Model> mLoadedModels = new ConcurrentHashMap<>();
+ private int mHandleCounter = 1;
+
+ public void dumpModels() {
+ mLoadedModels.forEach((handle, model) -> {
+ System.out.println("+++ Model " + handle);
+ System.out.println(" config = " + model.config);
+ android.hardware.soundtrigger.V2_3.RecognitionConfig recognitionConfig =
+ model.config;
+ if (recognitionConfig != null) {
+ System.out.println(" ACTIVE recognitionConfig = " + recognitionConfig);
+ } else {
+ System.out.println(" INACTIVE");
+ }
+ });
+ }
+
+ public void sendOnResourcesAvailable() {
+ if (mGlobalCallback != null) {
+ try {
+ mGlobalCallback.onResourcesAvailable();
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public void sendRecognitionEvent(int modelHandle, int status) {
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null && model.config != null) {
+ android.hardware.soundtrigger.V2_1.ISoundTriggerHwCallback.RecognitionEvent event =
+ new android.hardware.soundtrigger.V2_1.ISoundTriggerHwCallback.RecognitionEvent();
+ event.header.model = modelHandle;
+ event.header.type = SoundModelType.GENERIC;
+ event.header.status = status;
+ event.header.captureSession = model.config.base.header.captureHandle;
+ event.header.captureAvailable = true;
+ event.header.audioConfig.channelMask = 16;
+ event.header.audioConfig.format = 1;
+ event.header.audioConfig.sampleRateHz = 16000;
+ event.data = HidlMemoryUtil.byteArrayToHidlMemory(new byte[0]);
+ try {
+ model.callback.recognitionCallback_2_1(event, 0);
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ model.config = null;
+ }
+ }
+
+ public void sendPhraseRecognitionEvent(int modelHandle, int status) {
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null && model.config != null) {
+ android.hardware.soundtrigger.V2_1.ISoundTriggerHwCallback.PhraseRecognitionEvent
+ event =
+ new android.hardware.soundtrigger.V2_1.ISoundTriggerHwCallback.PhraseRecognitionEvent();
+ event.common.header.model = modelHandle;
+ event.common.header.type = SoundModelType.KEYPHRASE;
+ event.common.header.status = status;
+ event.common.header.captureSession = model.config.base.header.captureHandle;
+ event.common.header.captureAvailable = true;
+ event.common.header.audioConfig.channelMask = 16;
+ event.common.header.audioConfig.format = 1;
+ event.common.header.audioConfig.sampleRateHz = 16000;
+ event.common.data = HidlMemoryUtil.byteArrayToHidlMemory(new byte[0]);
+ if (!model.phraseModel.phrases.isEmpty()) {
+ PhraseRecognitionExtra extra = new PhraseRecognitionExtra();
+ extra.id = model.phraseModel.phrases.get(0).id;
+ extra.confidenceLevel = 100;
+ extra.recognitionModes = model.phraseModel.phrases.get(0).recognitionModes;
+ event.phraseExtras.add(extra);
+ }
+ try {
+ model.callback.phraseRecognitionCallback_2_1(event, 0);
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ model.config = null;
+ }
+ }
+
+ public void sendModelUnloaded(int modelHandle) {
+ Model model = mLoadedModels.remove(modelHandle);
+ if (model != null) {
+ try {
+ model.callback.modelUnloaded(modelHandle);
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public void registerGlobalCallback(ISoundTriggerHwGlobalCallback callback) {
+ System.out.println("registerGlobalCallback()");
+ mGlobalCallback = callback;
+ }
+
+ @Override
+ public void loadSoundModel_2_4(SoundModel soundModel, ISoundTriggerHwCallback callback,
+ loadSoundModel_2_4Callback _hidl_cb) {
+ int handle = mHandleCounter++;
+ System.out.printf("loadSoundModel_2_4(soundModel=%s) -> %d%n", soundModel, handle);
+ mLoadedModels.put(handle, new Model(callback, soundModel));
+ _hidl_cb.onValues(0, handle);
+ }
+
+ @Override
+ public void loadPhraseSoundModel_2_4(PhraseSoundModel soundModel,
+ ISoundTriggerHwCallback callback, loadPhraseSoundModel_2_4Callback _hidl_cb) {
+ int handle = mHandleCounter++;
+ System.out.printf("loadPhraseSoundModel_2_4(soundModel=%s) -> %d%n", soundModel,
+ handle);
+ mLoadedModels.put(handle, new Model(callback, soundModel));
+ _hidl_cb.onValues(0, handle);
+ }
+
+ @Override
+ public int startRecognition_2_4(int modelHandle,
+ android.hardware.soundtrigger.V2_3.RecognitionConfig config) {
+ System.out.printf("startRecognition_2_4(modelHandle=%d)%n", modelHandle);
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null) {
+ model.config = config;
+ }
+ return 0;
+ }
+
+ @Override
+ public void getProperties_2_3(getProperties_2_3Callback _hidl_cb) {
+ System.out.println("getProperties_2_3()");
+ android.hardware.soundtrigger.V2_3.Properties properties =
+ new android.hardware.soundtrigger.V2_3.Properties();
+ properties.base.implementor = "Android";
+ properties.base.description = "Mock STHAL";
+ properties.base.maxSoundModels = 2;
+ properties.base.maxKeyPhrases = 1;
+ properties.base.recognitionModes =
+ RecognitionMode.VOICE_TRIGGER | RecognitionMode.GENERIC_TRIGGER;
+ _hidl_cb.onValues(0, properties);
+ }
+
+ @Override
+ public void queryParameter(int modelHandle, int modelParam,
+ queryParameterCallback _hidl_cb) {
+ _hidl_cb.onValues(0, new OptionalModelParameterRange());
+ }
+
+ @Override
+ public int getModelState(int modelHandle) {
+ System.out.printf("getModelState(modelHandle=%d)%n", modelHandle);
+ return 0;
+ }
+
+ @Override
+ public int unloadSoundModel(int modelHandle) {
+ System.out.printf("unloadSoundModel(modelHandle=%d)%n", modelHandle);
+ return 0;
+ }
+
+ @Override
+ public int stopRecognition(int modelHandle) {
+ System.out.printf("stopRecognition(modelHandle=%d)%n", modelHandle);
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null) {
+ model.config = null;
+ }
+ return 0;
+ }
+
+ @Override
+ public void debug(android.os.NativeHandle fd, java.util.ArrayList<String> options) {
+ if (!options.isEmpty()) {
+ switch (options.get(0)) {
+ case "reboot":
+ System.out.println("Received a reboot request. Exiting.");
+ cleanup();
+ System.exit(1);
+ }
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // Everything below is not implemented and not expected to be called.
+
+ @Override
+ public int startRecognition_2_3(int modelHandle,
+ android.hardware.soundtrigger.V2_3.RecognitionConfig config) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int setParameter(int modelHandle, int modelParam, int value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void getParameter(int modelHandle, int modelParam, getParameterCallback _hidl_cb) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void loadSoundModel_2_1(SoundModel soundModel,
+ android.hardware.soundtrigger.V2_1.ISoundTriggerHwCallback callback, int cookie,
+ loadSoundModel_2_1Callback _hidl_cb) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void loadPhraseSoundModel_2_1(PhraseSoundModel soundModel,
+ android.hardware.soundtrigger.V2_1.ISoundTriggerHwCallback callback, int cookie,
+ loadPhraseSoundModel_2_1Callback _hidl_cb) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startRecognition_2_1(int modelHandle, RecognitionConfig config,
+ android.hardware.soundtrigger.V2_1.ISoundTriggerHwCallback callback, int cookie) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void getProperties(getPropertiesCallback _hidl_cb) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void loadSoundModel(
+ android.hardware.soundtrigger.V2_0.ISoundTriggerHw.SoundModel soundModel,
+ android.hardware.soundtrigger.V2_0.ISoundTriggerHwCallback callback, int cookie,
+ loadSoundModelCallback _hidl_cb) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void loadPhraseSoundModel(
+ android.hardware.soundtrigger.V2_0.ISoundTriggerHw.PhraseSoundModel soundModel,
+ android.hardware.soundtrigger.V2_0.ISoundTriggerHwCallback callback, int cookie,
+ loadPhraseSoundModelCallback _hidl_cb) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startRecognition(int modelHandle,
+ android.hardware.soundtrigger.V2_0.ISoundTriggerHw.RecognitionConfig config,
+ android.hardware.soundtrigger.V2_0.ISoundTriggerHwCallback callback, int cookie) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int stopAllRecognitions() {
+ throw new UnsupportedOperationException();
+ }
+ }
+}
diff --git a/soundtrigger/2.4/cli/sthal_cli_2.4 b/soundtrigger/2.4/cli/sthal_cli_2.4
new file mode 100644
index 0000000..0801464
--- /dev/null
+++ b/soundtrigger/2.4/cli/sthal_cli_2.4
@@ -0,0 +1,7 @@
+#!/system/bin/sh
+# Script to start "sthal_cli_2.4" on the device
+#
+base=/system
+export CLASSPATH=$base/framework/sthal_cli_2.4.jar
+exec app_process $base/bin android.hardware.soundtrigger.V2_4.cli.SthalCli "$@"
+
diff --git a/soundtrigger/2.4/vts/functional/Android.bp b/soundtrigger/2.4/vts/functional/Android.bp
new file mode 100644
index 0000000..4b7ae91
--- /dev/null
+++ b/soundtrigger/2.4/vts/functional/Android.bp
@@ -0,0 +1,41 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+cc_test {
+ name: "VtsHalSoundtriggerV2_4TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: ["VtsHalSoundtriggerV2_4TargetTest.cpp"],
+ static_libs: [
+ "android.hardware.soundtrigger@2.0",
+ "android.hardware.soundtrigger@2.1",
+ "android.hardware.soundtrigger@2.2",
+ "android.hardware.soundtrigger@2.3",
+ "android.hardware.soundtrigger@2.4",
+ ],
+ test_suites: [
+ "general-tests",
+ "vts",
+ ],
+}
diff --git a/soundtrigger/2.4/vts/functional/VtsHalSoundtriggerV2_4TargetTest.cpp b/soundtrigger/2.4/vts/functional/VtsHalSoundtriggerV2_4TargetTest.cpp
new file mode 100644
index 0000000..13d7005
--- /dev/null
+++ b/soundtrigger/2.4/vts/functional/VtsHalSoundtriggerV2_4TargetTest.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SoundTriggerHidlHalTest"
+
+#include <android-base/logging.h>
+#include <android/hardware/audio/common/2.0/types.h>
+#include <android/hardware/soundtrigger/2.4/ISoundTriggerHwGlobalCallback.h>
+#include <android/hardware/soundtrigger/2.4/ISoundTriggerHw.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
+
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Status;
+using ::android::hardware::soundtrigger::V2_4::ISoundTriggerHw;
+using ::android::hardware::soundtrigger::V2_4::ISoundTriggerHwGlobalCallback;
+
+/**
+ * Test class holding the instance of the SoundTriggerHW service to test.
+ * The passed parameter is the registered name of the implementing service
+ * supplied by the INSTANTIATE_TEST_SUITE_P() call.
+ */
+class SoundTriggerHidlTest : public testing::TestWithParam<std::string> {
+public:
+ void SetUp() override {
+ mSoundtrigger = ISoundTriggerHw::getService(GetParam());
+
+ ASSERT_NE(mSoundtrigger, nullptr);
+ LOG(INFO) << "Test is remote " << mSoundtrigger->isRemote();
+ }
+
+protected:
+ sp<ISoundTriggerHw> mSoundtrigger;
+};
+
+/**
+ * Empty test is in place to ensure service is initialized.
+ * Due to the nature of SoundTrigger HAL providing an interface for
+ * proprietary or vendor specific implementations, limited testing on
+ * individual APIs is possible.
+ */
+TEST_P(SoundTriggerHidlTest, ServiceIsInstantiated) {}
+
+class GlobalCallback : public ISoundTriggerHwGlobalCallback {
+ Return<void> onResourcesAvailable() override {
+ return Status::ok();
+ }
+};
+
+/**
+ * Test ISoundTriggerHw::registerGlobalCallback method
+ *
+ * Verifies that:
+ * - the implementation implements the method
+ * - the method returns no error
+ */
+TEST_P(SoundTriggerHidlTest, RegisterGlobalCallback) {
+ Return<void> hidlReturn;
+ sp<ISoundTriggerHwGlobalCallback> callback = new GlobalCallback();
+ hidlReturn = mSoundtrigger->registerGlobalCallback(callback);
+ EXPECT_TRUE(hidlReturn.isOk());
+}
+
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(SoundTriggerHidlTest);
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, SoundTriggerHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(ISoundTriggerHw::descriptor)),
+ android::hardware::PrintInstanceNameToString);
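For readers following along in Java rather than C++, the RegisterGlobalCallback smoke check above can be sketched against the generated HIDL Java bindings for soundtrigger@2.4. This is a hypothetical, minimal sketch and not part of this change: the "default" instance name is an assumption (the VTS test enumerates every declared instance instead), and error handling is reduced to the bare minimum.

    import android.hardware.soundtrigger.V2_4.ISoundTriggerHw;
    import android.hardware.soundtrigger.V2_4.ISoundTriggerHwGlobalCallback;
    import android.os.RemoteException;

    public class GlobalCallbackSmokeCheck {
        // Minimal global callback; onResourcesAvailable() is the only global event in 2.4.
        static class GlobalCallback extends ISoundTriggerHwGlobalCallback.Stub {
            @Override
            public void onResourcesAvailable() {
                System.out.println("onResourcesAvailable()");
            }
        }

        public static void main(String[] args) throws RemoteException {
            // Assumed instance name; getService() returns null if the instance is not registered.
            ISoundTriggerHw hal = ISoundTriggerHw.getService("default");
            if (hal == null) {
                System.err.println("soundtrigger@2.4 'default' instance not found");
                return;
            }
            // Mirrors the isOk() expectation in the gtest above: a transport failure surfaces
            // as a RemoteException in the Java bindings.
            hal.registerGlobalCallback(new GlobalCallback());
            System.out.println("registerGlobalCallback() succeeded");
        }
    }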
diff --git a/soundtrigger/aidl/Android.bp b/soundtrigger/aidl/Android.bp
new file mode 100644
index 0000000..fcccc27
--- /dev/null
+++ b/soundtrigger/aidl/Android.bp
@@ -0,0 +1,37 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+aidl_interface {
+ name: "android.hardware.soundtrigger3",
+ vendor_available: true,
+ flags: ["-Werror", "-Weverything", ],
+ srcs: [
+ "android/hardware/soundtrigger3/ISoundTriggerHw.aidl",
+ "android/hardware/soundtrigger3/ISoundTriggerHwCallback.aidl",
+ "android/hardware/soundtrigger3/ISoundTriggerHwGlobalCallback.aidl",
+ ],
+ stability: "vintf",
+ imports: [
+ "android.media.soundtrigger.types",
+ ],
+ backend: {
+ cpp: {
+ // prefer NDK backend which can be used anywhere
+ enabled: false,
+ },
+ java: {
+ sdk_version: "module_current",
+ },
+ ndk: {
+ vndk: {
+ enabled: true,
+ },
+ },
+ },
+}
diff --git a/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHw.aidl b/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHw.aidl
new file mode 100644
index 0000000..bbfe7d9
--- /dev/null
+++ b/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHw.aidl
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.soundtrigger3;
+@VintfStability
+interface ISoundTriggerHw {
+ android.media.soundtrigger.Properties getProperties();
+ void registerGlobalCallback(in android.hardware.soundtrigger3.ISoundTriggerHwGlobalCallback callback);
+ int loadSoundModel(in android.media.soundtrigger.SoundModel soundModel, in android.hardware.soundtrigger3.ISoundTriggerHwCallback callback);
+ int loadPhraseSoundModel(in android.media.soundtrigger.PhraseSoundModel soundModel, in android.hardware.soundtrigger3.ISoundTriggerHwCallback callback);
+ void unloadSoundModel(in int modelHandle);
+ void startRecognition(in int modelHandle, in int deviceHandle, in int ioHandle, in android.media.soundtrigger.RecognitionConfig config);
+ void stopRecognition(in int modelHandle);
+ void forceRecognitionEvent(in int modelHandle);
+ @nullable android.media.soundtrigger.ModelParameterRange queryParameter(in int modelHandle, in android.media.soundtrigger.ModelParameter modelParam);
+ int getParameter(in int modelHandle, in android.media.soundtrigger.ModelParameter modelParam);
+ void setParameter(in int modelHandle, in android.media.soundtrigger.ModelParameter modelParam, in int value);
+}
diff --git a/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHwCallback.aidl b/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHwCallback.aidl
new file mode 100644
index 0000000..152dfed
--- /dev/null
+++ b/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHwCallback.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.soundtrigger3;
+@VintfStability
+interface ISoundTriggerHwCallback {
+ void modelUnloaded(in int model);
+ void phraseRecognitionCallback(in int model, in android.media.soundtrigger.PhraseRecognitionEvent event);
+ void recognitionCallback(in int model, in android.media.soundtrigger.RecognitionEvent event);
+}
diff --git a/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHwGlobalCallback.aidl b/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHwGlobalCallback.aidl
new file mode 100644
index 0000000..6dfee9f
--- /dev/null
+++ b/soundtrigger/aidl/aidl_api/android.hardware.soundtrigger3/current/android/hardware/soundtrigger3/ISoundTriggerHwGlobalCallback.aidl
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.soundtrigger3;
+@VintfStability
+interface ISoundTriggerHwGlobalCallback {
+ void onResourcesAvailable();
+}
diff --git a/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHw.aidl b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHw.aidl
new file mode 100644
index 0000000..2a3fc64
--- /dev/null
+++ b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHw.aidl
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.soundtrigger3;
+
+import android.hardware.soundtrigger3.ISoundTriggerHwCallback;
+import android.hardware.soundtrigger3.ISoundTriggerHwGlobalCallback;
+
+import android.media.soundtrigger.PhraseSoundModel;
+import android.media.soundtrigger.Properties;
+import android.media.soundtrigger.RecognitionConfig;
+import android.media.soundtrigger.SoundModel;
+import android.media.soundtrigger.ModelParameter;
+import android.media.soundtrigger.ModelParameterRange;
+
+/**
+ * SoundTrigger HAL interface. Used for hardware recognition of hotwords
+ * and other sounds.
+ *
+ * Basic usage:
+ * ============
+ * ISoundTriggerHw supports the ability to have one or more detection sessions running at a given
+ * time, listening to acoustic events. The basic flow of setting up such a session is:
+ * - Load a model using loadSoundModel() or loadPhraseSoundModel(). The provided model object
+ * indicates the (implementation-specific) detection algorithm (engine) to use, as well as any
+ * parameters applicable to this algorithm. Upon success, these methods return a handle which
+ * is used to reference this model in subsequent calls.
+ * - Once the model has been successfully loaded, detection can begin by calling startRecognition().
+ * - Recognition will continue running in the background until one of the following events occurs:
+ * - stopRecognition() has been called on this model.
+ * - A detection has occurred.
+ * - Detection was aborted, typically due to resource constraints, for example when a higher-
+ * priority use case has been initiated.
+ * - In the latter two cases, a recognition event will be sent via the callback interface that was
+ * registered by the client upon loading. In either case, after any of these events occur, the
+ * detection becomes inactive and no more recognition callbacks are allowed.
+ * - The same model may be started again at a later time, and this process may repeat as many times
+ * as needed.
+ * - Finally, an inactive model that is no longer needed may be unloaded via unloadSoundModel().
+ *
+ * Important notes about the threading model:
+ * ==========================================
+ * Both this interface and the corresponding callback interface use a synchronous calling
+ * convention. This model comes with some advantages, but also with some risks of deadlocks if the
+ * implementation does not handle this correctly. Please consider the following:
+ * - After stopRecognition() returns, no more recognition events for that model may be sent. This
+ * implies that any queues holding such events must be flushed before the call returns and that
+ * may imply that callbacks from the HAL to the client are made while stopRecognition() is blocked.
+ * This is OK and supported by the framework.
+ * - Similarly, the same relationship applies between unloadModel() and subsequent callbacks to
+ * modelUnloaded().
+ * - Other than these two cases, calls into the HAL *MAY NOT* block on callbacks from the HAL, or
+ * else deadlock conditions may result, which may only be recoverable by rebooting the HAL process,
+ * causing service outages.
+ *
+ * Due to the asynchronous nature of recognition events and preemptive model unloading, the HAL must
+ * correctly handle requests that would have been valid before an event has been delivered, but
+ * became moot as a result of the event. Namely:
+ * - stopRecognition() may be called on a model that has already delivered an event and become
+ * inactive as a result. The HAL must return a successful return code in this case.
+ * - Furthermore, if a model is preemptively unloaded after it triggers (typically, this would
+ * happen when it is first aborted and immediately preemptively unloaded), stopRecognition() may
+ * be called on it. The HAL must return successfully in this case.
+ * - startRecognition() may be called on a model that has been preemptively unloaded. In this case,
+ * the HAL must signal a ServiceSpecificException(RESOURCE_CONTENTION) to indicate that the
+ * operation is temporarily unsuccessful.
+ * - unloadSoundModel() may be called on a model that has been preemptively unloaded. The HAL must
+ * return a successful return code in this case. This also implies that model handles should
+ * generally not be reused until explicitly unloaded. To avoid the rare possibility of running out
+ * of handles, the framework may call unloadSoundModel() on models that have been preemptively
+ * unloaded by the HAL.
+ *
+ * Important notes about resource constraints and concurrency
+ * =========================================================
+ * Up until this version, the framework would enforce concurrency constraints expressed by the
+ * Properties presented by the soundtrigger instance. These include constraints on the maximum
+ * number of models that can be loaded at the same time and on running recognition while capturing
+ * from the microphone.
+ * This version changes the approach for how these constraints are modeled, both offering the HAL
+ * implementation more flexibility and simplifying the framework's job in enforcing these
+ * limitations. Note that there is no change in how the framework behaves with earlier versions;
+ * everything described below applies only to this version and onward.
+ * The way this is achieved is as follows:
+ * - The framework will no longer enforce constraints on concurrent loading of models, as expressed
+ * in the Properties.maxSoundModels field (this property is merely a hint at this point and may be
+ * deprecated in the future), or any other implicit constraints.
+ * - The framework will no longer enforce constraints on concurrency of audio recording and
+ * soundtrigger operation, as expressed in the Properties.concurrentCapture field (this property
+ * is merely a hint at this point and may be deprecated in the future).
+ * - The HAL implementation is free to reject loading of any model at any time by having the
+ * respective load*() method signal a ServiceSpecificException(RESOURCE_CONTENTION).
+ * - The HAL implementation is free to reject starting of any model at any time by having the
+ * respective start*() method signal a ServiceSpecificException(RESOURCE_CONTENTION).
+ * - The HAL implementation is free to preemptively stop a previously started model at its own
+ * discretion (for example, if a higher priority use-case which cannot coexist with detection
+ * has been requested). The HAL must notify the framework of the preemption by sending a
+ * recognition event with an `ABORTED` status. The implementation must NOT attempt to restart the
+ * recognition automatically when conditions change.
+ * - The HAL implementation is free to preemptively unload a previously loaded model at its own
+ * discretion (for example, if a higher-priority model is being loaded and the two cannot
+ * coexist). When doing so, it must first abort the detection if active (as per above) and then
+ * notify the framework of the unload using the modelUnloaded() callback.
+ * - When conditions change such that a model that previously could not be loaded or started, or
+ * that had previously been preemptively stopped or unloaded, might now succeed, the HAL must
+ * notify the framework via the newly added onResourcesAvailable() callback. This callback is not
+ * a guarantee that any operation would now succeed, but merely a hint that retrying something
+ * that had previously failed MAY now succeed. Until this callback is invoked, the client may
+ * assume that any operation that had previously failed or been aborted would still fail if
+ * retried, so the implementation should not forget to deliver it.
+ * There are no guarantees regarding how the framework may respond to this event and the order in
+ * which it may choose to reload/restart its models. Typically, as a result of this event, the
+ * framework will make a single attempt per model to bring this model to its desired state
+ * (loaded, started).
+ */
+@VintfStability
+interface ISoundTriggerHw {
+ /**
+ * Retrieve implementation properties.
+ *
+ * @return A Properties structure containing implementation description and capabilities.
+ */
+ Properties getProperties();
+
+ /**
+ * This will get called at most once per attachment to the service.
+ *
+ * All events not tied to a specific model should go through this callback.
+ *
+ * @param callback An interface to receive global event callbacks.
+ */
+ void registerGlobalCallback(in ISoundTriggerHwGlobalCallback callback);
+
+ /**
+ * Load a sound model. Once loaded, recognition of this model can be started and stopped.
+ *
+ * @param soundModel A SoundModel structure describing the sound model to load.
+ * @param callback The callback interface on which the recognitionCallback()
+ * method will be called upon completion and modelUnloaded() upon preemptive unload.
+ * @return A unique handle assigned by the HAL for use by the client when controlling
+ * activity for this sound model.
+ * @throws ServiceSpecificException(RESOURCE_CONTENTION) if the model cannot be loaded due
+ * to resource constraints. This is typically a temporary condition and the client may
+ * retry after the onResourcesAvailable() global callback is invoked.
+ */
+ int loadSoundModel(in SoundModel soundModel, in ISoundTriggerHwCallback callback);
+
+ /**
+ * Load a key phrase sound model. Once loaded, recognition of this model can be started and
+ * stopped.
+ *
+ * @param soundModel A PhraseSoundModel structure describing the sound model to load.
+ * @param callback The callback interface on which the phraseRecognitionCallback() method will
+ * be called upon completion and modelUnloaded() upon preemptive unload.
+ * @return A unique handle assigned by the HAL for use by the framework when controlling
+ * activity for this sound model.
+ * @throws ServiceSpecificException(RESOURCE_CONTENTION) if the model cannot be loaded due
+ * to resource constraints. This is typically a temporary condition and the client may
+ * retry after the onResourcesAvailable() global callback is invoked.
+ */
+ int loadPhraseSoundModel(in PhraseSoundModel soundModel, in ISoundTriggerHwCallback callback);
+
+ /**
+ * Unload a sound model. A sound model may be unloaded to free up resources and make room for a
+ * new one to overcome implementation limitations.
+ * This call is idempotent, to avoid any race conditions.
+ *
+ * @param modelHandle the handle of the sound model to unload.
+ */
+ void unloadSoundModel(in int modelHandle);
+
+ /**
+ * Start recognition on a given model.
+ * This must be called on a model that is in the stopped state.
+ * The state of this model becomes active and will remain so until explicitly stopped, or a
+ * recognition event has been delivered to the client.
+ *
+ * @param modelHandle the handle of the sound model to use for recognition
+ * @param deviceHandle The handle of the audio device to be used for recognition, as declared by
+ * the audio subsystem.
+ * @param ioHandle A handle assigned by the framework, which will later be used to retrieve
+ * an audio stream associated with this recognition session.
+ * @param config A RecognitionConfig structure containing attributes of the recognition to
+ * perform.
+ * @throws ServiceSpecificException(RESOURCE_CONTENTION) if the model cannot be started due
+ * to resource constraints. This is typically a temporary condition and the client may
+ * retry after the onResourcesAvailable() global callback is invoked.
+ */
+ void startRecognition(in int modelHandle, in int deviceHandle,
+ in int ioHandle, in RecognitionConfig config);
+
+ /**
+ * Stop recognition on a given model.
+ * This call is idempotent, to avoid any race conditions.
+ *
+ * @param modelHandle The handle of the sound model to stop recognition on.
+ */
+ void stopRecognition(in int modelHandle);
+
+ /**
+ * Request a recognition event to be generated.
+ * The model must be in the started state and will remain started after the event is sent.
+ * The model state is returned asynchronously as a RecognitionEvent via the callback that was
+ * registered upon loading. That event must have a RecognitionStatus.FORCED status.
+ *
+ * @param modelHandle The handle of the sound model whose state is being
+ * queried.
+ */
+ void forceRecognitionEvent(in int modelHandle);
+
+ /**
+ * Get supported parameter attributes with respect to the provided model handle.
+ * Model parameters are used to query/control model-specific detection behavior during a
+ * detection session.
+ * Along with determining the valid range, this API is also used to determine if a given
+ * parameter ID is supported at all by the modelHandle for use with getParameter() and
+ * setParameter() APIs.
+ *
+ * @param modelHandle The sound model handle indicating which model to query.
+ * @param modelParam Parameter to query, which will be validated against the ModelParameter type.
+ * @return This structure indicates supported attributes of the parameter for the given model
+ * handle. If the parameter is not supported, null is returned.
+ */
+ @nullable ModelParameterRange queryParameter(in int modelHandle, in ModelParameter modelParam);
+
+ /**
+ * Get a model specific parameter.
+ * If the value has not been set, a default value is returned. See ModelParameter for parameter
+ * default values.
+ * The caller must check if the handle supports the parameter via the queryParameter API prior
+ * to calling this method.
+ *
+ * @param modelHandle The sound model associated with the given modelParam.
+ * @param modelParam Parameter to get, which will be validated against the ModelParameter type.
+ * Not putting ModelParameter type directly in the definition and validating internally
+ * allows for forward compatibility.
+ * @return Value set to the requested parameter.
+ */
+ int getParameter(in int modelHandle, in ModelParameter modelParam);
+
+ /**
+ * Set a model specific parameter with the given value.
+ * This parameter will keep its value for the duration the model is loaded regardless of
+ * starting and stopping recognition. Once the model is unloaded, the value will be lost.
+ * The caller must check if the handle supports the parameter via the queryParameter API prior
+ * to calling this method.
+ *
+ * @param modelHandle The sound model handle indicating which model's parameters to modify.
+ * @param modelParam Parameter to set which will be validated against the ModelParameter type.
+ * Not putting ModelParameter type directly in the definition and validating internally
+ * allows for forward compatibility.
+ * @param value The value to set for the given model parameter.
+ */
+ void setParameter(in int modelHandle, in ModelParameter modelParam, in int value);
+}
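To make the basic usage flow and the RESOURCE_CONTENTION retry contract described above more tangible, here is a minimal, hypothetical client sketch against the Java backend of this interface. It is not part of this change: the instance name, the empty RecognitionConfig, the placeholder handles, and the use of the hidden ServiceManager.waitForDeclaredService() API are assumptions for illustration only.

    import android.hardware.soundtrigger3.ISoundTriggerHw;
    import android.hardware.soundtrigger3.ISoundTriggerHwCallback;
    import android.hardware.soundtrigger3.ISoundTriggerHwGlobalCallback;
    import android.media.soundtrigger.RecognitionConfig;
    import android.media.soundtrigger.SoundModel;
    import android.os.RemoteException;
    import android.os.ServiceManager;
    import android.os.ServiceSpecificException;

    public class SoundTriggerClientSketch {
        public static void tryStartDetection(SoundModel model, ISoundTriggerHwCallback callback,
                ISoundTriggerHwGlobalCallback globalCallback) throws RemoteException {
            // Assumed instance name "default"; waitForDeclaredService() is a hidden API, used here
            // only because the mock CLI later in this change already relies on hidden
            // ServiceManager APIs.
            ISoundTriggerHw hal = ISoundTriggerHw.Stub.asInterface(
                    ServiceManager.waitForDeclaredService(
                            "android.hardware.soundtrigger3.ISoundTriggerHw/default"));
            if (hal == null) {
                return; // Service not declared; nothing to do in this sketch.
            }
            // The global callback is registered at most once per attachment to the service.
            hal.registerGlobalCallback(globalCallback);
            try {
                int handle = hal.loadSoundModel(model, callback);
                // A real client would populate RecognitionConfig from the model's requirements;
                // deviceHandle and ioHandle would come from the audio framework.
                hal.startRecognition(handle, /* deviceHandle= */ 0, /* ioHandle= */ 0,
                        new RecognitionConfig());
            } catch (ServiceSpecificException e) {
                // Per the contract above, RESOURCE_CONTENTION is temporary: park this request and
                // retry only after ISoundTriggerHwGlobalCallback.onResourcesAvailable() fires.
                // (Matching e.errorCode against the RESOURCE_CONTENTION constant is left out, as
                // its defining type is not part of this diff.)
            }
        }
    }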
diff --git a/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHwCallback.aidl b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHwCallback.aidl
new file mode 100644
index 0000000..049ca65
--- /dev/null
+++ b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHwCallback.aidl
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.soundtrigger3;
+
+import android.media.soundtrigger.PhraseRecognitionEvent;
+import android.media.soundtrigger.RecognitionEvent;
+
+/**
+ * SoundTrigger HAL per-model Callback interface.
+ */
+@VintfStability
+interface ISoundTriggerHwCallback {
+ /**
+ * Callback method called by the HAL when a model has been unloaded at the HAL implementation's
+ * discretion.
+ * This event may only be delivered when the model state is 'stopped'.
+ * This event is NOT sent as part of an unload sequence initiated by the client.
+ *
+ * @param model The model handle.
+ */
+ void modelUnloaded(in int model);
+
+ /**
+ * Callback method called by the HAL when the sound recognition triggers for a key phrase sound
+ * model.
+ * This event may only be delivered when the model state is 'started'.
+ * Unless the status of the event is RecognitionStatus.FORCED, this event indicates that the
+ * state of this model has become 'stopped'.
+ *
+ * @param event A PhraseRecognitionEvent structure containing detailed results of the triggered
+ * recognition.
+ */
+ void phraseRecognitionCallback(in int model, in PhraseRecognitionEvent event);
+
+ /**
+ * Callback method called by the HAL when the sound recognition triggers.
+ * This event may only be delivered when the model state is 'started'.
+ * Unless the status of the event is RecognitionStatus.FORCED, this event indicates that the
+ * state of this model has become 'stopped'.
+ *
+ * @param event A RecognitionEvent structure containing detailed results of the triggered
+ * recognition.
+ */
+ void recognitionCallback(in int model, in RecognitionEvent event);
+}
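As a companion to the contract above, the following hypothetical client-side callback sketch (Java backend assumed, with RecognitionStatus taken from the imported android.media.soundtrigger types) shows the bookkeeping the documentation implies: any non-FORCED recognition event, and any modelUnloaded() call, means the model is no longer active from the client's point of view. The active-model set is invented for illustration and not part of this change.

    import android.hardware.soundtrigger3.ISoundTriggerHwCallback;
    import android.media.soundtrigger.PhraseRecognitionEvent;
    import android.media.soundtrigger.RecognitionEvent;
    import android.media.soundtrigger.RecognitionStatus;

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class ClientCallbackSketch extends ISoundTriggerHwCallback.Stub {
        // Hypothetical bookkeeping: handles of models the client believes are started.
        private final Set<Integer> mActiveModels = ConcurrentHashMap.newKeySet();

        @Override
        public void recognitionCallback(int model, RecognitionEvent event) {
            // Unless the event is FORCED, the model implicitly transitions to 'stopped'.
            if (event.status != RecognitionStatus.FORCED) {
                mActiveModels.remove(model);
            }
        }

        @Override
        public void phraseRecognitionCallback(int model, PhraseRecognitionEvent event) {
            if (event.common.status != RecognitionStatus.FORCED) {
                mActiveModels.remove(model);
            }
        }

        @Override
        public void modelUnloaded(int model) {
            // Preemptive unload: the handle is gone, though a later unloadSoundModel() stays legal.
            mActiveModels.remove(model);
        }
    }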
diff --git a/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHwGlobalCallback.aidl b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHwGlobalCallback.aidl
new file mode 100644
index 0000000..d6d8630
--- /dev/null
+++ b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHwGlobalCallback.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.soundtrigger3;
+
+/**
+ * SoundTrigger HAL callback interface for events not associated with a particular model.
+ */
+@VintfStability
+interface ISoundTriggerHwGlobalCallback {
+ /**
+ * Callback method called by the HAL whenever internal conditions have changed, such that a call
+ * that would previously have failed with a ServiceSpecificException(RESOURCE_CONTENTION) may now
+ * succeed.
+ * There is no guarantee that any call would succeed following this event. It is merely a hint
+ * to the client that it may retry.
+ * Conversely, any call that has failed previously with a
+ * ServiceSpecificException(RESOURCE_CONTENTION) is guaranteed to fail again if retried, until
+ * this callback is delivered.
+ */
+ void onResourcesAvailable();
+}
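The "hint, not a guarantee" semantics spelled out above boil down to a small amount of client logic. Here is a hypothetical sketch (Java backend assumed; the deferred-work queue is invented for illustration and is not part of this change):

    import android.hardware.soundtrigger3.ISoundTriggerHwGlobalCallback;

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class RetryOnResourcesAvailable extends ISoundTriggerHwGlobalCallback.Stub {
        // Operations (e.g. loadSoundModel/startRecognition retries) that previously failed with
        // RESOURCE_CONTENTION and are waiting for the hint to retry.
        private final Queue<Runnable> mDeferred = new ConcurrentLinkedQueue<>();

        /** Called by client code whenever a HAL call fails with RESOURCE_CONTENTION. */
        public void defer(Runnable retry) {
            mDeferred.add(retry);
        }

        @Override
        public void onResourcesAvailable() {
            // Only a hint: make one retry attempt per deferred operation. If an operation fails
            // again, the client re-defers it and waits for the next onResourcesAvailable().
            Runnable retry;
            while ((retry = mDeferred.poll()) != null) {
                retry.run();
            }
        }
    }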
diff --git a/soundtrigger/aidl/cli/Android.bp b/soundtrigger/aidl/cli/Android.bp
new file mode 100644
index 0000000..e8999ff
--- /dev/null
+++ b/soundtrigger/aidl/cli/Android.bp
@@ -0,0 +1,17 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+java_binary {
+ name: "sthal_cli_3",
+ wrapper: "sthal_cli_3",
+ srcs: ["java/**/*.java"],
+ static_libs: [
+ "android.hardware.soundtrigger3-V1-java",
+ ],
+}
diff --git a/soundtrigger/aidl/cli/OWNERS b/soundtrigger/aidl/cli/OWNERS
new file mode 100644
index 0000000..e21b66e
--- /dev/null
+++ b/soundtrigger/aidl/cli/OWNERS
@@ -0,0 +1 @@
+include /media/java/android/media/soundtrigger_middleware/OWNERS
diff --git a/soundtrigger/aidl/cli/java/android/hardware/soundtrigger3/cli/SthalCli.java b/soundtrigger/aidl/cli/java/android/hardware/soundtrigger3/cli/SthalCli.java
new file mode 100644
index 0000000..d3e1aa7
--- /dev/null
+++ b/soundtrigger/aidl/cli/java/android/hardware/soundtrigger3/cli/SthalCli.java
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.hardware.soundtrigger3.cli;
+
+import android.annotation.NonNull;
+import android.hardware.soundtrigger3.ISoundTriggerHw;
+import android.hardware.soundtrigger3.ISoundTriggerHwCallback;
+import android.hardware.soundtrigger3.ISoundTriggerHwGlobalCallback;
+import android.media.audio.common.AudioConfig;
+import android.media.soundtrigger.ConfidenceLevel;
+import android.media.soundtrigger.ModelParameterRange;
+import android.media.soundtrigger.PhraseRecognitionEvent;
+import android.media.soundtrigger.PhraseRecognitionExtra;
+import android.media.soundtrigger.PhraseSoundModel;
+import android.media.soundtrigger.Properties;
+import android.media.soundtrigger.RecognitionConfig;
+import android.media.soundtrigger.RecognitionEvent;
+import android.media.soundtrigger.RecognitionMode;
+import android.media.soundtrigger.SoundModel;
+import android.media.soundtrigger.SoundModelType;
+import android.os.HwBinder;
+import android.os.ParcelFileDescriptor;
+import android.os.RemoteException;
+import android.os.ServiceManager;
+import android.os.SystemProperties;
+
+import java.util.Scanner;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * This is a quick-and-dirty sound trigger HAL console mock.
+ *
+ * It only works on userdebug builds.
+ *
+ * When this app is started, it will initially:
+ * - Register an ISoundTriggerHw HAL with the instance name "mock".
+ * - Set a sysprop that tells SoundTriggerMiddlewareService to try to connect to the mock instance
+ * rather than the default one.
+ * - Reboot the real (default) HAL.
+ *
+ * In response to that, SoundTriggerMiddlewareService is going to connect to the mock HAL and resume
+ * normal operation.
+ *
+ * Our mock HAL will print to stdout every call it receives as well as expose a basic set of
+ * operations for sending event callbacks to the client. This allows us to simulate the framework's
+ * behavior in response to different HAL behaviors.
+ */
+public class SthalCli {
+ private static SoundTriggerImpl mService;
+ private static final Scanner scanner = new Scanner(System.in);
+
+ public static void main(String[] args) {
+ try {
+ printUsage();
+
+ System.out.println("Registering mock STHAL");
+ mService = new SoundTriggerImpl();
+ // This allows us to register the service, even if it is not declared in the manifest.
+ mService.forceDowngradeToSystemStability();
+ ServiceManager.addService(ISoundTriggerHw.class.getCanonicalName() + "/mock", mService);
+
+ System.out.println("Rebooting STHAL");
+ SystemProperties.set("debug.soundtrigger_middleware.use_mock_hal", "3");
+ SystemProperties.set("sys.audio.restart.hal", "1");
+
+ while (processCommand()) ;
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ cleanup();
+ }
+ }
+
+ private static void cleanup() {
+ System.out.println("Cleaning up.");
+ SystemProperties.set("debug.soundtrigger_middleware.use_mock_hal", null);
+ HwBinder.setTrebleTestingOverride(false);
+ }
+
+ private static boolean processCommand() {
+ String line = scanner.nextLine();
+ String[] tokens = line.split("\\s+");
+ if (tokens.length < 1) {
+ return false;
+ }
+ switch (tokens[0]) {
+ case "q":
+ return false;
+
+ case "a":
+ mService.sendOnResourcesAvailable();
+ return true;
+
+ case "u":
+ mService.sendModelUnloaded(Integer.parseInt(tokens[1]));
+ return true;
+
+ case "r":
+ mService.sendRecognitionEvent(Integer.parseInt(tokens[1]),
+ Integer.parseInt(tokens[2]));
+ return true;
+
+ case "p":
+ mService.sendPhraseRecognitionEvent(Integer.parseInt(tokens[1]),
+ Integer.parseInt(tokens[2]));
+ return true;
+
+ case "d":
+ mService.dumpModels();
+ return true;
+
+ default:
+ printUsage();
+ return true;
+ }
+ }
+
+ private static void printUsage() {
+ System.out.print(
+ "Sound Trigger HAL v3 mock\n"
+ + "Available commands:\n"
+ + "h - help\n"
+ + "q - quit\n"
+ + "a - send onResourcesAvailable event\n"
+ + "u <model> - send modelUnloaded event\n"
+ + "r <model> <status> - send recognitionEvent\n"
+ + "p <model> <status> - send phraseRecognitionEvent\n"
+ + "d - dump models\n");
+ }
+
+ private static class SoundTriggerImpl extends ISoundTriggerHw.Stub {
+ static class Model {
+ final ISoundTriggerHwCallback callback;
+ final SoundModel model;
+ final PhraseSoundModel phraseModel;
+ public RecognitionConfig config = null;
+
+ Model(ISoundTriggerHwCallback callback, SoundModel model) {
+ this.callback = callback;
+ this.model = model;
+ this.phraseModel = null;
+ }
+
+ Model(ISoundTriggerHwCallback callback, PhraseSoundModel model) {
+ this.callback = callback;
+ this.model = null;
+ this.phraseModel = model;
+ }
+ }
+
+ private ISoundTriggerHwGlobalCallback mGlobalCallback;
+ private final ConcurrentMap<Integer, Model> mLoadedModels = new ConcurrentHashMap<>();
+ private int mHandleCounter = 1;
+
+ public void dumpModels() {
+ mLoadedModels.forEach((handle, model) -> {
+ System.out.println("+++ Model " + handle);
+ System.out.println(" config = " + model.config);
+ RecognitionConfig recognitionConfig = model.config;
+ if (recognitionConfig != null) {
+ System.out.println(" ACTIVE recognitionConfig = " + recognitionConfig);
+ } else {
+ System.out.println(" INACTIVE");
+ }
+ });
+ }
+
+ public void sendOnResourcesAvailable() {
+ if (mGlobalCallback != null) {
+ try {
+ mGlobalCallback.onResourcesAvailable();
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public void sendRecognitionEvent(int modelHandle, int status) {
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null && model.config != null) {
+ RecognitionEvent event = new RecognitionEvent();
+ event.type = SoundModelType.GENERIC;
+ event.status = status;
+ event.captureAvailable = true;
+ event.audioConfig = new AudioConfig();
+ event.audioConfig.channelMask = 16;
+ event.audioConfig.format = 1;
+ event.audioConfig.sampleRateHz = 16000;
+ try {
+ model.callback.recognitionCallback(modelHandle, event);
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ model.config = null;
+ }
+ }
+
+ public void sendPhraseRecognitionEvent(int modelHandle, int status) {
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null && model.config != null) {
+ PhraseRecognitionEvent event = new PhraseRecognitionEvent();
+ event.common = new RecognitionEvent();
+ event.common.type = SoundModelType.KEYPHRASE;
+ event.common.status = status;
+ event.common.captureAvailable = true;
+ event.common.audioConfig = new AudioConfig();
+ event.common.audioConfig.channelMask = 16;
+ event.common.audioConfig.format = 1;
+ event.common.audioConfig.sampleRateHz = 16000;
+ if (model.phraseModel.phrases.length > 0) {
+ PhraseRecognitionExtra extra = new PhraseRecognitionExtra();
+ extra.id = model.phraseModel.phrases[0].id;
+ extra.confidenceLevel = 100;
+ extra.recognitionModes = model.phraseModel.phrases[0].recognitionModes;
+ extra.levels = new ConfidenceLevel[0];
+ event.phraseExtras = new PhraseRecognitionExtra[]{extra};
+ } else {
+ event.phraseExtras = new PhraseRecognitionExtra[0];
+ }
+ try {
+ model.callback.phraseRecognitionCallback(modelHandle, event);
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ model.config = null;
+ }
+ }
+
+ public void sendModelUnloaded(int modelHandle) {
+ Model model = mLoadedModels.remove(modelHandle);
+ if (model != null) {
+ try {
+ model.callback.modelUnloaded(modelHandle);
+ } catch (RemoteException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public void registerGlobalCallback(ISoundTriggerHwGlobalCallback callback) {
+ System.out.println("registerGlobalCallback()");
+ mGlobalCallback = callback;
+ }
+
+ @Override
+ public int loadSoundModel(SoundModel soundModel, ISoundTriggerHwCallback callback) {
+ int handle = mHandleCounter++;
+ System.out.printf("loadSoundModel(soundModel=%s) -> %d%n", soundModel, handle);
+ mLoadedModels.put(handle, new Model(callback, soundModel));
+ return handle;
+ }
+
+ @Override
+ public int loadPhraseSoundModel(PhraseSoundModel soundModel,
+ ISoundTriggerHwCallback callback) {
+ int handle = mHandleCounter++;
+ System.out.printf("loadPhraseSoundModel(soundModel=%s) -> %d%n", soundModel, handle);
+ mLoadedModels.put(handle, new Model(callback, soundModel));
+ return handle;
+ }
+
+ @Override
+ public void startRecognition(int modelHandle, int deviceHandle, int ioHandle,
+ RecognitionConfig config) {
+ System.out.printf("startRecognition(modelHandle=%d, deviceHandle=%d, ioHandle=%d)%n",
+ modelHandle, deviceHandle, ioHandle);
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null) {
+ model.config = config;
+ }
+ }
+
+ @Override
+ public Properties getProperties() {
+ System.out.println("getProperties()");
+ Properties properties = new Properties();
+ properties.implementor = "Android";
+ properties.description = "Mock STHAL";
+ properties.maxSoundModels = 2;
+ properties.maxKeyPhrases = 1;
+ properties.recognitionModes =
+ RecognitionMode.VOICE_TRIGGER | RecognitionMode.GENERIC_TRIGGER;
+ return properties;
+ }
+
+ @Override
+ public ModelParameterRange queryParameter(int modelHandle, int modelParam) {
+ System.out.printf("queryParameter(modelHandle=%d, modelParam=%d)%n", modelHandle,
+ modelParam);
+ return null;
+ }
+
+ @Override
+ public void forceRecognitionEvent(int modelHandle) {
+ System.out.printf("forceRecognitionEvent(modelHandle=%d)%n", modelHandle);
+ }
+
+ @Override
+ public void unloadSoundModel(int modelHandle) {
+ System.out.printf("unloadSoundModel(modelHandle=%d)%n", modelHandle);
+ }
+
+ @Override
+ public void stopRecognition(int modelHandle) {
+ System.out.printf("stopRecognition(modelHandle=%d)%n", modelHandle);
+ Model model = mLoadedModels.get(modelHandle);
+ if (model != null) {
+ model.config = null;
+ }
+ }
+
+ @Override
+ public int handleShellCommand(@NonNull ParcelFileDescriptor in,
+ @NonNull ParcelFileDescriptor out, @NonNull ParcelFileDescriptor err,
+ @NonNull String[] args) {
+ if (args.length > 0) {
+ switch (args[0]) {
+ case "reboot":
+ System.out.println("Received a reboot request. Exiting.");
+ cleanup();
+ System.exit(1);
+ }
+ }
+ return 0;
+ }
+
+ @Override
+ public void setParameter(int modelHandle, int modelParam, int value) {
+ throw new IllegalArgumentException();
+ }
+
+ @Override
+ public int getParameter(int modelHandle, int modelParam) {
+ throw new IllegalArgumentException();
+ }
+ }
+}
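One natural extension of this mock, sketched here only as a hypothetical addition to SoundTriggerImpl (it is not part of this change), is a helper that simulates the preemptive-abort path described in the soundtrigger3 interface documentation by reusing sendRecognitionEvent() with an ABORTED status. The RecognitionStatus constant name is assumed from the android.media.soundtrigger types.

    // Hypothetical addition to SoundTriggerImpl: simulate the HAL preemptively aborting an
    // active recognition. Per the soundtrigger3 contract this must surface to the framework
    // as a recognition event with ABORTED status; the model then stays loaded but inactive.
    public void sendAbortEvent(int modelHandle) {
        sendRecognitionEvent(modelHandle, android.media.soundtrigger.RecognitionStatus.ABORTED);
    }

Wired into processCommand() behind an extra single-letter command, this would let the console exercise the framework's abort handling without involving a real HAL.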
diff --git a/soundtrigger/aidl/cli/sthal_cli_3 b/soundtrigger/aidl/cli/sthal_cli_3
new file mode 100644
index 0000000..f157c50
--- /dev/null
+++ b/soundtrigger/aidl/cli/sthal_cli_3
@@ -0,0 +1,6 @@
+#!/system/bin/sh
+# Script to start "sthal_cli_3" on the device
+#
+base=/system
+export CLASSPATH=$base/framework/sthal_cli_3.jar
+exec app_process $base/bin android.hardware.soundtrigger3.cli.SthalCli "$@"
diff --git a/tetheroffload/control/1.1/ITetheringOffloadCallback.hal b/tetheroffload/control/1.1/ITetheringOffloadCallback.hal
index 7a7d56d..9c74641 100644
--- a/tetheroffload/control/1.1/ITetheringOffloadCallback.hal
+++ b/tetheroffload/control/1.1/ITetheringOffloadCallback.hal
@@ -26,8 +26,8 @@
interface ITetheringOffloadCallback extends @1.0::ITetheringOffloadCallback {
/**
* Called when an asynchronous event is generated by the hardware
- * management process. Events which are common for 1.0 and 1.1 HAL
- * MUST be fired on both 1.0 and 1.1 callback.
+ * management process. Implementations that report events via this callback
+ * should not invoke onEvent() of the 1.0 HAL.
*/
oneway onEvent_1_1(OffloadCallbackEvent event);
};
diff --git a/tv/tuner/1.0/vts/functional/AndroidTest.xml b/tv/tuner/1.0/vts/functional/AndroidTest.xml
index 3a2db27..18c2b59 100644
--- a/tv/tuner/1.0/vts/functional/AndroidTest.xml
+++ b/tv/tuner/1.0/vts/functional/AndroidTest.xml
@@ -30,5 +30,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="VtsHalTvTunerV1_0TargetTest" />
+ <option name="native-test-timeout" value="30m" />
</test>
</configuration>
diff --git a/tv/tuner/1.0/vts/functional/DescramblerTests.cpp b/tv/tuner/1.0/vts/functional/DescramblerTests.cpp
index 2e27475..67f6bae 100644
--- a/tv/tuner/1.0/vts/functional/DescramblerTests.cpp
+++ b/tv/tuner/1.0/vts/functional/DescramblerTests.cpp
@@ -53,12 +53,15 @@
return failure();
}
- auto status = mCas->setSessionPrivateData(sessionId, hidlPvtData);
- if (status != android::hardware::cas::V1_0::Status::OK) {
- ALOGW("[vts] Failed to set session private data");
- mCas->closeSession(sessionId);
- return failure();
+ if (hidlPvtData.size() > 0) {
+ auto status = mCas->setSessionPrivateData(sessionId, hidlPvtData);
+ if (status != android::hardware::cas::V1_0::Status::OK) {
+ ALOGW("[vts] Failed to set session private data");
+ mCas->closeSession(sessionId);
+ return failure();
+ }
}
+
return success();
}
diff --git a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
index 62093cc..b39abe3 100644
--- a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
+++ b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
@@ -267,7 +267,9 @@
uint32_t demuxId;
sp<IDemux> demux;
ASSERT_TRUE(mDemuxTests.openDemux(demux, demuxId));
+ mDvrTests.setDemux(demux);
+ DvrConfig dvrSourceConfig;
if (record.hasFrontendConnection) {
uint32_t feId;
mFrontendTests.getFrontendIdByType(frontendConf.type, feId);
@@ -275,13 +277,17 @@
ASSERT_TRUE(mFrontendTests.openFrontendById(feId));
ASSERT_TRUE(mFrontendTests.setFrontendCallback());
ASSERT_TRUE(mDemuxTests.setDemuxFrontendDataSource(feId));
+ } else {
+ dvrSourceConfig = dvrMap[record.dvrSourceId];
+ ASSERT_TRUE(mDvrTests.openDvrInDemux(dvrSourceConfig.type, dvrSourceConfig.bufferSize));
+ ASSERT_TRUE(mDvrTests.configDvrPlayback(dvrSourceConfig.settings));
+ ASSERT_TRUE(mDvrTests.getDvrPlaybackMQDescriptor());
}
uint32_t filterId;
sp<IFilter> filter;
mFilterTests.setDemux(demux);
- mDvrTests.setDemux(demux);
ASSERT_TRUE(mDvrTests.openDvrInDemux(dvrConf.type, dvrConf.bufferSize));
ASSERT_TRUE(mDvrTests.configDvrRecord(dvrConf.settings));
ASSERT_TRUE(mDvrTests.getDvrRecordMQDescriptor());
@@ -327,6 +333,7 @@
mFrontendTests.setDemux(demux);
} else {
dvrSourceConfig = dvrMap[descrambling.dvrSourceId];
+ mDvrTests.setDemux(demux);
ASSERT_TRUE(mDvrTests.openDvrInDemux(dvrSourceConfig.type, dvrSourceConfig.bufferSize));
ASSERT_TRUE(mDvrTests.configDvrPlayback(dvrSourceConfig.settings));
ASSERT_TRUE(mDvrTests.getDvrPlaybackMQDescriptor());
@@ -641,7 +648,7 @@
TEST_P(TunerRecordHidlTest, LnbRecordDataFlowWithTsRecordFilterTest) {
description("Feed ts data from Fe with Lnb to recording and test with ts record filter");
- if (lnbRecord.support) {
+ if (!lnbRecord.support) {
return;
}
recordSingleFilterTestWithLnb(filterMap[lnbRecord.recordFilterId],
@@ -651,7 +658,7 @@
TEST_P(TunerDescramblerHidlTest, CreateDescrambler) {
description("Create Descrambler");
- if (descrambling.support) {
+ if (!descrambling.support) {
return;
}
uint32_t demuxId;
@@ -678,7 +685,7 @@
TEST_P(TunerDescramblerHidlTest, ScrambledBroadcastDataFlowMediaFiltersTest) {
description("Test ts audio filter in scrambled broadcast use case");
- if (descrambling.support) {
+ if (!descrambling.support) {
return;
}
set<FilterConfig> filterConfs;
diff --git a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h
index 885cafd..2cea181 100644
--- a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h
+++ b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h
@@ -216,8 +216,10 @@
return false;
}
- bool filterIsValid = filterMap.find(live.audioFilterId) != filterMap.end() &&
- filterMap.find(live.videoFilterId) != filterMap.end();
+ bool filterIsValid = (live.hasFrontendConnection)
+ ? filterMap.find(live.audioFilterId) != filterMap.end() &&
+ filterMap.find(live.videoFilterId) != filterMap.end()
+ : true;
filterIsValid &= playback.support
? (filterMap.find(playback.audioFilterId) != filterMap.end() &&
filterMap.find(playback.videoFilterId) != filterMap.end())
diff --git a/tv/tuner/1.1/vts/functional/AndroidTest.xml b/tv/tuner/1.1/vts/functional/AndroidTest.xml
index 28f95db..3e6878c 100644
--- a/tv/tuner/1.1/vts/functional/AndroidTest.xml
+++ b/tv/tuner/1.1/vts/functional/AndroidTest.xml
@@ -29,5 +29,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="VtsHalTvTunerV1_1TargetTest" />
+ <option name="native-test-timeout" value="30m" />
</test>
</configuration>
diff --git a/tv/tuner/1.1/vts/functional/FilterTests.cpp b/tv/tuner/1.1/vts/functional/FilterTests.cpp
index 3bcf32a..4dff853 100644
--- a/tv/tuner/1.1/vts/functional/FilterTests.cpp
+++ b/tv/tuner/1.1/vts/functional/FilterTests.cpp
@@ -306,8 +306,12 @@
android::hardware::tv::tuner::V1_1::IFilter::castFrom(mFilters[filterId]);
if (filter_v1_1 != NULL) {
status = filter_v1_1->configureMonitorEvent(monitorEventTypes);
- mFilterCallbacks[filterId]->testFilterScramblingEvent();
- mFilterCallbacks[filterId]->testFilterIpCidEvent();
+ if (monitorEventTypes & DemuxFilterMonitorEventType::SCRAMBLING_STATUS) {
+ mFilterCallbacks[filterId]->testFilterScramblingEvent();
+ }
+ if (monitorEventTypes & DemuxFilterMonitorEventType::IP_CID_CHANGE) {
+ mFilterCallbacks[filterId]->testFilterIpCidEvent();
+ }
} else {
ALOGW("[vts] Can't cast IFilter into v1_1.");
return failure();
diff --git a/tv/tuner/1.1/vts/functional/FilterTests.h b/tv/tuner/1.1/vts/functional/FilterTests.h
index 59611fa..72c8129 100644
--- a/tv/tuner/1.1/vts/functional/FilterTests.h
+++ b/tv/tuner/1.1/vts/functional/FilterTests.h
@@ -57,6 +57,7 @@
using android::hardware::tv::tuner::V1_1::AvStreamType;
using android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
using android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
+using android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEventType;
using android::hardware::tv::tuner::V1_1::IFilterCallback;
using android::hardware::tv::tuner::V1_1::ITuner;
diff --git a/tv/tuner/1.1/vts/functional/VtsHalTvTunerV1_1TargetTest.cpp b/tv/tuner/1.1/vts/functional/VtsHalTvTunerV1_1TargetTest.cpp
index e70c320..1a9def8 100644
--- a/tv/tuner/1.1/vts/functional/VtsHalTvTunerV1_1TargetTest.cpp
+++ b/tv/tuner/1.1/vts/functional/VtsHalTvTunerV1_1TargetTest.cpp
@@ -112,8 +112,8 @@
ASSERT_TRUE(mFilterTests.openFilterInDemux(filterConf.config1_0.type,
filterConf.config1_0.bufferSize));
ASSERT_TRUE(mFilterTests.getNewlyOpenedFilterId_64bit(filterId));
- ASSERT_TRUE(mFilterTests.getSharedAvMemoryHandle(filterId));
ASSERT_TRUE(mFilterTests.configFilter(filterConf.config1_0.settings, filterId));
+ ASSERT_TRUE(mFilterTests.getSharedAvMemoryHandle(filterId));
ASSERT_TRUE(mFilterTests.configAvFilterStreamType(filterConf.streamType, filterId));
ASSERT_TRUE(mFilterTests.getFilterMQDescriptor(filterId, filterConf.config1_0.getMqDesc));
ASSERT_TRUE(mFilterTests.startFilter(filterId));
diff --git a/tv/tuner/config/TunerTestingConfigReaderV1_0.h b/tv/tuner/config/TunerTestingConfigReaderV1_0.h
index f7f72b0..d049b07 100644
--- a/tv/tuner/config/TunerTestingConfigReaderV1_0.h
+++ b/tv/tuner/config/TunerTestingConfigReaderV1_0.h
@@ -52,6 +52,7 @@
using android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
using android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
using android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
+using android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
using android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
using android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
using android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
@@ -480,7 +481,6 @@
return;
}
auto recordConfig = *dataFlow.getFirstDvrRecord();
- record.frontendId = recordConfig.getFrontendConnection();
record.recordFilterId = recordConfig.getRecordFilterConnection();
record.dvrRecordId = recordConfig.getDvrRecordConnection();
if (recordConfig.hasDvrSoftwareFeConnection()) {
@@ -489,6 +489,7 @@
if (recordConfig.getHasFrontendConnection()) {
record.hasFrontendConnection = true;
record.dvrSourceId = emptyHardwareId;
+ record.frontendId = recordConfig.getFrontendConnection();
} else {
record.hasFrontendConnection = false;
record.dvrSourceId = recordConfig.getDvrSourceConnection();
@@ -504,7 +505,6 @@
return;
}
auto descConfig = *dataFlow.getFirstDescrambling();
- descrambling.frontendId = descConfig.getFrontendConnection();
descrambling.descramblerId = descConfig.getDescramblerConnection();
descrambling.audioFilterId = descConfig.getAudioFilterConnection();
descrambling.videoFilterId = descConfig.getVideoFilterConnection();
@@ -514,6 +514,7 @@
if (descConfig.getHasFrontendConnection()) {
descrambling.hasFrontendConnection = true;
descrambling.dvrSourceId = emptyHardwareId;
+ descrambling.frontendId = descConfig.getFrontendConnection();
} else {
descrambling.hasFrontendConnection = false;
descrambling.dvrSourceId = descConfig.getDvrSourceConnection();
@@ -593,6 +594,16 @@
}
dvbtSettings.bandwidth = static_cast<FrontendDvbtBandwidth>(dvbt->getBandwidth());
dvbtSettings.isHighPriority = dvbt->getIsHighPriority();
+ dvbtSettings.hierarchy = static_cast<FrontendDvbtHierarchy>(dvbt->getHierarchy());
+ dvbtSettings.hpCoderate = static_cast<FrontendDvbtCoderate>(dvbt->getHpCoderate());
+ dvbtSettings.lpCoderate = static_cast<FrontendDvbtCoderate>(dvbt->getLpCoderate());
+ dvbtSettings.guardInterval =
+ static_cast<FrontendDvbtGuardInterval>(dvbt->getGuardInterval());
+ dvbtSettings.standard = static_cast<FrontendDvbtStandard>(dvbt->getStandard());
+ dvbtSettings.isMiso = dvbt->getIsMiso();
+ dvbtSettings.plpMode = static_cast<FrontendDvbtPlpMode>(dvbt->getPlpMode());
+ dvbtSettings.plpId = dvbt->getPlpId();
+ dvbtSettings.plpGroupId = dvbt->getPlpGroupId();
if (dvbt->hasConstellation()) {
dvbtSettings.constellation =
static_cast<FrontendDvbtConstellation>(dvbt->getConstellation());
diff --git a/tv/tuner/config/api/current.txt b/tv/tuner/config/api/current.txt
index ef73315..d026bf9 100644
--- a/tv/tuner/config/api/current.txt
+++ b/tv/tuner/config/api/current.txt
@@ -180,11 +180,29 @@
ctor public DvbtFrontendSettings();
method @Nullable public java.math.BigInteger getBandwidth();
method @Nullable public java.math.BigInteger getConstellation();
+ method @Nullable public java.math.BigInteger getGuardInterval();
+ method @Nullable public java.math.BigInteger getHierarchy();
+ method @Nullable public java.math.BigInteger getHpCoderate();
method @Nullable public java.math.BigInteger getIsHighPriority();
+ method @Nullable public java.math.BigInteger getIsMiso();
+ method @Nullable public java.math.BigInteger getLpCoderate();
+ method @Nullable public java.math.BigInteger getPlpGroupId();
+ method @Nullable public java.math.BigInteger getPlpId();
+ method @Nullable public java.math.BigInteger getPlpMode();
+ method @Nullable public java.math.BigInteger getStandard();
method @Nullable public java.math.BigInteger getTransmissionMode();
method public void setBandwidth(@Nullable java.math.BigInteger);
method public void setConstellation(@Nullable java.math.BigInteger);
+ method public void setGuardInterval(@Nullable java.math.BigInteger);
+ method public void setHierarchy(@Nullable java.math.BigInteger);
+ method public void setHpCoderate(@Nullable java.math.BigInteger);
method public void setIsHighPriority(@Nullable java.math.BigInteger);
+ method public void setIsMiso(@Nullable java.math.BigInteger);
+ method public void setLpCoderate(@Nullable java.math.BigInteger);
+ method public void setPlpGroupId(@Nullable java.math.BigInteger);
+ method public void setPlpId(@Nullable java.math.BigInteger);
+ method public void setPlpMode(@Nullable java.math.BigInteger);
+ method public void setStandard(@Nullable java.math.BigInteger);
method public void setTransmissionMode(@Nullable java.math.BigInteger);
}
diff --git a/tv/tuner/config/sample_tuner_vts_config_1_0.xml b/tv/tuner/config/sample_tuner_vts_config_1_0.xml
index 2624076..347e984 100644
--- a/tv/tuner/config/sample_tuner_vts_config_1_0.xml
+++ b/tv/tuner/config/sample_tuner_vts_config_1_0.xml
@@ -54,7 +54,10 @@
<frontends>
<frontend id="FE_DEFAULT" type="DVBT" isSoftwareFrontend="true"
connectToCicamId="0" frequency="578000" endFrequency="800000">
- <dvbtFrontendSettings bandwidth="8" transmissionMode="1" isHighPriority="1"/>
+ <dvbtFrontendSettings bandwidth="8" transmissionMode="1" isHighPriority="1"
+ constellation="1" hierarchy="1" hpCoderate="1" lpCoderate="1"
+ guardInterval="1" standard="1" isMiso="0" plpMode="1"
+ plpId="0" plpGroupId="0"/>
</frontend>
<frontend id="FE_DVBS_0" type="DVBS" isSoftwareFrontend="true"
connectToCicamId="0" frequency="578000" endFrequency="800000">
diff --git a/tv/tuner/config/tuner_testing_dynamic_configuration.xsd b/tv/tuner/config/tuner_testing_dynamic_configuration.xsd
index 5216837..6a04b7e 100644
--- a/tv/tuner/config/tuner_testing_dynamic_configuration.xsd
+++ b/tv/tuner/config/tuner_testing_dynamic_configuration.xsd
@@ -61,9 +61,18 @@
<xs:complexType name="dvbtFrontendSettings">
<xs:attribute name="bandwidth" type="xs:nonNegativeInteger" use="required"/>
- <xs:attribute name="transmissionMode" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="constellation" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="guardInterval" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="hierarchy" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="hpCoderate" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="lpCoderate" type="xs:nonNegativeInteger" use="required"/>
<xs:attribute name="isHighPriority" type="xs:nonNegativeInteger" use="required"/>
- <xs:attribute name="constellation" type="xs:nonNegativeInteger" use="optional"/>
+ <xs:attribute name="isMiso" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="plpGroupId" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="plpId" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="plpMode" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="standard" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="transmissionMode" type="xs:nonNegativeInteger" use="required"/>
</xs:complexType>
<xs:complexType name="dvbsFrontendSettings">
<xs:attribute name="inputStreamId" type="xs:nonNegativeInteger" use="required"/>
diff --git a/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp b/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp
index 4d03ebf..713ec75 100644
--- a/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp
+++ b/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp
@@ -276,7 +276,9 @@
if (!status.isOk())
continue;
- std::chrono::milliseconds timeout{lengthMs * 2};
+ // TODO(b/187207798): revert to conservative timeout values once
+ // latencies have been fixed
+ std::chrono::milliseconds timeout{lengthMs * 8};
EXPECT_EQ(completionFuture.wait_for(timeout), std::future_status::ready);
}
}
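
The timeout changes in this file (here and in the two hunks that follow) all relax the same completion pattern: the test installs a callback that fulfills a promise, then waits on the matching future with a deadline derived from the nominal effect length. The sketch below is a minimal, self-contained illustration of that pattern; the simulated HAL thread, the 50 ms effect, and the 8x multiplier are illustrative assumptions, not the VTS code.

    #include <chrono>
    #include <cstdint>
    #include <future>
    #include <iostream>
    #include <thread>

    int main() {
        std::promise<void> completionPromise;
        std::future<void> completionFuture = completionPromise.get_future();

        // Stand-in for the vibrator HAL finishing the effect and firing the
        // completion callback some time later.
        std::thread hal([&completionPromise] {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            completionPromise.set_value();
        });

        const uint32_t lengthMs = 50;
        // The deadline is a multiple of the nominal effect length; the change
        // above widens the margin (e.g. 8x instead of 2x) to absorb HAL latency.
        const std::chrono::milliseconds timeout{lengthMs * 8};
        const bool completed =
                completionFuture.wait_for(timeout) == std::future_status::ready;
        std::cout << (completed ? "effect completed before the deadline"
                                : "timed out waiting for completion")
                  << std::endl;

        hal.join();
        return completed ? 0 : 1;
    }
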
@@ -588,7 +590,9 @@
EXPECT_EQ(Status::EX_NONE, vibrator->compose(composite, callback).exceptionCode())
<< toString(primitive);
- EXPECT_EQ(completionFuture.wait_for(duration * 2), std::future_status::ready)
+ // TODO(b/187207798): revert to conservative timeout values once
+ // latencies have been fixed
+ EXPECT_EQ(completionFuture.wait_for(duration * 4), std::future_status::ready)
<< toString(primitive);
end = high_resolution_clock::now();
@@ -739,7 +743,9 @@
sp<CompletionCallback> callback =
new CompletionCallback([&completionPromise] { completionPromise.set_value(); });
uint32_t durationMs = 2100; // Sum of 2 active and 1 braking below
- std::chrono::milliseconds timeout{durationMs * 2};
+ // TODO(b/187207798): revert to conservative timeout values once
+ // latencies have been fixed
+ std::chrono::milliseconds timeout{durationMs * 4};
ActivePwle active = composeValidActivePwle(vibrator, capabilities);