Merge "Add functional folder for vts 1.4."
diff --git a/compatibility_matrices/Android.mk b/compatibility_matrices/Android.mk
index 6be6930..425e376 100644
--- a/compatibility_matrices/Android.mk
+++ b/compatibility_matrices/Android.mk
@@ -17,8 +17,9 @@
LOCAL_PATH := $(call my-dir)
BUILD_FRAMEWORK_COMPATIBILITY_MATRIX := $(LOCAL_PATH)/compatibility_matrix.mk
+my_empty_manifest := $(LOCAL_PATH)/manifest.empty.xml
-# Framework Compatibility Matrix (common to all FCM versions)
+# System Compatibility Matrix (common to all FCM versions)
include $(CLEAR_VARS)
include $(LOCAL_PATH)/clear_vars.mk
@@ -26,6 +27,7 @@
LOCAL_MODULE_STEM := compatibility_matrix.device.xml
# define LOCAL_MODULE_CLASS for local-generated-sources-dir.
LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_RELATIVE_PATH := vintf
ifndef DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE
LOCAL_SRC_FILES := compatibility_matrix.empty.xml
@@ -37,10 +39,9 @@
# Enforce that DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE does not specify required HALs
# by checking it against an empty manifest. But the empty manifest needs to contain
# BOARD_SEPOLICY_VERS to be compatible with DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE.
-my_manifest_src_file := $(LOCAL_PATH)/manifest.empty.xml
my_gen_check_manifest := $(local-generated-sources-dir)/manifest.check.xml
-$(my_gen_check_manifest): PRIVATE_SRC_FILE := $(my_manifest_src_file)
-$(my_gen_check_manifest): $(my_manifest_src_file) $(HOST_OUT_EXECUTABLES)/assemble_vintf
+$(my_gen_check_manifest): PRIVATE_SRC_FILE := $(my_empty_manifest)
+$(my_gen_check_manifest): $(my_empty_manifest) $(HOST_OUT_EXECUTABLES)/assemble_vintf
BOARD_SEPOLICY_VERS=$(BOARD_SEPOLICY_VERS) \
VINTF_IGNORE_TARGET_FCM_VERSION=true \
$(HOST_OUT_EXECUTABLES)/assemble_vintf -i $(PRIVATE_SRC_FILE) -o $@
@@ -49,7 +50,6 @@
LOCAL_ASSEMBLE_VINTF_FLAGS += -c "$(my_gen_check_manifest)"
my_gen_check_manifest :=
-my_manifest_src_file :=
endif # DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE
@@ -61,18 +61,57 @@
include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
+# Product Compatibility Matrix
+
+include $(CLEAR_VARS)
+include $(LOCAL_PATH)/clear_vars.mk
+LOCAL_MODULE := product_compatibility_matrix.xml
+
+ifndef DEVICE_PRODUCT_COMPATIBILITY_MATRIX_FILE
+my_framework_matrix_deps :=
+include $(BUILD_PHONY_PACKAGE)
+else # DEVICE_PRODUCT_COMPATIBILITY_MATRIX_FILE
+
+LOCAL_MODULE_STEM := compatibility_matrix.xml
+LOCAL_PRODUCT_MODULE := true
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_RELATIVE_PATH := vintf
+
+# DEVICE_PRODUCT_COMPATIBILITY_MATRIX_FILE specifies an absolute path
+LOCAL_GENERATED_SOURCES := $(DEVICE_PRODUCT_COMPATIBILITY_MATRIX_FILE)
+
+# Enforce that DEVICE_PRODUCT_COMPATIBILITY_MATRIX_FILE does not specify required HALs
+# by checking it against an empty manifest.
+LOCAL_GEN_FILE_DEPENDENCIES += $(my_empty_manifest)
+LOCAL_ASSEMBLE_VINTF_FLAGS += -c "$(my_empty_manifest)"
+
+my_framework_matrix_deps := $(LOCAL_MODULE)
+
+include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
+
+endif # DEVICE_PRODUCT_COMPATIBILITY_MATRIX_FILE
+
my_system_matrix_deps := \
framework_compatibility_matrix.legacy.xml \
framework_compatibility_matrix.1.xml \
framework_compatibility_matrix.2.xml \
framework_compatibility_matrix.3.xml \
framework_compatibility_matrix.current.xml \
- framework_compatibility_matrix.device.xml
+ framework_compatibility_matrix.device.xml \
-# Phony target that installs all framework compatibility matrix files
+my_framework_matrix_deps += \
+ $(my_system_matrix_deps)
+
+# Phony target that installs all system compatibility matrix files
+include $(CLEAR_VARS)
+LOCAL_MODULE := system_compatibility_matrix.xml
+LOCAL_REQUIRED_MODULES := $(my_system_matrix_deps)
+include $(BUILD_PHONY_PACKAGE)
+
+# Phony target that installs all framework compatibility matrix files (system + product)
include $(CLEAR_VARS)
LOCAL_MODULE := framework_compatibility_matrix.xml
-LOCAL_REQUIRED_MODULES := $(my_system_matrix_deps)
+LOCAL_REQUIRED_MODULES := $(my_framework_matrix_deps)
include $(BUILD_PHONY_PACKAGE)
# Final Framework Compatibility Matrix for OTA
@@ -80,7 +119,7 @@
include $(LOCAL_PATH)/clear_vars.mk
LOCAL_MODULE := verified_assembled_system_matrix.xml
LOCAL_MODULE_PATH := $(PRODUCT_OUT)
-LOCAL_REQUIRED_MODULES := $(my_system_matrix_deps)
+LOCAL_REQUIRED_MODULES := $(my_framework_matrix_deps)
LOCAL_GENERATED_SOURCES := $(call module-installed-files,$(LOCAL_REQUIRED_MODULES))
LOCAL_ADD_VBMETA_VERSION_OVERRIDE := true
@@ -97,4 +136,6 @@
BUILT_SYSTEM_MATRIX := $(LOCAL_BUILT_MODULE)
my_system_matrix_deps :=
+my_framework_matrix_deps :=
+my_empty_manifest :=
BUILD_FRAMEWORK_COMPATIBILITY_MATRIX :=
diff --git a/compatibility_matrices/CleanSpec.mk b/compatibility_matrices/CleanSpec.mk
new file mode 100644
index 0000000..9b150ed
--- /dev/null
+++ b/compatibility_matrices/CleanSpec.mk
@@ -0,0 +1,47 @@
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If you don't need to do a full clean build but would like to touch
+# a file or delete some intermediate files, add a clean step to the end
+# of the list. These steps will only be run once, if they haven't been
+# run before.
+#
+# E.g.:
+# $(call add-clean-step, touch -c external/sqlite/sqlite3.h)
+# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates)
+#
+# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with
+# files that are missing or have been moved.
+#
+# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory.
+# Use $(OUT_DIR) to refer to the "out" directory.
+#
+# If you need to re-do something that's already mentioned, just copy
+# the command and add it to the bottom of the list. E.g., if a change
+# that you made last week required touching a file and a change you
+# made today requires touching the same file, just copy the old
+# touch step and add it to the end of the list.
+#
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
+
+# For example:
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates)
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
+#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
+#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/vintf/compatibility_matrix.device.xml)
diff --git a/compatibility_matrices/compatibility_matrix.mk b/compatibility_matrices/compatibility_matrix.mk
index bafc84b..d22e510 100644
--- a/compatibility_matrices/compatibility_matrix.mk
+++ b/compatibility_matrices/compatibility_matrix.mk
@@ -17,7 +17,8 @@
##### Input Variables:
# LOCAL_MODULE: required. Module name for the build system.
# LOCAL_MODULE_CLASS: optional. Default is ETC.
-# LOCAL_MODULE_PATH: optional. Path of output file. Default is $(TARGET_OUT)/etc/vintf.
+# LOCAL_MODULE_PATH / LOCAL_MODULE_RELATIVE_PATH: required. (Relative) path of output file.
+#   At least one of the two must be defined; otherwise the build fails with an error.
# LOCAL_MODULE_STEM: optional. Name of output file. Default is $(LOCAL_MODULE).
# LOCAL_SRC_FILES: required. Local source files provided to assemble_vintf
# (command line argument -i).
@@ -48,7 +49,9 @@
endif
ifndef LOCAL_MODULE_PATH
-LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/vintf
+ifndef LOCAL_MODULE_RELATIVE_PATH
+$(error Either LOCAL_MODULE_PATH or LOCAL_MODULE_RELATIVE_PATH must be defined.)
+endif
endif
GEN := $(local-generated-sources-dir)/$(LOCAL_MODULE_STEM)
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index dd6f934..2920cec 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -22,6 +22,9 @@
],
defaults: ["VtsHalTargetTestDefaults"],
export_include_dirs: ["."],
+ shared_libs: [
+ "libnativewindow",
+ ],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
@@ -47,6 +50,9 @@
"ValidateRequest.cpp",
"VtsHalNeuralnetworks.cpp",
],
+ shared_libs: [
+ "libnativewindow",
+ ],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.cpp b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
index a1c5a1a..c30702c 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
@@ -135,12 +135,18 @@
Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
mErrorStatus = errorStatus;
+ mOutputShapes = {};
+ mTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
CallbackBase::notify();
return Void();
}
-Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) {
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
+ const hidl_vec<OutputShape>& outputShapes,
+ const Timing& timing) {
mErrorStatus = errorStatus;
+ mOutputShapes = outputShapes;
+ mTiming = timing;
CallbackBase::notify();
return Void();
}
@@ -150,6 +156,16 @@
return mErrorStatus;
}
+const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() {
+ wait();
+ return mOutputShapes;
+}
+
+Timing ExecutionCallback::getTiming() {
+ wait();
+ return mTiming;
+}
+
} // namespace implementation
} // namespace V1_2
} // namespace neuralnetworks
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index e89980d..4707d0a 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -275,8 +275,9 @@
* Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
* be called exactly once on a given ExecutionCallback object.
*
- * @param status Error status returned from asynchronously preparing the
- * model; will be:
+ * @param status Error status returned from launching the asynchronous task
+ * (if the launch fails) or from the asynchronous task itself
+ * (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
@@ -285,27 +286,100 @@
* - INVALID_ARGUMENT if the input request is invalid
*/
Return<void> notify(ErrorStatus status) override;
- Return<void> notify_1_2(ErrorStatus status) override;
+
+ /**
+ * Similar to IExecutionCallback::notify, but for V1_2::IPreparedModel to
+ * also notify output shapes along with error status.
+ *
+ * @param status Error status returned from launching the asynchronous task
+ * (if the launch fails) or from the asynchronous task itself
+ * (if the launch succeeds). Must be:
+ * - NONE if the asynchronous execution was successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if the asynchronous task resulted in an
+ * unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if at least one output
+ * operand buffer is not large enough to store the
+ * corresponding output
+ * - INVALID_ARGUMENT if one of the input arguments to
+ * prepareModel is invalid
+ * @param outputShapes A list of shape information of model output operands.
+ * The index into "outputShapes" corresponds to the index
+ * of the output operand in the Request outputs vector.
+ * outputShapes must be empty unless the status is either
+ * NONE or OUTPUT_INSUFFICIENT_SIZE.
+ * @param timing Duration of execution. Unless MeasureTiming::YES was passed when
+ * launching the execution and status is NONE, all times must
+ * be reported as UINT64_MAX. A driver may choose to report
+ * any time as UINT64_MAX, indicating that particular measurement is
+ * not available.
+ */
+ Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+ const Timing& timing) override;
+
+ // An overload of the latest notify interface to hide the version from ExecutionBuilder.
+ Return<void> notify(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+ const Timing& timing) {
+ return notify_1_2(status, outputShapes, timing);
+ }
/**
* Retrieves the error status returned from the asynchronous task launched
- * by IPreparedModel::execute. If IPreparedModel::execute has not finished
+ * by either IPreparedModel::execute or IPreparedModel::execute_1_2. If
+ * IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished
* asynchronously executing, this call will block until the asynchronous task
* notifies the object.
*
- * @return status Error status returned from asynchronously preparing the
- * model; will be:
+ * @return status Error status returned from launching the asynchronous task
+ * (if the launch fails) or from the asynchronous task itself
+ * (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
- * - GENERAL_FAILURE if there is an unspecified error
- * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
- * not large enough to store the resultant values
- * - INVALID_ARGUMENT if the input request is invalid
+ * - GENERAL_FAILURE if the asynchronous task resulted in an
+ * unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if at least one output
+ * operand buffer is not large enough to store the
+ * corresponding output
+ * - INVALID_ARGUMENT if one of the input arguments to
+ * prepareModel is invalid
*/
ErrorStatus getStatus();
- private:
- ErrorStatus mErrorStatus;
+ /**
+ * Retrieves the output shapes returned from the asynchronous task launched
+ * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished
+ * asynchronously executing, this call will block until the asynchronous task
+ * notifies the object.
+ *
+ * If the asynchronous task was launched by IPreparedModel::execute, an empty vector
+ * will be returned.
+ *
+ * @return outputShapes A list of shape information of model output operands.
+ * The index into "outputShapes" corresponds to the index
+ * of the output operand in the Request outputs vector.
+ * outputShapes must be empty unless the status is either
+ * NONE or OUTPUT_INSUFFICIENT_SIZE.
+ */
+ const std::vector<OutputShape>& getOutputShapes();
+
+ /**
+ * Retrieves the duration of execution of the asynchronous task launched
+ * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished
+ * asynchronously executing, this call will block until the asynchronous task
+ * notifies the object.
+ *
+ * If the asynchronous task was launched by IPreparedModel::execute, all times
+ * must be UINT64_MAX.
+ *
+ * @return timing Duration of the execution. All times must be UINT64_MAX unless
+ * the status is NONE.
+ */
+ Timing getTiming();
+
+ private:
+ ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ std::vector<OutputShape> mOutputShapes = {};
+ Timing mTiming = {};
};
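
For reference, a minimal usage sketch of this callback, assuming the usings and test setup from GeneratedTestHarness.cpp later in this change (in particular a valid `preparedModel` of type sp<V1_2::IPreparedModel> and a populated `request`):

// Minimal sketch; mirrors how GeneratedTestHarness.cpp drives the callback.
sp<ExecutionCallback> executionCallback = new ExecutionCallback();

// Launch the asynchronous execution and request duration measurement.
Return<ErrorStatus> launchStatus =
        preparedModel->execute_1_2(request, MeasureTiming::YES, executionCallback);
ASSERT_TRUE(launchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(launchStatus));

// Each getter blocks until notify or notify_1_2 has been called by the driver.
executionCallback->wait();
ErrorStatus status = executionCallback->getStatus();
const std::vector<OutputShape>& outputShapes = executionCallback->getOutputShapes();
Timing timing = executionCallback->getTiming();

EXPECT_EQ(ErrorStatus::NONE, status);
// Even with MeasureTiming::YES a driver may report UINT64_MAX, but when both
// times are present, device time must not exceed driver time.
if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
    EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
}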
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 3b4eb21..65c425e 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -45,20 +45,16 @@
using ::test_helper::compare;
using ::test_helper::expectMultinomialDistributionWithinTolerance;
using ::test_helper::filter;
-using ::test_helper::Float32Operands;
using ::test_helper::for_all;
using ::test_helper::for_each;
-using ::test_helper::Int32Operands;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
-using ::test_helper::MixedTypedIndex;
-using ::test_helper::Quant8Operands;
using ::test_helper::resize_accordingly;
template <typename T>
-void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
- MixedTyped& test = *dst;
- for_each<T>(test, [&ra, src](int index, std::vector<T>& m) {
+void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
+ char* src) {
+ for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
char* begin = src + ra[index].location.offset;
memcpy(m.data(), begin, ra[index].location.length);
@@ -66,36 +62,52 @@
}
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
- copy_back_<float>(dst, ra, src);
- copy_back_<int32_t>(dst, ra, src);
- copy_back_<uint8_t>(dst, ra, src);
- copy_back_<int16_t>(dst, ra, src);
- copy_back_<_Float16>(dst, ra, src);
- copy_back_<bool8>(dst, ra, src);
- copy_back_<int8_t>(dst, ra, src);
- static_assert(7 == std::tuple_size<MixedTyped>::value,
+ copy_back_(&dst->float32Operands, ra, src);
+ copy_back_(&dst->int32Operands, ra, src);
+ copy_back_(&dst->quant8AsymmOperands, ra, src);
+ copy_back_(&dst->quant16SymmOperands, ra, src);
+ copy_back_(&dst->float16Operands, ra, src);
+ copy_back_(&dst->bool8Operands, ra, src);
+ copy_back_(&dst->quant8ChannelOperands, ra, src);
+ copy_back_(&dst->quant16AsymmOperands, ra, src);
+ static_assert(8 == MixedTyped::kNumTypes,
"Number of types in MixedTyped changed, but copy_back function wasn't updated");
}
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
- const Request& request,
+ const Request& request, MeasureTiming,
sp<ExecutionCallback>& callback) {
return preparedModel->execute(request, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
- const Request& request,
+ const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
- return preparedModel->execute_1_2(request, callback);
+ return preparedModel->execute_1_2(request, measure, callback);
}
-static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&) {
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&,
+ MeasureTiming, hidl_vec<OutputShape>*, Timing*) {
ADD_FAILURE() << "asking for synchronous execution at V1_0";
return ErrorStatus::GENERAL_FAILURE;
}
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
- const Request& request) {
- return preparedModel->executeSynchronously(request);
+ const Request& request, MeasureTiming measure,
+ hidl_vec<OutputShape>* outputShapes,
+ Timing* timing) {
+ ErrorStatus result;
+ Return<void> ret = preparedModel->executeSynchronously(
+ request, measure,
+ [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
+ const Timing& time) {
+ result = error;
+ *outputShapes = shapes;
+ *timing = time;
+ });
+ if (!ret.isOk()) {
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return result;
}
enum class Synchronously { NO, YES };
const float kDefaultAtol = 1e-5f;
@@ -103,8 +115,8 @@
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model = false, float fpAtol = kDefaultAtol,
- float fpRtol = kDefaultRtol, Synchronously sync = Synchronously::NO) {
+ bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
+ Synchronously sync, MeasureTiming measure, bool testDynamicOutputShape) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
@@ -114,7 +126,7 @@
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
- const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
+ const bool hasFloat16Inputs = !inputs.float16Operands.empty();
if (hasRelaxedFloat32Model || hasFloat16Inputs) {
// TODO: Adjust the error limit based on testing.
// If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
@@ -197,6 +209,9 @@
inputMemory->commit();
outputMemory->commit();
+ ErrorStatus executionStatus;
+ hidl_vec<OutputShape> outputShapes;
+ Timing timing;
if (sync == Synchronously::NO) {
SCOPED_TRACE("asynchronous");
@@ -204,23 +219,52 @@
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
- preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
- executionCallback);
+ preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+ measure, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
- ErrorStatus executionReturnStatus = executionCallback->getStatus();
- EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
+ executionStatus = executionCallback->getStatus();
+ outputShapes = executionCallback->getOutputShapes();
+ timing = executionCallback->getTiming();
} else {
SCOPED_TRACE("synchronous");
// execute
- Return<ErrorStatus> executionStatus = ExecutePreparedModel(
- preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
- ASSERT_TRUE(executionStatus.isOk());
- EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionStatus));
+ Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+ preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+ measure, &outputShapes, &timing);
+ ASSERT_TRUE(executionReturnStatus.isOk());
+ executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+ }
+
+ if (testDynamicOutputShape && executionStatus != ErrorStatus::NONE) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "execute model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "execute model that it does not support."
+ << std::endl;
+ return;
+ }
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ if (measure == MeasureTiming::NO) {
+ EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+ EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+ } else {
+ if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
+ EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
+ }
+ }
+
+ // Go through all outputs, overwrite output dimensions with returned output shapes
+ if (testDynamicOutputShape) {
+ ASSERT_NE(outputShapes.size(), 0);
+ for_each<uint32_t>(test.operandDimensions,
+ [&outputShapes](int idx, std::vector<uint32_t>& dim) {
+ dim = outputShapes[idx].dimensions;
+ });
}
// validate results
@@ -242,9 +286,10 @@
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, Synchronously sync) {
+ bool hasRelaxedFloat32Model, Synchronously sync, MeasureTiming measure,
+ bool testDynamicOutputShape) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
- kDefaultRtol, sync);
+ kDefaultRtol, sync, measure, testDynamicOutputShape);
}
static void getPreparedModel(sp<PreparedModelCallback> callback,
@@ -300,7 +345,8 @@
float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
EvaluatePreparedModel(preparedModel, is_ignored, examples,
- /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
+ /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Synchronously::NO,
+ MeasureTiming::NO, /*testDynamicOutputShape=*/false);
}
void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -346,12 +392,14 @@
ASSERT_NE(nullptr, preparedModel.get());
EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16);
+ model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Synchronously::NO,
+ MeasureTiming::NO, /*testDynamicOutputShape=*/false);
}
// TODO: Reduce code duplication.
void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+ bool testDynamicOutputShape) {
V1_2::Model model = create_model();
// see if service can handle model
@@ -393,9 +441,17 @@
ASSERT_NE(nullptr, preparedModel.get());
EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Synchronously::NO);
+ model.relaxComputationFloat32toFloat16, Synchronously::NO,
+ MeasureTiming::NO, testDynamicOutputShape);
EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Synchronously::YES);
+ model.relaxComputationFloat32toFloat16, Synchronously::YES,
+ MeasureTiming::NO, testDynamicOutputShape);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Synchronously::NO,
+ MeasureTiming::YES, testDynamicOutputShape);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Synchronously::YES,
+ MeasureTiming::YES, testDynamicOutputShape);
}
} // namespace generated_tests
diff --git a/neuralnetworks/1.2/Android.bp b/neuralnetworks/1.2/Android.bp
index 7d13104..9057b94 100644
--- a/neuralnetworks/1.2/Android.bp
+++ b/neuralnetworks/1.2/Android.bp
@@ -8,6 +8,8 @@
},
srcs: [
"types.hal",
+ "IBurstCallback.hal",
+ "IBurstContext.hal",
"IDevice.hal",
"IExecutionCallback.hal",
"IPreparedModel.hal",
@@ -20,6 +22,9 @@
"android.hidl.safe_union@1.0",
],
types: [
+ "DeviceType",
+ "FmqRequestDatum",
+ "FmqResultDatum",
"Model",
"Operand",
"OperandType",
@@ -27,6 +32,8 @@
"Operation",
"OperationType",
"OperationTypeRange",
+ "OutputShape",
+ "SymmPerChannelQuantParams",
],
gen_java: false,
}
diff --git a/neuralnetworks/1.2/IBurstCallback.hal b/neuralnetworks/1.2/IBurstCallback.hal
new file mode 100644
index 0000000..3f82e31
--- /dev/null
+++ b/neuralnetworks/1.2/IBurstCallback.hal
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+
+/**
+ * Callback object used by a service to retrieve memory objects based on unique
+ * identifiers ("slots").
+ */
+interface IBurstCallback {
+ /**
+ * Get the memory regions that correspond to slot ids. The slot ids are
+ * unique to the burst object.
+ *
+ * @param slots Values uniquely identifying memory regions within a Burst.
+ * @return status Indicates whether the memories were successfully returned;
+ * must be:
+ * - NONE if the memory region was successfully retrieved
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT if a slot number is invalid
+ * @return buffers Memory buffers corresponding to the slot numbers. If an
+ * error occurs, an empty vector must be returned for
+ * buffers, otherwise slots.size() == buffers.size().
+ */
+ getMemories(vec<int32_t> slots) generates (ErrorStatus status, vec<memory> buffers);
+};
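
A minimal client-side sketch of what an IBurstCallback implementation backed by a simple slot-to-memory cache might look like. The class name and caching policy are hypothetical; the generated 1.2 headers and libhidl types are assumed:

// Hypothetical implementation; names and caching policy are illustrative only.
#include <map>

using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_2::IBurstCallback;

struct ExampleBurstCallback : public IBurstCallback {
    Return<void> getMemories(const hidl_vec<int32_t>& slots,
                             getMemories_cb _hidl_cb) override {
        hidl_vec<hidl_memory> buffers;
        buffers.resize(slots.size());
        for (size_t i = 0; i < slots.size(); ++i) {
            auto it = mMemoryCache.find(slots[i]);
            if (it == mMemoryCache.end()) {
                // Unknown slot: report the error with an empty buffer vector.
                _hidl_cb(ErrorStatus::INVALID_ARGUMENT, {});
                return Void();
            }
            buffers[i] = it->second;
        }
        _hidl_cb(ErrorStatus::NONE, buffers);
        return Void();
    }

    std::map<int32_t, hidl_memory> mMemoryCache;  // slot id -> cached memory
};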
diff --git a/neuralnetworks/1.2/IBurstContext.hal b/neuralnetworks/1.2/IBurstContext.hal
new file mode 100644
index 0000000..60bf53b
--- /dev/null
+++ b/neuralnetworks/1.2/IBurstContext.hal
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+/**
+ * Context object to manage the resources of a burst.
+ */
+interface IBurstContext {
+ /**
+ * freeMemory is used by the client to signal to the service that a memory
+ * buffer corresponding to a slot number is no longer needed by the client.
+ *
+ * The slot ids are unique to the burst object.
+ *
+ * @param slot Value uniquely identifying a memory region.
+ */
+ freeMemory(int32_t slot);
+};
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index 6a77961..6c3b483 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -56,6 +56,26 @@
getVersionString() generates (ErrorStatus status, string version);
/**
+ * Get the type of a given device.
+ *
+ * The device type can be used to help application developers to distribute
+ * Machine Learning workloads and other workloads such as graphical rendering.
+ * E.g., for an app which renders AR scenes based on real time object detection
+ * results, the developer could choose an ACCELERATOR type device for ML
+ * workloads, and reserve GPU for graphical rendering.
+ *
+ * @return status Error status returned from querying the device type. Must be:
+ * - NONE if the query was successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if the query resulted in an
+ * unspecified error
+ * @return type The DeviceType of the device. Please note, this is not a
+ * bitfield of DeviceTypes. Each device must only be of a
+ * single DeviceType.
+ */
+ getType() generates (ErrorStatus status, DeviceType type);
+
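A small usage sketch, assuming `device` is a connected sp<V1_2::IDevice>, of how a caller might use getType to prefer a dedicated accelerator for ML work:

// Sketch only; `device` is assumed to be a connected sp<V1_2::IDevice>.
bool preferForMlWorkloads = false;
Return<void> ret = device->getType([&](ErrorStatus status, DeviceType type) {
    if (status == ErrorStatus::NONE) {
        // Reserve the GPU for rendering; prefer dedicated ML hardware for inference.
        preferForMlWorkloads = (type == DeviceType::ACCELERATOR);
    }
});
if (!ret.isOk()) {
    // Transport error: treat this device as unusable for scheduling decisions.
    preferForMlWorkloads = false;
}
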
+ /**
* Gets the supported operations in a model.
*
* getSupportedOperations indicates which operations of a model are fully
diff --git a/neuralnetworks/1.2/IExecutionCallback.hal b/neuralnetworks/1.2/IExecutionCallback.hal
index 667e0d6..7f6c9ee 100644
--- a/neuralnetworks/1.2/IExecutionCallback.hal
+++ b/neuralnetworks/1.2/IExecutionCallback.hal
@@ -39,10 +39,21 @@
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an
* unspecified error
- * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
- * not large enough to store the resultant values
+ * - OUTPUT_INSUFFICIENT_SIZE if at least one output
+ * operand buffer is not large enough to store the
+ * corresponding output
* - INVALID_ARGUMENT if one of the input arguments to
* prepareModel is invalid
+ * @param outputShapes A list of shape information of model output operands.
+ * The index into "outputShapes" corresponds with to index
+ * of the output operand in the Request outputs vector.
+ * outputShapes must be empty unless the status is either
+ * NONE or OUTPUT_INSUFFICIENT_SIZE.
+ * @param timing Duration of execution. Unless MeasureTiming::YES was passed when
+ * launching the execution and status is NONE, all times must
+ * be reported as UINT64_MAX. A driver may choose to report
+ * any time as UINT64_MAX, indicating that particular measurement is
+ * not available.
*/
- oneway notify_1_2(ErrorStatus status);
+ oneway notify_1_2(ErrorStatus status, vec<OutputShape> outputShapes, Timing timing);
};
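
For illustration, a hedged sketch of how a driver's asynchronous task might report its result through this method; the helper name and the timing values are placeholders, not part of this change:

// Illustrative driver-side helper; shapes and timing values are placeholders.
void reportExecutionResult(const sp<IExecutionCallback>& callback,
                           const hidl_vec<OutputShape>& outputShapes,
                           uint64_t deviceMicros, uint64_t driverMicros) {
    // On success, return the measured shapes and (optionally measured) timing.
    Timing timing = {.timeOnDevice = deviceMicros, .timeInDriver = driverMicros};
    Return<void> ret = callback->notify_1_2(ErrorStatus::NONE, outputShapes, timing);
    if (!ret.isOk()) {
        // Transport failure (e.g. the client died); nothing more the driver can do.
    }
}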
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
index 4e91c67..5d2d80f 100644
--- a/neuralnetworks/1.2/IPreparedModel.hal
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -19,6 +19,8 @@
import @1.0::ErrorStatus;
import @1.0::IPreparedModel;
import @1.0::Request;
+import IBurstCallback;
+import IBurstContext;
import IExecutionCallback;
/**
@@ -57,6 +59,10 @@
*
* @param request The input and output information on which the prepared
* model is to be executed.
+ * @param measure Specifies whether or not to measure duration of the execution.
+ * The duration runs from the time the driver sees the call
+ * to the execute_1_2 function to the time the driver invokes
+ * the callback.
* @param callback A callback object used to return the error status of
* the execution. The callback object's notify function must
* be called exactly once, even if the execution was
@@ -70,7 +76,7 @@
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
*/
- execute_1_2(Request request, IExecutionCallback callback)
+ execute_1_2(Request request, MeasureTiming measure, IExecutionCallback callback)
generates (ErrorStatus status);
/**
@@ -96,15 +102,59 @@
*
* @param request The input and output information on which the prepared
* model is to be executed.
+ * @param measure Specifies whether or not to measure duration of the execution.
+ * The duration runs from the time the driver sees the call
+ * to the executeSynchronously function to the time the driver
+ * returns from the function.
* @return status Error status of the execution, must be:
* - NONE if execution is performed successfully
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
- * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
- * not large enough to store the resultant values
+ * - OUTPUT_INSUFFICIENT_SIZE if at least one output
+ * operand buffer is not large enough to store the
+ * corresponding output
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
+ * @return outputShapes A list of shape information of model output operands.
+ * The index into "outputShapes" corresponds to the index
+ * of the output operand in the Request outputs vector.
+ * outputShapes must be empty unless the status is either
+ * NONE or OUTPUT_INSUFFICIENT_SIZE.
+ * @return timing Duration of execution. Unless measure is YES and status is
+ * NONE, all times must be reported as UINT64_MAX. A driver may
+ * choose to report any time as UINT64_MAX, indicating that
+ * measurement is not available.
*/
- executeSynchronously(Request request)
- generates (ErrorStatus status);
+ executeSynchronously(Request request, MeasureTiming measure)
+ generates (ErrorStatus status, vec<OutputShape> outputShapes, Timing timing);
+
+ /**
+ * Configure a Burst object used to execute multiple inferences on a
+ * prepared model in rapid succession.
+ *
+ * @param callback A callback object used to retrieve memory resources
+ * corresponding to unique identifiers ("slots").
+ * @param requestChannel Used by the client to send a serialized Request to
+ * the Burst for execution. requestChannel must not be
+ * used to pass a second Request object until a result
+ * has been received from resultChannel.
+ * @param resultChannel Used by the service to return the results of an
+ * execution to the client: the status of the execution
+ * and OutputShape of all output tensors. resultChannel
+ * must be used to return the results if a Request was
+ * sent through the requestChannel.
+ * @return status Error status of configuring the execution burst, must be:
+ * - NONE if the burst is successfully configured
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT if one of the input arguments is
+ * invalid
+ * @return context Object containing all resources (such as cached
+ * hidl_memory) related to a Burst if successful, otherwise
+ * nullptr.
+ */
+ configureExecutionBurst(IBurstCallback callback,
+ fmq_sync<FmqRequestDatum> requestChannel,
+ fmq_sync<FmqResultDatum> resultChannel)
+ generates (ErrorStatus status, IBurstContext context);
};
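
A rough client-side sketch of configuring a burst, assuming libfmq's MessageQueue backs the two channels; the queue sizes, helper name, and error handling are illustrative, not prescribed by the interface:

// Rough sketch; generated 1.2 types and usings are assumed to be in scope.
#include <fmq/MessageQueue.h>

using ::android::hardware::kSynchronizedReadWrite;
using ::android::hardware::MessageQueue;

using RequestQueue = MessageQueue<FmqRequestDatum, kSynchronizedReadWrite>;
using ResultQueue = MessageQueue<FmqResultDatum, kSynchronizedReadWrite>;

sp<IBurstContext> configureBurst(const sp<IPreparedModel>& preparedModel,
                                 const sp<IBurstCallback>& callback,
                                 RequestQueue* requestChannel, ResultQueue* resultChannel) {
    // The client owns both FMQs; only their descriptors cross the HIDL boundary.
    // The queues must stay alive for as long as the returned burst is used.
    if (!requestChannel->isValid() || !resultChannel->isValid()) return nullptr;

    sp<IBurstContext> burstContext;
    Return<void> ret = preparedModel->configureExecutionBurst(
            callback, *requestChannel->getDesc(), *resultChannel->getDesc(),
            [&](ErrorStatus status, const sp<IBurstContext>& context) {
                if (status == ErrorStatus::NONE) burstContext = context;
            });
    if (!ret.isOk()) return nullptr;
    return burstContext;  // nullptr on failure, per the interface contract.
}

A caller would construct, for example, RequestQueue requestChannel(/*numElementsInQueue=*/4096) and keep it alive alongside the returned IBurstContext.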
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 40c07e7..bd8354f 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -17,6 +17,7 @@
package android.hardware.neuralnetworks@1.2;
import @1.0::DataLocation;
+import @1.0::ErrorStatus;
import @1.0::OperandLifeTime;
import @1.0::OperandType;
import @1.0::PerformanceInfo;
@@ -76,6 +77,18 @@
* where C is an index in the Channel dimension.
*/
TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
+ /**
+ * A tensor of 16 bit unsigned integers that represent real numbers.
+ *
+ * Attached to this tensor are two numbers that can be used to convert the
+ * 16 bit integer to the real value and vice versa. These two numbers are:
+ * - scale: a 32 bit floating point value greater than zero.
+ * - zeroPoint: a 32 bit integer, in range [0, 65535].
+ *
+ * The formula is:
+ * real_value = (integer_value - zeroPoint) * scale.
+ */
+ TENSOR_QUANT16_ASYMM = 12,
/* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
* OperandTypeRange::OPERAND_FUNDAMENTAL_MAX.
*/
@@ -89,7 +102,7 @@
*/
enum OperandTypeRange : uint32_t {
OPERAND_FUNDAMENTAL_MIN = 0,
- OPERAND_FUNDAMENTAL_MAX = 11,
+ OPERAND_FUNDAMENTAL_MAX = 12,
OPERAND_OEM_MIN = 10000,
OPERAND_OEM_MAX = 10001,
};
@@ -101,17 +114,17 @@
*/
enum OperationType : @1.1::OperationType {
// TODO(b/116445845): Sync docs when all ops are implemented.
- ARGMAX = 38,
- ARGMIN = 39,
- PAD_V2 = 40,
+ ABS = 38,
+ ARGMAX = 39,
+ ARGMIN = 40,
AXIS_ALIGNED_BBOX_TRANSFORM = 41,
BIDIRECTIONAL_SEQUENCE_LSTM = 42,
BIDIRECTIONAL_SEQUENCE_RNN = 43,
BOX_WITH_NMS_LIMIT = 44,
CAST = 45,
CHANNEL_SHUFFLE = 46,
- DETECTION_OUTPUT = 47,
- EMBEDDING_LOOKUP_SPARSE = 48,
+ DETECTION_POSTPROCESSING = 47,
+ EQUAL = 48,
EXP = 49,
EXPAND_DIMS = 50,
GATHER = 51,
@@ -120,47 +133,43 @@
GREATER_EQUAL = 54,
GROUPED_CONV_2D = 55,
HEATMAP_MAX_KEYPOINT = 56,
- LESS = 57,
- LESS_EQUAL = 58,
- LOG = 59,
- LOGICAL_AND = 60,
- LOGICAL_NOT = 61,
- LOGICAL_OR = 62,
- LOG_SOFTMAX = 63,
- MAXIMUM = 64,
- MINIMUM = 65,
- NEG = 66,
- POW = 67,
- PRELU = 68,
- PRIOR_BOX = 69,
- QUANTIZE = 70,
- QUANTIZED_16BIT_LSTM = 71,
- RANDOM_MULTINOMIAL = 72,
- REDUCE_PROD = 73,
- ROI_ALIGN = 74,
- RSQRT = 75,
- SELECT = 76,
- SIN = 77,
- SLICE = 78,
- SPARSE_TO_DENSE = 79,
- SPLIT = 80,
- SQRT = 81,
- TILE = 82,
- TOPK_V2 = 83,
- TRANSPOSE_CONV_2D = 84,
- UNIDIRECTIONAL_SEQUENCE_LSTM = 85,
- UNIDIRECTIONAL_SEQUENCE_RNN = 86,
- ROTATED_BBOX_TRANSFORM = 87,
- ABS = 88,
- ROI_POOLING = 89,
- EQUAL = 90,
- NOT_EQUAL = 91,
- REDUCE_SUM = 92,
- REDUCE_MAX = 93,
- REDUCE_MIN = 94,
- REDUCE_ANY = 95,
- REDUCE_ALL = 96,
- INSTANCE_NORMALIZATION = 97,
+ INSTANCE_NORMALIZATION = 57,
+ LESS = 58,
+ LESS_EQUAL = 59,
+ LOG = 60,
+ LOGICAL_AND = 61,
+ LOGICAL_NOT = 62,
+ LOGICAL_OR = 63,
+ LOG_SOFTMAX = 64,
+ MAXIMUM = 65,
+ MINIMUM = 66,
+ NEG = 67,
+ NOT_EQUAL = 68,
+ PAD_V2 = 69,
+ POW = 70,
+ PRELU = 71,
+ QUANTIZE = 72,
+ QUANTIZED_16BIT_LSTM = 73,
+ RANDOM_MULTINOMIAL = 74,
+ REDUCE_ALL = 75,
+ REDUCE_ANY = 76,
+ REDUCE_MAX = 77,
+ REDUCE_MIN = 78,
+ REDUCE_PROD = 79,
+ REDUCE_SUM = 80,
+ ROI_ALIGN = 81,
+ ROI_POOLING = 82,
+ RSQRT = 83,
+ SELECT = 84,
+ SIN = 85,
+ SLICE = 86,
+ SPLIT = 87,
+ SQRT = 88,
+ TILE = 89,
+ TOPK_V2 = 90,
+ TRANSPOSE_CONV_2D = 91,
+ UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
+ UNIDIRECTIONAL_SEQUENCE_RNN = 93,
/* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
* OperationTypeRange::OPERATION_FUNDAMENTAL_MAX.
*/
@@ -174,12 +183,32 @@
*/
enum OperationTypeRange : uint32_t {
OPERATION_FUNDAMENTAL_MIN = 0,
- OPERATION_FUNDAMENTAL_MAX = 97,
+ OPERATION_FUNDAMENTAL_MAX = 93,
OPERATION_OEM_MIN = 10000,
OPERATION_OEM_MAX = 10000,
};
/**
+ * Device types.
+ *
+ * The type of NNAPI device.
+ */
+enum DeviceType : int32_t {
+ // Leaving 0 unused as it means unknown type in NDK NNAPI. There is no
+ // HAL equivalent of unknown type and a 1.2 HAL implementation must belong
+ // to one of the categories below.
+ /** The device does not fall into any category below. */
+ OTHER = 1,
+ /** The device runs NNAPI models on single or multi-core CPU. */
+ CPU = 2,
+ /** The device can run NNAPI models and also accelerate graphics APIs such
+ * as OpenGL ES and Vulkan. */
+ GPU = 3,
+ /** Dedicated accelerator for Machine Learning workloads. */
+ ACCELERATOR = 4,
+};
+
+/**
* Describes one operation of the model's graph.
*/
struct Operation {
@@ -234,9 +263,6 @@
*
* For a scalar operand, dimensions.size() must be 0.
*
- * For a tensor operand, dimensions.size() must be at least 1;
- * however, any of the dimensions may be unspecified.
- *
* A tensor operand with all dimensions specified has "fully
* specified" dimensions. Whenever possible (i.e., whenever the
* dimensions are known at model construction time), a tensor
@@ -255,17 +281,20 @@
* . The operand has lifetime CONSTANT_COPY or
* CONSTANT_REFERENCE.
*
- * . The operand has lifetime MODEL_INPUT or MODEL_OUTPUT. Fully
+ * . The operand has lifetime MODEL_INPUT. Fully
* specified dimensions must either be present in the
* Operand or they must be provided in the corresponding
* RequestArgument.
- * EXCEPTION: If the input or output is optional and omitted
+ * EXCEPTION: If the input is optional and omitted
* (by setting the hasNoValue field of the corresponding
* RequestArgument to true) then it need not have fully
* specified dimensions.
*
* A tensor operand with some number of unspecified dimensions is
* represented by setting each unspecified dimension to 0.
+ *
+ * A tensor operand with unspecified rank is represented by providing
+ * an empty dimensions vector.
*/
vec<uint32_t> dimensions;
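
To make the three cases above concrete, a small sketch using the generated C++ types (the values are illustrative; Operand and hidl_vec come from the generated 1.2 headers):

// Illustrative values only.
Operand fullySpecified;
fullySpecified.dimensions = hidl_vec<uint32_t>{2, 3, 4};      // rank 3, every dimension known

Operand partiallySpecified;
partiallySpecified.dimensions = hidl_vec<uint32_t>{2, 0, 4};  // rank 3, dimension 1 unspecified

Operand unspecifiedRank;
unspecifiedRank.dimensions = hidl_vec<uint32_t>{};            // rank itself is unknown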
@@ -397,3 +426,255 @@
*/
bool relaxComputationFloat32toFloat16;
};
+
+/**
+ * Describes the shape information of an output operand after execution.
+ */
+struct OutputShape {
+ /**
+ * Dimensions of the operand.
+ */
+ vec<uint32_t> dimensions;
+
+ /**
+ * Whether the provided buffer size is sufficient for the output.
+ */
+ bool isSufficient;
+};
+
+/**
+ * Specifies whether or not to measure timing information during execution.
+ */
+enum MeasureTiming : int32_t {
+ NO = 0,
+ YES = 1,
+};
+
+/**
+ * Timing information measured during execution. Each time is a duration from
+ * the beginning of some task to the end of that task, including time when that
+ * task is not active (for example, preempted by some other task, or
+ * waiting for some resource to become available).
+ *
+ * Times are measured in microseconds.
+ * When a time is not available, it must be reported as UINT64_MAX.
+ */
+struct Timing {
+ /** Execution time on device (not driver, which runs on host processor). */
+ uint64_t timeOnDevice;
+ /** Execution time in driver (including time on device). */
+ uint64_t timeInDriver;
+};
+
+/**
+ * FmqRequestDatum is a single element of a serialized representation of an
+ * execution request (a {@link @1.0::Request} object and a {@link MeasureTiming}
+ * value) which is sent across FastMessageQueue.
+ *
+ * The serialized representation for a particular execution is referred to later
+ * in these descriptions as a 'packet'.
+ *
+ * FastMessageQueue can only pass HIDL-defined types that do not involve nested
+ * buffers, handles, or interfaces.
+ *
+ * The request is serialized as follows:
+ * 1) 'packetInformation'
+ * 2) For each input operand:
+ * 2.1) 'inputOperandInformation'
+ * 2.2) For each dimension element of the operand:
+ * 2.2.1) 'inputOperandDimensionValue'
+ * 3) For each output operand:
+ * 3.1) 'outputOperandInformation'
+ * 3.2) For each dimension element of the operand:
+ * 3.2.1) 'outputOperandDimensionValue'
+ * 4) For each pool:
+ * 4.1) 'poolIdentifier'
+ * 5) 'measureTiming'
+ */
+safe_union FmqRequestDatum {
+ /**
+ * Type to describe the high-level layout of the packet.
+ */
+ struct PacketInformation {
+ /**
+ * How many elements the packet contains, including the
+ * "packetInformation" datum.
+ */
+ uint32_t packetSize;
+
+ /**
+ * Number of input operands.
+ */
+ uint32_t numberOfInputOperands;
+
+ /**
+ * Number of output operands.
+ */
+ uint32_t numberOfOutputOperands;
+
+ /**
+ * Number of pool identifiers.
+ */
+ uint32_t numberOfPools;
+ };
+
+ /**
+ * Type representing the information for each operand.
+ */
+ struct OperandInformation {
+ /**
+ * If true, the argument does not have a value. This can be used for
+ * operations that take optional arguments. If true, the fields of
+ * 'location' are set to 0, 'numberOfDimensions' is set to 0, and the
+ * dimensions information is omitted from the serialization.
+ */
+ bool hasNoValue;
+
+ /**
+ * The location within one of the memory pools passed in the Request.
+ */
+ DataLocation location;
+
+ /**
+ * Number of subsequent elements that belong to the dimensions vector.
+ */
+ uint32_t numberOfDimensions;
+ };
+
+ /**
+ * packetInformation is the first element of the packet and describes the
+ * remainder of the packet.
+ */
+ PacketInformation packetInformation;
+
+ /**
+ * Information for each input operand.
+ */
+ OperandInformation inputOperandInformation;
+
+ /**
+ * Element of the dimensions vector.
+ */
+ uint32_t inputOperandDimensionValue;
+
+ /**
+ * Information for each output operand.
+ */
+ OperandInformation outputOperandInformation;
+
+ /**
+ * Element of the dimensions vector.
+ */
+ uint32_t outputOperandDimensionValue;
+
+ /**
+ * Unique identifier for a pool.
+ *
+ * A {@link @1.0::Request} passes across one or more pools of shared memory
+ * for the inputs and outputs of an execution. However, these memory pools
+ * are not able to be sent across FastMessageQueue directly. Instead, the
+ * producing side of the FMQ represents each different pool with a unique
+ * identifier, and sends this identifier across the FMQ. Whenever the
+ * consuming side of the FMQ needs the memory corresponding to this unique
+ * identifier, it can pass the identifier to
+ * {@link IBurstCallback::getMemories} to retrieve the memory. Although this
+ * HIDL Binder call is expensive compared to communication across FMQ, it is
+ * only needed in the cases when the consumer does not recognize the unique
+ * identifier.
+ */
+ int32_t poolIdentifier;
+
+ /**
+ * Specifies whether or not to measure duration of the execution. The
+ * duration runs from the time the driver dequeues the request from a
+ * FastMessageQueue to the time the driver enqueues results to a
+ * FastMessageQueue.
+ */
+ MeasureTiming measureTiming;
+};
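
To illustrate the packet layout documented above, a hedged sketch of a serializer that follows steps 1) through 5). The helper name and the slot mapping for pools are hypothetical; the production serializer lives in the NN runtime, not in this file:

// Hypothetical serializer; `slots` holds one pool identifier per request.pools entry.
std::vector<FmqRequestDatum> serializeRequest(const V1_0::Request& request,
                                              MeasureTiming measure,
                                              const std::vector<int32_t>& slots) {
    std::vector<FmqRequestDatum> packet;
    FmqRequestDatum datum;

    // 1) packetInformation; packetSize is patched once the packet is complete.
    datum.packetInformation({/*packetSize=*/0,
                             static_cast<uint32_t>(request.inputs.size()),
                             static_cast<uint32_t>(request.outputs.size()),
                             static_cast<uint32_t>(slots.size())});
    packet.push_back(datum);

    // 2) each input operand: information datum, then one datum per dimension element.
    for (const auto& input : request.inputs) {
        datum.inputOperandInformation({input.hasNoValue, input.location,
                                       static_cast<uint32_t>(input.dimensions.size())});
        packet.push_back(datum);
        for (uint32_t d : input.dimensions) {
            datum.inputOperandDimensionValue(d);
            packet.push_back(datum);
        }
    }

    // 3) each output operand, laid out the same way as the inputs.
    for (const auto& output : request.outputs) {
        datum.outputOperandInformation({output.hasNoValue, output.location,
                                        static_cast<uint32_t>(output.dimensions.size())});
        packet.push_back(datum);
        for (uint32_t d : output.dimensions) {
            datum.outputOperandDimensionValue(d);
            packet.push_back(datum);
        }
    }

    // 4) one poolIdentifier per memory pool in the Request.
    for (int32_t slot : slots) {
        datum.poolIdentifier(slot);
        packet.push_back(datum);
    }

    // 5) measureTiming closes the packet; now record the total element count.
    datum.measureTiming(measure);
    packet.push_back(datum);
    packet.front().packetInformation().packetSize = static_cast<uint32_t>(packet.size());
    return packet;
}

When the consuming side encounters a poolIdentifier it does not recognize, it falls back to IBurstCallback::getMemories to resolve the slot, as described above.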
+
+/**
+ * FmqResultDatum is a single element of a serialized representation of the
+ * values returned from an execution ({@link @1.0::ErrorStatus},
+ * vec<{@link OutputShape}>, and {@link Timing}) which is returned via
+ * FastMessageQueue.
+ *
+ * The serialized representation for a particular execution is referred to later
+ * in these descriptions as a 'packet'.
+ *
+ * FastMessageQueue can only pass HIDL-defined types that do not involve nested
+ * buffers, handles, or interfaces.
+ *
+ * The execution return values ({@link @1.0::ErrorStatus},
+ * vec<{@link OutputShape}>, and {@link Timing}) are serialized as follows:
+ * 1) 'packetInformation'
+ * 2) For each returned operand:
+ * 2.1) 'operandInformation'
+ * 2.2) For each dimension element of the operand:
+ * 2.2.1) 'operandDimensionValue'
+ * 3) 'executionTiming'
+ */
+safe_union FmqResultDatum {
+ /**
+ * Type to describe the high-level layout of the packet.
+ */
+ struct PacketInformation {
+ /**
+ * How many elements the packet contains, including the
+ * "packetInformation" datum.
+ */
+ uint32_t packetSize;
+
+ /**
+ * Status of the execution.
+ */
+ ErrorStatus errorStatus;
+
+ /**
+ * Number of returned operands.
+ */
+ uint32_t numberOfOperands;
+ };
+
+ /**
+ * Type representing the information for each operand.
+ */
+ struct OperandInformation {
+ /**
+ * Indicates whether the operand's output buffer is large enough to
+ * store the operand's result data.
+ */
+ bool isSufficient;
+
+ /**
+ * Number of subsequent elements that belong to the dimensions vector.
+ */
+ uint32_t numberOfDimensions;
+ };
+
+ /**
+ * packetInformation is the first element of the packet and describes the
+ * remainder of the packet. It additionally includes the status of the
+ * execution.
+ */
+ PacketInformation packetInformation;
+
+ /**
+ * Information for each returned operand.
+ */
+ OperandInformation operandInformation;
+
+ /**
+ * Element of the dimensions vector.
+ */
+ uint32_t operandDimensionValue;
+
+ /**
+ * Duration of execution. Unless measurement was requested and execution
+ * succeeds, all times must be reported as UINT64_MAX. A driver may choose
+ * to report any time as UINT64_MAX, indicating that measurement is not
+ * available.
+ */
+ Timing executionTiming;
+};
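
Symmetrically, a hedged sketch of unpacking a result packet back into the execution return values (hypothetical helper with minimal validation; a single well-formed packet is assumed):

// Hypothetical helper; assumes `packet` holds exactly one well-formed result packet.
bool deserializeResult(const std::vector<FmqResultDatum>& packet, ErrorStatus* status,
                       std::vector<OutputShape>* outputShapes, Timing* timing) {
    if (packet.empty()) return false;
    size_t i = 0;

    // 1) packetInformation carries the status and the operand count.
    const auto& info = packet[i++].packetInformation();
    *status = info.errorStatus;

    // 2) one OperandInformation per returned operand, each followed by its dimensions.
    outputShapes->resize(info.numberOfOperands);
    for (uint32_t op = 0; op < info.numberOfOperands; ++op) {
        const auto& operandInfo = packet[i++].operandInformation();
        OutputShape& shape = (*outputShapes)[op];
        shape.isSufficient = operandInfo.isSufficient;
        shape.dimensions.resize(operandInfo.numberOfDimensions);
        for (uint32_t d = 0; d < operandInfo.numberOfDimensions; ++d) {
            shape.dimensions[d] = packet[i++].operandDimensionValue();
        }
    }

    // 3) executionTiming closes the packet.
    *timing = packet[i++].executionTiming();
    return i == packet.size() && i == info.packetSize;
}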
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 0f3ddc4..0cb9e16 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -20,7 +20,11 @@
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
srcs: [
"GeneratedTestsV1_0.cpp",
- ]
+ ],
+ cflags: [
+ "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
+ ],
+ test_suites: ["general-tests"],
}
// Tests for V1_1 models using the V1_2 HAL.
@@ -30,6 +34,10 @@
srcs: [
"GeneratedTestsV1_1.cpp",
],
+ cflags: [
+ "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
+ ],
+ test_suites: ["general-tests"],
}
// Tests for V1_2 models.
@@ -40,5 +48,8 @@
"BasicTests.cpp",
"GeneratedTests.cpp",
],
+ cflags: [
+ "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
+ ],
test_suites: ["general-tests"],
}
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index eb3ebd3..8c3ad15 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -45,6 +45,16 @@
});
EXPECT_TRUE(ret.isOk());
}
+
+// device type test
+TEST_F(NeuralnetworksHidlTest, GetDeviceTypeTest) {
+ Return<void> ret = device->getType([](ErrorStatus status, DeviceType type) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
+ type == DeviceType::GPU || type == DeviceType::ACCELERATOR);
+ });
+ EXPECT_TRUE(ret.isOk());
+}
} // namespace functional
} // namespace vts
} // namespace V1_2
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index 9bff09c..4bc891f 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -33,7 +33,8 @@
namespace generated_tests {
using ::test_helper::MixedTypedExample;
extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+ std::function<bool(int)>, const std::vector<MixedTypedExample>&,
+ bool testDynamicOutputShape = false);
} // namespace generated_tests
namespace V1_2 {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
index 56a61d4..956926a 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -33,7 +33,8 @@
namespace generated_tests {
using ::test_helper::MixedTypedExample;
extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+ std::function<bool(int)>, const std::vector<MixedTypedExample>&,
+ bool testDynamicOutputShape = false);
} // namespace generated_tests
namespace V1_2 {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
index 1c781ec..425690f 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -33,7 +33,8 @@
namespace generated_tests {
using ::test_helper::MixedTypedExample;
extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+ std::function<bool(int)>, const std::vector<MixedTypedExample>&,
+ bool testDynamicOutputShape = false);
} // namespace generated_tests
namespace V1_2 {
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index b1a0e53..bee2556 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -172,6 +172,9 @@
static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+ if (invalidRank == 0) {
+ continue;
+ }
const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
" has rank of " + std::to_string(invalidRank);
validate(device, message, model, [operand, invalidRank](Model* model) {
@@ -328,6 +331,8 @@
// - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
// - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
// - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
+ // - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
+ // - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
switch (operation.type) {
case OperationType::LSH_PROJECTION: {
if (operand == operation.inputs[1]) {
@@ -347,6 +352,8 @@
return true;
}
} break;
+ case OperationType::TRANSPOSE_CONV_2D:
+ case OperationType::GROUPED_CONV_2D:
case OperationType::DEPTHWISE_CONV_2D:
case OperationType::CONV_2D: {
if (operand == 1 && (type == OperandType::TENSOR_QUANT8_ASYMM ||
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index d80fbcf..00a7c3e 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -42,6 +42,10 @@
///////////////////////// UTILITY FUNCTIONS /////////////////////////
+static bool badTiming(Timing timing) {
+ return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
+}
+
static void createPreparedModel(const sp<IDevice>& device, const Model& model,
sp<IPreparedModel>* preparedModel) {
ASSERT_NE(nullptr, preparedModel);
@@ -98,27 +102,47 @@
Request request, const std::function<void(Request*)>& mutation) {
mutation(&request);
+ // We'd like to test both with timing requested and without timing
+ // requested. Rather than running each test both ways, we'll decide whether
+ // to request timing by hashing the message. We do not use std::hash because
+ // it is not guaranteed stable across executions.
+ char hash = 0;
+ for (auto c : message) {
+ hash ^= c;
+ };
+ MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
+
{
SCOPED_TRACE(message + " [execute_1_2]");
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executeLaunchStatus =
- preparedModel->execute_1_2(request, executionCallback);
+ preparedModel->execute_1_2(request, measure, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
executionCallback->wait();
ErrorStatus executionReturnStatus = executionCallback->getStatus();
+ const auto& outputShapes = executionCallback->getOutputShapes();
+ Timing timing = executionCallback->getTiming();
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+ ASSERT_EQ(outputShapes.size(), 0);
+ ASSERT_TRUE(badTiming(timing));
}
{
SCOPED_TRACE(message + " [executeSynchronously]");
- Return<ErrorStatus> executeStatus = preparedModel->executeSynchronously(request);
+ Return<void> executeStatus = preparedModel->executeSynchronously(
+ request, measure,
+ [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+ const Timing& timing) {
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+ EXPECT_EQ(outputShapes.size(), 0);
+ EXPECT_TRUE(badTiming(timing));
+ });
ASSERT_TRUE(executeStatus.isOk());
- ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
}
}
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index dedab8d..c0c21bd 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -79,6 +79,9 @@
// Tag for the generated tests
class GeneratedTest : public NeuralnetworksHidlTest {};
+// Tag for the dynamic output shape tests
+class DynamicOutputShapeTest : public NeuralnetworksHidlTest {};
+
// Utility function to get PreparedModel from callback and downcast to V1_2.
sp<IPreparedModel> getPreparedModel_1_2(
const sp<V1_2::implementation::PreparedModelCallback>& callback);
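Reviewer note: the new DynamicOutputShapeTest class is consumed the same way as the existing GeneratedTest tag, with generated test bodies attached via gtest's TEST_F. A hypothetical example (the test name and body are placeholders, not part of this change):

// Hypothetical generated test attached to the new fixture tag.
TEST_F(DynamicOutputShapeTest, ExampleModelWithUnspecifiedOutputDims) {
    // ... build a model whose output dimensions are left unspecified, run it,
    // and check the OutputShapes reported by the driver ...
}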
diff --git a/radio/1.4/IRadio.hal b/radio/1.4/IRadio.hal
index 3fe608f..5124d0e 100644
--- a/radio/1.4/IRadio.hal
+++ b/radio/1.4/IRadio.hal
@@ -105,14 +105,14 @@
oneway setDataProfile_1_4(int32_t serial, vec<DataProfileInfo> profiles);
/**
- * Initiate emergency voice call, with zero or more emergency service category(s) and routing
- * information for handling the call. Android uses this request to make its emergency call
- * instead of using @1.0::IRadio.dial if the 'address' in the 'dialInfo' field is identified
- * as an emergency number by Android.
+ * Initiate emergency voice call, with zero or more emergency service category(s), zero or
+ * more emergency Uniform Resource Names (URN), and routing information for handling the call.
+ * Android uses this request to make its emergency call instead of using @1.0::IRadio.dial
+ * if the 'address' in the 'dialInfo' field is identified as an emergency number by Android.
*
- * In multi-sim senario, this radio request is sent through the IRadio service that serves
- * the subscription the emergency number belongs to, no matter of the PUK/PIN state of the
- * subscription and the service state.
+ * In a multi-SIM scenario, if the emergency number is from a specific subscription, this radio
+ * request is sent through the IRadio service that serves the subscription, regardless of the
+ * PUK/PIN state of the subscription and the service state of the radio.
*
* Some countries or carriers require some emergency numbers that must be handled with normal
* call routing or emergency routing. If the 'routing' field is specified as
@@ -122,22 +122,29 @@
* @1.4::EmergencyNumberRouting#UNKNOWN, Android does not know how to handle the call.
*
* If the dialed emergency number does not have a specified emergency service category, the
- * 'categories' field is set to @1.4::EmergencyServiceCategory#UNSPECIFIED; if the underlying
- * technology used to request emergency services does not support the emergency service
- * category, the categories may be ignored.
+ * 'categories' field is set to @1.4::EmergencyServiceCategory#UNSPECIFIED; if the dialed
+ * emergency number does not have specified emergency Uniform Resource Names, the 'urns' field
+ * is set to an empty list. If the underlying technology used to request emergency services
+ * does not support the emergency service category or emergency uniform resource names, the
+ * 'categories' or 'urns' field may be ignored.
*
- * Reference: 3gpp TS 22.101, Section 10 - Emergency Calls
+ * Reference: 3gpp 22.101, Section 10 - Emergency Calls;
+ * 3gpp 23.167, Section 6 - Functional description;
+ * 3gpp 24.503, Section 5.1.6.8.1 - General;
+ * RFC 5031
*
* @param serial Serial number of request.
* @param dialInfo the same @1.0::Dial information used by @1.0::IRadio.dial.
* @param categories bitfield<@1.4::EmergencyServiceCategory> the Emergency Service Category(s)
* of the call.
+ * @param urns the emergency Uniform Resource Names (URN)
* @param routing @1.4::EmergencyCallRouting the emergency call routing information.
*
* Response function is IRadioResponse.emergencyDialResponse()
*/
oneway emergencyDial(int32_t serial, Dial dialInfo,
- bitfield<EmergencyServiceCategory> categories, EmergencyCallRouting routing);
+ bitfield<EmergencyServiceCategory> categories, vec<string> urns,
+ EmergencyCallRouting routing);
/**
* Starts a network scan
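Reviewer note on the widened emergencyDial signature: a caller now passes the URN list alongside the category bitfield and the routing hint. The sketch below is a hypothetical client-side call using the generated HIDL C++ types; the proxy, serial number, dial address, URN and category values are placeholders, not part of the interface change.

#include <android/hardware/radio/1.4/IRadio.h>

using ::android::hardware::hidl_bitfield;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::radio::V1_0::Dial;
using ::android::hardware::radio::V1_4::EmergencyCallRouting;
using ::android::hardware::radio::V1_4::EmergencyServiceCategory;
using ::android::hardware::radio::V1_4::IRadio;

// `radio` is assumed to be a previously obtained ::android::sp<IRadio> proxy.
void dialEmergencyExample(const ::android::sp<IRadio>& radio) {
    Dial dialInfo = {};
    dialInfo.address = "911";                              // placeholder number

    hidl_vec<hidl_string> urns = {"urn:service:sos"};      // placeholder URN
    hidl_bitfield<EmergencyServiceCategory> categories =
            static_cast<hidl_bitfield<EmergencyServiceCategory>>(
                    EmergencyServiceCategory::POLICE);

    radio->emergencyDial(/*serial=*/1, dialInfo, categories, urns,
                         EmergencyCallRouting::EMERGENCY);
}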
diff --git a/radio/1.4/IRadioIndication.hal b/radio/1.4/IRadioIndication.hal
index a58d19c..58b7b70 100644
--- a/radio/1.4/IRadioIndication.hal
+++ b/radio/1.4/IRadioIndication.hal
@@ -27,22 +27,24 @@
* Report the current list of emergency numbers
*
* Each emergency number (@1.4::EmergencyNumber) in the emergency number list contains a
- * dialing number, zero or more service category(s), mobile country code, mobile network code,
- * and source(s) that indicate where it comes from.
+ * dialing number, zero or more service category(s), zero or more emergency uniform resource
+ * names, mobile country code, mobile network code, and source(s) that indicate where it comes
+ * from.
*
* Radio must report all the valid emergency numbers with known mobile country code, mobile
- * network code and emergency service categories from all available sources including network
- * signaling, sim, modem/oem configuration, and default configuration (112 and 911 must be
- * always available; additionally, 000, 08, 110, 999, 118 and 119 must be available when sim
- * is not present). Radio shall not report emergency numbers that are invalid in the current
- * locale. The reported emergency number list must not have duplicate @1.4::EmergencyNumber
- * entries. Please refer the documentation of @1.4::EmergencyNumber to construct each
- * emergency number to report.
+ * network code, emergency service categories, and emergency uniform resource names from all
+ * available sources including network signaling, sim, modem/oem configuration, and default
+ * configuration (112 and 911 must always be available; additionally, 000, 08, 110, 999, 118
+ * and 119 must be available when sim is not present). Radio shall not report emergency numbers
+ * that are invalid in the current locale. The reported emergency number list must not have
+ * duplicate @1.4::EmergencyNumber entries. Please refer to the documentation of
+ * @1.4::EmergencyNumber to construct each emergency number to report.
*
* Radio must report the complete list of emergency numbers whenever the emergency numbers in
* the list are changed or whenever the client and the radio server are connected.
*
- * Reference: 3gpp 22.101, Section 10 - Emergency Calls
+ * Reference: 3gpp 22.101, Section 10 - Emergency Calls;
+ * 3gpp 24.008, Section 9.2.13.4 - Emergency Number List
*
* @param type Type of radio indication
* @param emergencyNumberList Current list of emergency numbers known to radio.
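Reviewer note: on the client side, the framework receives this list through the corresponding 1.4 indication callback (currentEmergencyNumberList). The sketch below is a hypothetical helper that a client's indication implementation might forward the list to; the logging is purely illustrative and the tag name is a placeholder.

#define LOG_TAG "EmergencyNumberExample"
#include <android/hardware/radio/1.4/IRadioIndication.h>
#include <log/log.h>

using ::android::hardware::hidl_vec;
using ::android::hardware::radio::V1_4::EmergencyNumber;

// A real client would override IRadioIndication::currentEmergencyNumberList and
// pass the received list to a helper like this.
static void logEmergencyNumberList(const hidl_vec<EmergencyNumber>& emergencyNumberList) {
    for (const EmergencyNumber& en : emergencyNumberList) {
        // Each entry carries the dialing number, mcc/mnc, a category bitfield,
        // a URN list, and a bitwise-OR of the sources it was reported from.
        ALOGI("emergency number %s (mcc=%s mnc=%s urns=%zu sources=0x%x)",
              en.number.c_str(), en.mcc.c_str(), en.mnc.c_str(),
              en.urns.size(), static_cast<unsigned>(en.sources));
    }
}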
diff --git a/radio/1.4/types.hal b/radio/1.4/types.hal
index de7befc..6fa7ebd 100644
--- a/radio/1.4/types.hal
+++ b/radio/1.4/types.hal
@@ -54,20 +54,27 @@
};
/**
- * Emergency number contains information of number, one or more service category(s), mobile country
- * code (mcc), mobile network country (mnc) and source(s) that indicate where it comes from.
+ * Emergency number contains information of number, one or more service category(s), zero or more
+ * emergency uniform resource names, mobile country code (mcc), mobile network code (mnc) and
+ * source(s) that indicate where it comes from.
*
- * If the source of the emergency number is associated with country, field ‘mcc’ must be provided;
- * otherwise the field ‘mcc’ must be an empty string.
+ * If the emergency number is associated with a country, field ‘mcc’ must be provided; otherwise
+ * field ‘mcc’ must be an empty string. If the emergency number is associated with a network
+ * operator, fields ‘mcc’ and 'mnc' must be provided; otherwise field ‘mnc’ must be an empty
+ * string. If the emergency number is specified with emergency service category(s), field
+ * 'categories' must be provided; otherwise field 'categories' must be
+ * @1.4::EmergencyServiceCategory#UNSPECIFIED. If the emergency number is specified with
+ * emergency uniform resource names (URN), field 'urns' must be provided; otherwise field 'urns'
+ * must be an empty list.
*
- * If the source of the emergency number is associated with network operator, field ‘mcc’ and
- * 'mnc' must be provided; otherwise the field ‘mnc’ must be an empty string.
+ * A unique EmergencyNumber has a unique combination of ‘number’, ‘mcc’, 'mnc', 'categories' and
+ * 'urns' fields. Multiple @1.4::EmergencyNumberSource should be merged into one 'sources' field
+ * via bitwise-OR combination for the same EmergencyNumber.
*
- * A unique EmergencyNumber has a unique combination of ‘number’, ‘mcc’, 'mnc' and 'categories'
- * fields. Multiple @1.4::EmergencyNumberSource should be merged into the bitfield for the same
- * EmergencyNumber.
- *
- * Reference: 3GPP TS 22.101 version 9.1.0 Release 9
+ * Reference: 3gpp 22.101, Section 10 - Emergency Calls;
+ * 3gpp 23.167, Section 6 - Functional description;
+ * 3gpp 24.503, Section 5.1.6.8.1 - General;
+ * RFC 5031
*/
struct EmergencyNumber{
/**
@@ -89,6 +96,10 @@
*/
bitfield<EmergencyServiceCategory> categories;
/**
+ * The list of emergency Uniform Resource Names (URN).
+ */
+ vec<string> urns;
+ /**
* The bitfield of @1.4::EmergencyNumberSource(s). See @1.4::EmergencyNumberSource for the
* value of each bit.
*/
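Reviewer note: a minimal sketch of how a radio implementation might populate one EmergencyNumber entry per the rules above, assuming the generated C++ types for android.hardware.radio@1.4. The literal number, mcc, URN and source values are placeholders; the point is that several sources for the same number/mcc/mnc/categories/urns combination are merged into one entry by bitwise-OR on 'sources'.

#include <android/hardware/radio/1.4/types.h>

using ::android::hardware::hidl_bitfield;
using ::android::hardware::radio::V1_4::EmergencyNumber;
using ::android::hardware::radio::V1_4::EmergencyNumberSource;
using ::android::hardware::radio::V1_4::EmergencyServiceCategory;

// Hypothetical construction of a single reported entry.
EmergencyNumber makeExampleEntry() {
    EmergencyNumber en;
    en.number = "911";
    en.mcc = "310";   // associated with a country, so mcc is set
    en.mnc = "";      // not tied to a specific operator, so mnc stays empty
    en.categories = static_cast<hidl_bitfield<EmergencyServiceCategory>>(
            EmergencyServiceCategory::POLICE | EmergencyServiceCategory::AMBULANCE);
    en.urns = {"urn:service:sos"};   // zero or more URNs; empty list if none
    // The same number reported by both the SIM and the modem configuration:
    // merge the sources with bitwise-OR instead of duplicating the entry.
    en.sources = static_cast<hidl_bitfield<EmergencyNumberSource>>(
            EmergencyNumberSource::SIM | EmergencyNumberSource::MODEM_CONFIG);
    return en;
}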