Merge "Fix strict weak ordering requirement of less than operation"
diff --git a/camera/device/3.4/default/ExternalCameraDevice.cpp b/camera/device/3.4/default/ExternalCameraDevice.cpp
index e7361dd..38a78e0 100644
--- a/camera/device/3.4/default/ExternalCameraDevice.cpp
+++ b/camera/device/3.4/default/ExternalCameraDevice.cpp
@@ -38,9 +38,8 @@
// Other formats to consider in the future:
// * V4L2_PIX_FMT_YVU420 (== YV12)
// * V4L2_PIX_FMT_YVYU (YVYU: can be converted to YV12 or other YUV420_888 formats)
-const std::array<uint32_t, /*size*/1> kSupportedFourCCs {{
- V4L2_PIX_FMT_MJPEG
-}}; // double braces required in C++11
+const std::array<uint32_t, /*size*/ 2> kSupportedFourCCs{
+ {V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_Z16}}; // double braces required in C++11
constexpr int MAX_RETRY = 5; // Allow retry v4l2 open failures a few times.
constexpr int OPEN_RETRY_SLEEP_US = 100000; // 100ms * MAX_RETRY = 0.5 seconds
@@ -224,6 +223,13 @@
mCameraCharacteristics.clear();
return ret;
}
+
+ ret = initAvailableCapabilities(&mCameraCharacteristics);
+ if (ret != OK) {
+ ALOGE("%s: init available capabilities key failed: errorno %d", __FUNCTION__, ret);
+ mCameraCharacteristics.clear();
+ return ret;
+ }
}
return OK;
}
@@ -237,6 +243,39 @@
} \
} while (0)
+status_t ExternalCameraDevice::initAvailableCapabilities(
+ ::android::hardware::camera::common::V1_0::helper::CameraMetadata* metadata) {
+
+ if (mSupportedFormats.empty()) {
+ ALOGE("%s: Supported formats list is empty", __FUNCTION__);
+ return UNKNOWN_ERROR;
+ }
+
+ bool hasDepth = false;
+ bool hasColor = false;
+ for (const auto& fmt : mSupportedFormats) {
+ switch (fmt.fourcc) {
+ case V4L2_PIX_FMT_Z16: hasDepth = true; break;
+ case V4L2_PIX_FMT_MJPEG: hasColor = true; break;
+ default: ALOGW("%s: Unsupported format found", __FUNCTION__);
+ }
+ }
+
+ std::vector<uint8_t> availableCapabilities;
+ if (hasDepth) {
+ availableCapabilities.push_back(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT);
+ }
+ if (hasColor) {
+ availableCapabilities.push_back(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
+ }
+ if (!availableCapabilities.empty()) {
+ UPDATE(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, availableCapabilities.data(),
+ availableCapabilities.size());
+ }
+
+ return OK;
+}
+
status_t ExternalCameraDevice::initDefaultCharsKeys(
::android::hardware::camera::common::V1_0::helper::CameraMetadata* metadata) {
const uint8_t hardware_level = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL;
@@ -323,12 +362,6 @@
&noiseReductionMode, 1);
UPDATE(ANDROID_NOISE_REDUCTION_MODE, &noiseReductionMode, 1);
- // android.request
- const uint8_t availableCapabilities[] = {
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE};
- UPDATE(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, availableCapabilities,
- ARRAY_SIZE(availableCapabilities));
-
const int32_t partialResultCount = 1;
UPDATE(ANDROID_REQUEST_PARTIAL_RESULT_COUNT, &partialResultCount, 1);
@@ -576,9 +609,11 @@
return OK;
}
-status_t ExternalCameraDevice::initOutputCharsKeys(int fd,
- ::android::hardware::camera::common::V1_0::helper::CameraMetadata* metadata) {
- initSupportedFormatsLocked(fd);
+template <size_t SIZE>
+status_t ExternalCameraDevice::initOutputCharskeysByFormat(
+ ::android::hardware::camera::common::V1_0::helper::CameraMetadata* metadata,
+ uint32_t fourcc, const std::array<int, SIZE>& halFormats,
+ int streamConfigTag, int streamConfiguration, int minFrameDuration, int stallDuration) {
if (mSupportedFormats.empty()) {
ALOGE("%s: Init supported format list failed", __FUNCTION__);
return UNKNOWN_ERROR;
@@ -587,22 +622,17 @@
std::vector<int32_t> streamConfigurations;
std::vector<int64_t> minFrameDurations;
std::vector<int64_t> stallDurations;
- int32_t maxFps = std::numeric_limits<int32_t>::min();
- int32_t minFps = std::numeric_limits<int32_t>::max();
- std::set<int32_t> framerates;
-
- std::array<int, /*size*/3> halFormats{{
- HAL_PIXEL_FORMAT_BLOB,
- HAL_PIXEL_FORMAT_YCbCr_420_888,
- HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}};
for (const auto& supportedFormat : mSupportedFormats) {
+ if (supportedFormat.fourcc != fourcc) {
+ // Skip 4CCs not meant for the halFormats
+ continue;
+ }
for (const auto& format : halFormats) {
streamConfigurations.push_back(format);
streamConfigurations.push_back(supportedFormat.width);
streamConfigurations.push_back(supportedFormat.height);
- streamConfigurations.push_back(
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
+ streamConfigurations.push_back(streamConfigTag);
}
int64_t minFrameDuration = std::numeric_limits<int64_t>::max();
@@ -614,14 +644,6 @@
if (frameDuration < minFrameDuration) {
minFrameDuration = frameDuration;
}
- int32_t frameRateInt = static_cast<int32_t>(fr.getDouble());
- if (minFps > frameRateInt) {
- minFps = frameRateInt;
- }
- if (maxFps < frameRateInt) {
- maxFps = frameRateInt;
- }
- framerates.insert(frameRateInt);
}
for (const auto& format : halFormats) {
@@ -645,6 +667,30 @@
}
}
+ UPDATE(streamConfiguration, streamConfigurations.data(), streamConfigurations.size());
+
+ UPDATE(minFrameDuration, minFrameDurations.data(), minFrameDurations.size());
+
+ UPDATE(stallDuration, stallDurations.data(), stallDurations.size());
+
+ return OK;
+}
+
+bool ExternalCameraDevice::calculateMinFps(
+ ::android::hardware::camera::common::V1_0::helper::CameraMetadata* metadata) {
+ std::set<int32_t> framerates;
+ int32_t minFps = std::numeric_limits<int32_t>::max();
+
+ for (const auto& supportedFormat : mSupportedFormats) {
+ for (const auto& fr : supportedFormat.frameRates) {
+ int32_t frameRateInt = static_cast<int32_t>(fr.getDouble());
+ if (minFps > frameRateInt) {
+ minFps = frameRateInt;
+ }
+ framerates.insert(frameRateInt);
+ }
+ }
+
std::vector<int32_t> fpsRanges;
// FPS ranges
for (const auto& framerate : framerates) {
@@ -658,17 +704,60 @@
UPDATE(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, fpsRanges.data(),
fpsRanges.size());
- UPDATE(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- streamConfigurations.data(), streamConfigurations.size());
-
- UPDATE(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- minFrameDurations.data(), minFrameDurations.size());
-
- UPDATE(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, stallDurations.data(),
- stallDurations.size());
-
UPDATE(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, &maxFrameDuration, 1);
+ return true;
+}
+
+status_t ExternalCameraDevice::initOutputCharsKeys(
+ int fd, ::android::hardware::camera::common::V1_0::helper::CameraMetadata* metadata) {
+ initSupportedFormatsLocked(fd);
+ if (mSupportedFormats.empty()) {
+ ALOGE("%s: Init supported format list failed", __FUNCTION__);
+ return UNKNOWN_ERROR;
+ }
+
+ bool hasDepth = false;
+ bool hasColor = false;
+
+ // For V4L2_PIX_FMT_Z16
+ std::array<int, /*size*/ 1> halDepthFormats{{HAL_PIXEL_FORMAT_Y16}};
+ // For V4L2_PIX_FMT_MJPEG
+ std::array<int, /*size*/ 3> halFormats{{HAL_PIXEL_FORMAT_BLOB, HAL_PIXEL_FORMAT_YCbCr_420_888,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}};
+
+ for (const auto& supportedFormat : mSupportedFormats) {
+ switch (supportedFormat.fourcc) {
+ case V4L2_PIX_FMT_Z16:
+ hasDepth = true;
+ break;
+ case V4L2_PIX_FMT_MJPEG:
+ hasColor = true;
+ break;
+ default:
+ ALOGW("%s: format %c%c%c%c is not supported!", __FUNCTION__,
+ supportedFormat.fourcc & 0xFF, (supportedFormat.fourcc >> 8) & 0xFF,
+ (supportedFormat.fourcc >> 16) & 0xFF, (supportedFormat.fourcc >> 24) & 0xFF);
+ }
+ }
+
+ if (hasDepth) {
+ initOutputCharskeysByFormat(metadata, V4L2_PIX_FMT_Z16, halDepthFormats,
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT,
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS,
+ ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS);
+ }
+ if (hasColor) {
+ initOutputCharskeysByFormat(metadata, V4L2_PIX_FMT_MJPEG, halFormats,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ ANDROID_SCALER_AVAILABLE_STALL_DURATIONS);
+ }
+
+ calculateMinFps(metadata);
+
SupportedV4L2Format maximumFormat {.width = 0, .height = 0};
for (const auto& supportedFormat : mSupportedFormats) {
if (supportedFormat.width >= maximumFormat.width &&
@@ -790,11 +879,12 @@
sortedFmts = out;
}
-std::vector<SupportedV4L2Format>
-ExternalCameraDevice::getCandidateSupportedFormatsLocked(
- int fd, CroppingType cropType,
- const std::vector<ExternalCameraConfig::FpsLimitation>& fpsLimits,
- const Size& minStreamSize) {
+std::vector<SupportedV4L2Format> ExternalCameraDevice::getCandidateSupportedFormatsLocked(
+ int fd, CroppingType cropType,
+ const std::vector<ExternalCameraConfig::FpsLimitation>& fpsLimits,
+ const std::vector<ExternalCameraConfig::FpsLimitation>& depthFpsLimits,
+ const Size& minStreamSize,
+ bool depthEnabled) {
std::vector<SupportedV4L2Format> outFmts;
struct v4l2_fmtdesc fmtdesc {
.index = 0,
@@ -840,28 +930,10 @@
.fourcc = fmtdesc.pixelformat
};
- double fpsUpperBound = -1.0;
- for (const auto& limit : fpsLimits) {
- if (cropType == VERTICAL) {
- if (format.width <= limit.size.width) {
- fpsUpperBound = limit.fpsUpperBound;
- break;
- }
- } else { // HORIZONTAL
- if (format.height <= limit.size.height) {
- fpsUpperBound = limit.fpsUpperBound;
- break;
- }
- }
-
- }
- if (fpsUpperBound < 0.f) {
- continue;
- }
-
- getFrameRateList(fd, fpsUpperBound, &format);
- if (!format.frameRates.empty()) {
- outFmts.push_back(format);
+ if (format.fourcc == V4L2_PIX_FMT_Z16 && depthEnabled) {
+ updateFpsBounds(fd, cropType, depthFpsLimits, format, outFmts);
+ } else {
+ updateFpsBounds(fd, cropType, fpsLimits, format, outFmts);
}
}
}
@@ -873,12 +945,39 @@
return outFmts;
}
-void ExternalCameraDevice::initSupportedFormatsLocked(int fd) {
+void ExternalCameraDevice::updateFpsBounds(
+ int fd, CroppingType cropType,
+ const std::vector<ExternalCameraConfig::FpsLimitation>& fpsLimits, SupportedV4L2Format format,
+ std::vector<SupportedV4L2Format>& outFmts) {
+ double fpsUpperBound = -1.0;
+ for (const auto& limit : fpsLimits) {
+ if (cropType == VERTICAL) {
+ if (format.width <= limit.size.width) {
+ fpsUpperBound = limit.fpsUpperBound;
+ break;
+ }
+ } else { // HORIZONTAL
+ if (format.height <= limit.size.height) {
+ fpsUpperBound = limit.fpsUpperBound;
+ break;
+ }
+ }
+ }
+ if (fpsUpperBound < 0.f) {
+ return;
+ }
- std::vector<SupportedV4L2Format> horizontalFmts =
- getCandidateSupportedFormatsLocked(fd, HORIZONTAL, mCfg.fpsLimits, mCfg.minStreamSize);
- std::vector<SupportedV4L2Format> verticalFmts =
- getCandidateSupportedFormatsLocked(fd, VERTICAL, mCfg.fpsLimits, mCfg.minStreamSize);
+ getFrameRateList(fd, fpsUpperBound, &format);
+ if (!format.frameRates.empty()) {
+ outFmts.push_back(format);
+ }
+}
+
+void ExternalCameraDevice::initSupportedFormatsLocked(int fd) {
+ std::vector<SupportedV4L2Format> horizontalFmts = getCandidateSupportedFormatsLocked(
+ fd, HORIZONTAL, mCfg.fpsLimits, mCfg.depthFpsLimits, mCfg.minStreamSize, mCfg.depthEnabled);
+ std::vector<SupportedV4L2Format> verticalFmts = getCandidateSupportedFormatsLocked(
+ fd, VERTICAL, mCfg.fpsLimits, mCfg.depthFpsLimits, mCfg.minStreamSize, mCfg.depthEnabled);
size_t horiSize = horizontalFmts.size();
size_t vertSize = verticalFmts.size();
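
Editorial note: the net effect in ExternalCameraDevice.cpp is that the static capability list is now derived from the supported 4CCs instead of being hard-coded to BACKWARD_COMPATIBLE. A minimal standalone sketch of that mapping; the capability values below are stand-ins for the ANDROID_REQUEST_AVAILABLE_CAPABILITIES_* enums, assumed here for illustration:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Build a fourcc the same way as the v4l2_fourcc() macro;
    // V4L2_PIX_FMT_MJPEG is 'MJPG' and V4L2_PIX_FMT_Z16 is 'Z16 '.
    constexpr uint32_t fourcc(char a, char b, char c, char d) {
        return uint32_t(uint8_t(a)) | uint32_t(uint8_t(b)) << 8 |
               uint32_t(uint8_t(c)) << 16 | uint32_t(uint8_t(d)) << 24;
    }
    constexpr uint32_t kMjpeg = fourcc('M', 'J', 'P', 'G');
    constexpr uint32_t kZ16   = fourcc('Z', '1', '6', ' ');

    // Stand-ins for the ANDROID_REQUEST_AVAILABLE_CAPABILITIES_* values (assumed).
    enum : uint8_t { kCapBackwardCompatible = 0, kCapDepthOutput = 8 };

    // Same shape as initAvailableCapabilities(): MJPEG implies a color
    // (BACKWARD_COMPATIBLE) device, Z16 implies DEPTH_OUTPUT.
    std::vector<uint8_t> deriveCapabilities(const std::vector<uint32_t>& fourccs) {
        bool hasDepth = false, hasColor = false;
        for (uint32_t f : fourccs) {
            if (f == kZ16) hasDepth = true;
            else if (f == kMjpeg) hasColor = true;
        }
        std::vector<uint8_t> caps;
        if (hasDepth) caps.push_back(kCapDepthOutput);
        if (hasColor) caps.push_back(kCapBackwardCompatible);
        return caps;
    }

    int main() {
        // A camera exposing both formats advertises both capabilities.
        printf("%zu capabilities\n", deriveCapabilities({kMjpeg, kZ16}).size());
    }
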
diff --git a/camera/device/3.4/default/ExternalCameraDeviceSession.cpp b/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
index dce40ff..a12d8e4 100644
--- a/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
+++ b/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
@@ -1724,7 +1724,7 @@
return false;
};
- if (req->frameIn->mFourcc != V4L2_PIX_FMT_MJPEG) {
+ if (req->frameIn->mFourcc != V4L2_PIX_FMT_MJPEG && req->frameIn->mFourcc != V4L2_PIX_FMT_Z16) {
return onDeviceError("%s: do not support V4L2 format %c%c%c%c", __FUNCTION__,
req->frameIn->mFourcc & 0xFF,
(req->frameIn->mFourcc >> 8) & 0xFF,
@@ -1743,29 +1743,26 @@
}
// TODO: in some special case maybe we can decode jpg directly to gralloc output?
- ATRACE_BEGIN("MJPGtoI420");
- int res = libyuv::MJPGToI420(
- inData, inDataSize,
- static_cast<uint8_t*>(mYu12FrameLayout.y),
- mYu12FrameLayout.yStride,
- static_cast<uint8_t*>(mYu12FrameLayout.cb),
- mYu12FrameLayout.cStride,
- static_cast<uint8_t*>(mYu12FrameLayout.cr),
- mYu12FrameLayout.cStride,
- mYu12Frame->mWidth, mYu12Frame->mHeight,
- mYu12Frame->mWidth, mYu12Frame->mHeight);
- ATRACE_END();
+ if (req->frameIn->mFourcc == V4L2_PIX_FMT_MJPEG) {
+ ATRACE_BEGIN("MJPGtoI420");
+ int res = libyuv::MJPGToI420(
+ inData, inDataSize, static_cast<uint8_t*>(mYu12FrameLayout.y), mYu12FrameLayout.yStride,
+ static_cast<uint8_t*>(mYu12FrameLayout.cb), mYu12FrameLayout.cStride,
+ static_cast<uint8_t*>(mYu12FrameLayout.cr), mYu12FrameLayout.cStride,
+ mYu12Frame->mWidth, mYu12Frame->mHeight, mYu12Frame->mWidth, mYu12Frame->mHeight);
+ ATRACE_END();
- if (res != 0) {
- // For some webcam, the first few V4L2 frames might be malformed...
- ALOGE("%s: Convert V4L2 frame to YU12 failed! res %d", __FUNCTION__, res);
- lk.unlock();
- Status st = parent->processCaptureRequestError(req);
- if (st != Status::OK) {
- return onDeviceError("%s: failed to process capture request error!", __FUNCTION__);
+ if (res != 0) {
+ // For some webcams, the first few V4L2 frames might be malformed...
+ ALOGE("%s: Convert V4L2 frame to YU12 failed! res %d", __FUNCTION__, res);
+ lk.unlock();
+ Status st = parent->processCaptureRequestError(req);
+ if (st != Status::OK) {
+ return onDeviceError("%s: failed to process capture request error!", __FUNCTION__);
+ }
+ signalRequestDone();
+ return true;
}
- signalRequestDone();
- return true;
}
ALOGV("%s processing new request", __FUNCTION__);
@@ -1796,6 +1793,16 @@
__FUNCTION__, ret);
}
} break;
+ case PixelFormat::Y16: {
+ void* outLayout = sHandleImporter.lock(*(halBuf.bufPtr), halBuf.usage, inDataSize);
+
+ std::memcpy(outLayout, inData, inDataSize);
+
+ int relFence = sHandleImporter.unlock(*(halBuf.bufPtr));
+ if (relFence >= 0) {
+ halBuf.acquireFence = relFence;
+ }
+ } break;
case PixelFormat::YCBCR_420_888:
case PixelFormat::YV12: {
IMapper::Rect outRect {0, 0,
@@ -2063,11 +2070,6 @@
return false;
}
- if (ds & Dataspace::DEPTH) {
- ALOGI("%s: does not support depth output", __FUNCTION__);
- return false;
- }
-
switch (fmt) {
case PixelFormat::BLOB:
if (ds != static_cast<int32_t>(Dataspace::V0_JFIF)) {
@@ -2081,6 +2083,16 @@
// TODO: check what dataspace we can support here.
// intentional no-ops.
break;
+ case PixelFormat::Y16:
+ if (!mCfg.depthEnabled) {
+ ALOGI("%s: Depth is not Enabled", __FUNCTION__);
+ return false;
+ }
+ if (!(ds & Dataspace::DEPTH)) {
+ ALOGI("%s: Y16 supports only dataSpace DEPTH", __FUNCTION__);
+ return false;
+ }
+ break;
default:
ALOGI("%s: does not support format %x", __FUNCTION__, fmt);
return false;
@@ -2609,6 +2621,7 @@
case PixelFormat::BLOB:
case PixelFormat::YCBCR_420_888:
case PixelFormat::YV12: // Used by SurfaceTexture
+ case PixelFormat::Y16:
// No override
out->streams[i].v3_2.overrideFormat = config.streams[i].format;
break;
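
Editorial note: the session-side contract is deliberately small. Y16 streams are accepted only when depth output is enabled in the config and the stream requests a DEPTH dataspace, and the Y16 output path is a plain memcpy of the V4L2 payload into the locked gralloc buffer. A sketch of the validation rule with stand-in types; the Dataspace bit values are assumed here, not taken from the HIDL headers:

    #include <cstdint>

    // Stand-in pixel formats and dataspace bits (illustrative values only).
    enum class PixelFormat : int32_t { BLOB, Y16, YCBCR_420_888, YV12 };
    constexpr int32_t kDataspaceDepth  = 0x1000;    // assumed Dataspace::DEPTH
    constexpr int32_t kDataspaceV0Jfif = 0x8C20000; // assumed Dataspace::V0_JFIF

    // Mirrors the patched isSupported() logic for the formats shown above.
    bool isStreamCombinationOk(PixelFormat fmt, int32_t ds, bool depthEnabled) {
        switch (fmt) {
            case PixelFormat::BLOB:
                return ds == kDataspaceV0Jfif;  // JPEG output must be V0_JFIF
            case PixelFormat::Y16:
                // Both conditions from the diff: config opt-in and DEPTH dataspace.
                return depthEnabled && (ds & kDataspaceDepth) != 0;
            case PixelFormat::YCBCR_420_888:
            case PixelFormat::YV12:
                return true;                    // dataspace check is a no-op today
        }
        return false;
    }
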
diff --git a/camera/device/3.4/default/ExternalCameraUtils.cpp b/camera/device/3.4/default/ExternalCameraUtils.cpp
index 680c95a..a07c629 100644
--- a/camera/device/3.4/default/ExternalCameraUtils.cpp
+++ b/camera/device/3.4/default/ExternalCameraUtils.cpp
@@ -21,7 +21,6 @@
#include <sys/mman.h>
#include <linux/videodev2.h>
#include "ExternalCameraUtils.h"
-#include "tinyxml2.h" // XML parsing
namespace android {
namespace hardware {
@@ -243,28 +242,28 @@
if (fpsList == nullptr) {
ALOGI("%s: no fps list specified", __FUNCTION__);
} else {
- std::vector<FpsLimitation> limits;
- XMLElement *row = fpsList->FirstChildElement("Limit");
- while (row != nullptr) {
- FpsLimitation prevLimit {{0, 0}, 1000.0};
- FpsLimitation limit;
- limit.size = {
- row->UnsignedAttribute("width", /*Default*/0),
- row->UnsignedAttribute("height", /*Default*/0)};
- limit.fpsUpperBound = row->DoubleAttribute("fpsBound", /*Default*/1000.0);
- if (limit.size.width <= prevLimit.size.width ||
- limit.size.height <= prevLimit.size.height ||
- limit.fpsUpperBound >= prevLimit.fpsUpperBound) {
- ALOGE("%s: FPS limit list must have increasing size and decreasing fps!"
- " Prev %dx%d@%f, Current %dx%d@%f", __FUNCTION__,
- prevLimit.size.width, prevLimit.size.height, prevLimit.fpsUpperBound,
- limit.size.width, limit.size.height, limit.fpsUpperBound);
+ if (!updateFpsList(fpsList, ret.fpsLimits)) {
+ return ret;
+ }
+ }
+
+ XMLElement *depth = deviceCfg->FirstChildElement("Depth16Supported");
+ if (depth == nullptr) {
+ ret.depthEnabled = false;
+ ALOGI("%s: depth output is not enabled", __FUNCTION__);
+ } else {
+ ret.depthEnabled = depth->BoolAttribute("enabled", false);
+ }
+
+ if (ret.depthEnabled) {
+ XMLElement *depthFpsList = deviceCfg->FirstChildElement("DepthFpsList");
+ if (depthFpsList == nullptr) {
+ ALOGW("%s: no depth fps list specified", __FUNCTION__);
+ } else {
+ if (!updateFpsList(depthFpsList, ret.depthFpsLimits)) {
return ret;
}
- limits.push_back(limit);
- row = row->NextSiblingElement("Limit");
}
- ret.fpsLimits = limits;
}
XMLElement *minStreamSize = deviceCfg->FirstChildElement("MinimumStreamSize");
@@ -284,15 +283,48 @@
ALOGI("%s: fpsLimitList: %dx%d@%f", __FUNCTION__,
limit.size.width, limit.size.height, limit.fpsUpperBound);
}
+ for (const auto& limit : ret.depthFpsLimits) {
+ ALOGI("%s: depthFpsLimitList: %dx%d@%f", __FUNCTION__, limit.size.width, limit.size.height,
+ limit.fpsUpperBound);
+ }
ALOGI("%s: minStreamSize: %dx%d" , __FUNCTION__,
ret.minStreamSize.width, ret.minStreamSize.height);
return ret;
}
+bool ExternalCameraConfig::updateFpsList(tinyxml2::XMLElement* fpsList,
+ std::vector<FpsLimitation>& fpsLimits) {
+ using namespace tinyxml2;
+ std::vector<FpsLimitation> limits;
+ FpsLimitation prevLimit{{0, 0}, 1000.0};
+ XMLElement* row = fpsList->FirstChildElement("Limit");
+ while (row != nullptr) {
+ FpsLimitation limit;
+ limit.size = {row->UnsignedAttribute("width", /*Default*/ 0),
+ row->UnsignedAttribute("height", /*Default*/ 0)};
+ limit.fpsUpperBound = row->DoubleAttribute("fpsBound", /*Default*/ 1000.0);
+ if (limit.size.width <= prevLimit.size.width ||
+ limit.size.height <= prevLimit.size.height ||
+ limit.fpsUpperBound >= prevLimit.fpsUpperBound) {
+ ALOGE(
+ "%s: FPS limit list must have increasing size and decreasing fps!"
+ " Prev %dx%d@%f, Current %dx%d@%f",
+ __FUNCTION__, prevLimit.size.width, prevLimit.size.height, prevLimit.fpsUpperBound,
+ limit.size.width, limit.size.height, limit.fpsUpperBound);
+ return false;
+ }
+ limits.push_back(limit);
+ prevLimit = limit; // track the previous entry so the ordering check is actually enforced
+ row = row->NextSiblingElement("Limit");
+ }
+ fpsLimits = limits;
+ return true;
+}
+
ExternalCameraConfig::ExternalCameraConfig() :
maxJpegBufSize(kDefaultJpegBufSize),
numVideoBuffers(kDefaultNumVideoBuffer),
- numStillBuffers(kDefaultNumStillBuffer) {
+ numStillBuffers(kDefaultNumStillBuffer),
+ depthEnabled(false) {
fpsLimits.push_back({/*Size*/{ 640, 480}, /*FPS upper bound*/30.0});
fpsLimits.push_back({/*Size*/{1280, 720}, /*FPS upper bound*/7.5});
fpsLimits.push_back({/*Size*/{1920, 1080}, /*FPS upper bound*/5.0});
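
Editorial note: a compilable sketch of the config shape the parser now accepts. The element and attribute names (Depth16Supported, DepthFpsList, Limit, enabled, fpsBound) match the diff, while the surrounding XML structure is illustrative:

    #include <cstdio>
    #include "tinyxml2.h"

    // Illustrative config snippet; only the elements read by the patch are shown.
    static const char* kSampleCfg = R"(
        <Device>
            <Depth16Supported enabled="true"/>
            <DepthFpsList>
                <Limit width="640" height="480" fpsBound="30.0"/>
            </DepthFpsList>
        </Device>)";

    int main() {
        using namespace tinyxml2;
        XMLDocument doc;
        if (doc.Parse(kSampleCfg) != XML_SUCCESS) return 1;
        XMLElement* dev = doc.FirstChildElement("Device");
        XMLElement* depth = dev->FirstChildElement("Depth16Supported");
        // Same default as the diff: depth stays off unless enabled="true".
        bool depthEnabled = depth ? depth->BoolAttribute("enabled", false) : false;
        printf("depthEnabled=%d\n", depthEnabled);
        XMLElement* list = dev->FirstChildElement("DepthFpsList");
        for (XMLElement* row = list ? list->FirstChildElement("Limit") : nullptr;
             row != nullptr; row = row->NextSiblingElement("Limit")) {
            printf("limit %ux%u @ %.1f fps\n",
                   row->UnsignedAttribute("width", /*Default*/ 0),
                   row->UnsignedAttribute("height", /*Default*/ 0),
                   row->DoubleAttribute("fpsBound", /*Default*/ 1000.0));
        }
    }
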
diff --git a/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraDevice_3_4.h b/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraDevice_3_4.h
index ff0cfb3..28b9cef 100644
--- a/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraDevice_3_4.h
+++ b/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraDevice_3_4.h
@@ -82,6 +82,9 @@
void initSupportedFormatsLocked(int fd);
status_t initCameraCharacteristics();
+ // Init available capabilities keys
+ status_t initAvailableCapabilities(
+ ::android::hardware::camera::common::V1_0::helper::CameraMetadata*);
// Init non-device dependent keys
status_t initDefaultCharsKeys(::android::hardware::camera::common::V1_0::helper::CameraMetadata*);
// Init camera control chars keys. Caller still owns fd
@@ -91,13 +94,30 @@
status_t initOutputCharsKeys(int fd,
::android::hardware::camera::common::V1_0::helper::CameraMetadata*);
+ // Helper function for initOutputCharsKeys
+ template <size_t SIZE>
+ status_t initOutputCharskeysByFormat(
+ ::android::hardware::camera::common::V1_0::helper::CameraMetadata*,
+ uint32_t fourcc, const std::array<int, SIZE>& formats,
+ int scaler_stream_config_tag,
+ int stream_configuration, int min_frame_duration, int stall_duration);
+
+ bool calculateMinFps(::android::hardware::camera::common::V1_0::helper::CameraMetadata*);
+
static void getFrameRateList(int fd, double fpsUpperBound, SupportedV4L2Format* format);
+ static void updateFpsBounds(int fd, CroppingType cropType,
+ const std::vector<ExternalCameraConfig::FpsLimitation>& fpsLimits,
+ SupportedV4L2Format format,
+ std::vector<SupportedV4L2Format>& outFmts);
+
// Get candidate supported formats list of input cropping type.
static std::vector<SupportedV4L2Format> getCandidateSupportedFormatsLocked(
int fd, CroppingType cropType,
const std::vector<ExternalCameraConfig::FpsLimitation>& fpsLimits,
- const Size& minStreamSize);
+ const std::vector<ExternalCameraConfig::FpsLimitation>& depthFpsLimits,
+ const Size& minStreamSize,
+ bool depthEnabled);
// Trim supported format list by the cropping type. Also sort output formats by width/height
static void trimSupportedFormats(CroppingType cropType,
/*inout*/std::vector<SupportedV4L2Format>* pFmts);
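
Editorial note: the template helper exists because depth and color streams publish the same (format, width, height, direction) quadruples under different metadata tags (ANDROID_DEPTH_* vs ANDROID_SCALER_*). A sketch of the flattening it performs, with stand-in types:

    #include <array>
    #include <cstdint>
    #include <vector>

    struct Size { uint32_t width, height; };

    // Flatten (halFormat, width, height, direction) quadruples, as
    // initOutputCharskeysByFormat() does before handing the result to
    // UPDATE() with whichever metadata tag was passed in.
    template <size_t SIZE>
    std::vector<int32_t> flattenStreamConfigs(const std::vector<Size>& sizes,
                                              const std::array<int, SIZE>& halFormats,
                                              int32_t directionTag) {
        std::vector<int32_t> out;
        out.reserve(sizes.size() * SIZE * 4);
        for (const auto& s : sizes) {
            for (int fmt : halFormats) {
                out.push_back(fmt);
                out.push_back(static_cast<int32_t>(s.width));
                out.push_back(static_cast<int32_t>(s.height));
                out.push_back(directionTag);
            }
        }
        return out;
    }
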
diff --git a/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraUtils.h b/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraUtils.h
index 5754ccb..f696057 100644
--- a/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraUtils.h
+++ b/camera/device/3.4/default/include/ext_device_v3_4_impl/ExternalCameraUtils.h
@@ -17,12 +17,13 @@
#ifndef ANDROID_HARDWARE_CAMERA_DEVICE_V3_4_EXTCAMUTIL_H
#define ANDROID_HARDWARE_CAMERA_DEVICE_V3_4_EXTCAMUTIL_H
-#include <inttypes.h>
-#include "utils/LightRefBase.h"
-#include <mutex>
-#include <vector>
-#include <unordered_set>
#include <android/hardware/graphics/mapper/2.0/IMapper.h>
+#include <inttypes.h>
+#include <mutex>
+#include <unordered_set>
+#include <vector>
+#include "tinyxml2.h" // XML parsing
+#include "utils/LightRefBase.h"
using android::hardware::graphics::mapper::V2_0::IMapper;
using android::hardware::graphics::mapper::V2_0::YCbCrLayout;
@@ -71,17 +72,22 @@
// Size of v4l2 buffer queue when streaming > kMaxVideoSize
uint32_t numStillBuffers;
+ // Indicates that the connected device supports depth output
+ bool depthEnabled;
+
struct FpsLimitation {
Size size;
double fpsUpperBound;
};
std::vector<FpsLimitation> fpsLimits;
+ std::vector<FpsLimitation> depthFpsLimits;
// Minimum output stream size
Size minStreamSize;
private:
ExternalCameraConfig();
+ static bool updateFpsList(tinyxml2::XMLElement* fpsList, std::vector<FpsLimitation>& fpsLimits);
};
} // common
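
Editorial note: the invariant behind updateFpsList(), distilled: each successive FpsLimitation must cover a strictly larger size and carry a strictly lower fps upper bound, starting from a {0x0, 1000 fps} sentinel. A self-contained version of the check, assuming struct shapes like the ones declared above:

    #include <vector>

    struct Size { unsigned width, height; };
    struct FpsLimitation { Size size; double fpsUpperBound; };

    // Valid lists are strictly increasing in size and strictly
    // decreasing in fps bound, e.g. 640x480@30 -> 1280x720@7.5 -> 1920x1080@5.
    bool isValidFpsLimitList(const std::vector<FpsLimitation>& limits) {
        FpsLimitation prev{{0, 0}, 1000.0};
        for (const auto& l : limits) {
            if (l.size.width <= prev.size.width || l.size.height <= prev.size.height ||
                l.fpsUpperBound >= prev.fpsUpperBound) {
                return false;
            }
            prev = l;  // carry the previous entry forward
        }
        return true;
    }
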
diff --git a/camera/metadata/3.3/types.hal b/camera/metadata/3.3/types.hal
index 27d82b9..539ae68 100644
--- a/camera/metadata/3.3/types.hal
+++ b/camera/metadata/3.3/types.hal
@@ -22,7 +22,6 @@
package android.hardware.camera.metadata@3.3;
-/* Include definitions from all prior minor HAL metadata revisions */
import android.hardware.camera.metadata@3.2;
/**
diff --git a/current.txt b/current.txt
index cb6c19a..69996dd 100644
--- a/current.txt
+++ b/current.txt
@@ -390,7 +390,7 @@
684702a60deef03a1e8093961dc0a18c555c857ad5a77ba7340b0635ae01eb70 android.hardware.camera.device@3.4::ICameraDeviceSession
f8a19622cb0cc890913b1ef3e32b675ffb26089a09e02fef4056ebad324d2b5d android.hardware.camera.device@3.4::types
291638a1b6d4e63283e9e722ab5049d9351717ffa2b66162124f84d1aa7c2835 android.hardware.camera.metadata@3.2::types
-8a075cf3a17fe99c6d23415a3e9a65612f1fee73ee052a3a8a0ca5b8877395a4 android.hardware.camera.metadata@3.3::types
+f4aca082ad436f00b3bed8b9b9dfdc01f6460afdbee7ca10fedb5e34bddcc96f android.hardware.camera.metadata@3.3::types
da33234403ff5d60f3473711917b9948e6484a4260b5247acdafb111193a9de2 android.hardware.configstore@1.0::ISurfaceFlingerConfigs
21165b8e30c4b2d52980e4728f661420adc16e38bbe73476c06b2085be908f4c android.hardware.gnss@1.0::IGnssCallback
d702fb01dc2a0733aa820b7eb65435ee3334f75632ef880bafd2fb8803a20a58 android.hardware.gnss@1.0::IGnssMeasurementCallback
@@ -446,11 +446,11 @@
2b4a14661e6a38617b7dd0c6ebb66a56a90e564674ac7697a14cb8a0cab92b2f android.hardware.health.storage@1.0::types
4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback
19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext
-96249c852dabeefa3a9496ecdfc44681a071c665bfbf88527bf775c88bf1ab1b android.hardware.neuralnetworks@1.2::IDevice
+b83317b66721241887d2770b5ae95fd5af1e77c5daa7530ecb08fae8892f2b43 android.hardware.neuralnetworks@1.2::IDevice
92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
-83885d366f22ada42c00d8854f0b7e7ba4cf73ddf80bb0d8e168ce132cec57ea android.hardware.neuralnetworks@1.2::IPreparedModel
+36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-114056b3b9303e0e858f28e718ba45722de5678d1d54eec0dcd10788604bf2bb android.hardware.neuralnetworks@1.2::types
+209a5ee694b94328afb2af2768f1fe6a69148e2cbb85ec3c340a36eed818c697 android.hardware.neuralnetworks@1.2::types
cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 7eea7fc..106f332 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -52,6 +52,7 @@
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
template <typename T>
void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
@@ -77,6 +78,13 @@
"Number of types in MixedTyped changed, but copy_back function wasn't updated");
}
+static bool isZeroSized(const MixedTyped& example, uint32_t index) {
+ for (auto i : example.operandDimensions.at(index)) {
+ if (i == 0) return true;
+ }
+ return false;
+}
+
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
@@ -178,17 +186,18 @@
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
bool sizeLargerThanOne = true;
- for_all(golden, [&outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
+ for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
if (index == 0) {
// On OutputType::INSUFFICIENT, set the output operand with index 0 with
// buffer size one byte less than needed.
if (outputType == OutputType::INSUFFICIENT) {
- if (s > 1)
+ if (s > 1 && !isZeroSized(golden, index)) {
s -= 1;
- else
+ } else {
sizeLargerThanOne = false;
+ }
}
}
RequestArgument arg = {
@@ -532,7 +541,8 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
- model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
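
Editorial note: the harness guard in isolation. The OutputType::INSUFFICIENT case shrinks the first output buffer by one byte to force an insufficient-size error from the driver, which is meaningless for a zero-sized output. A sketch of that sizing rule:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Request one byte less than needed so the driver must report an
    // insufficient output buffer -- unless the output is zero-sized
    // (any dimension == 0), which cannot be shrunk.
    size_t insufficientBufferSize(size_t needed, const std::vector<unsigned>& dims) {
        bool zeroSized = false;
        for (unsigned d : dims) zeroSized = zeroSized || (d == 0);
        return (needed > 1 && !zeroSized) ? needed - 1 : needed;
    }

    int main() {
        printf("%zu\n", insufficientBufferSize(16, {2, 2})); // 15: shrunk by one byte
        printf("%zu\n", insufficientBufferSize(4, {0, 2}));  // 4: zero-sized, unchanged
    }
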
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index b9fa388..d83f9e6 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -76,6 +76,17 @@
getType() generates (ErrorStatus status, DeviceType type);
/**
+ * Gets the capabilities of a driver.
+ *
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * @return capabilities Capabilities of the driver.
+ */
+ getCapabilities_1_2() generates (ErrorStatus status, Capabilities capabilities);
+
+ /**
* Gets information about extensions supported by the driver implementation.
*
* All extension operations and operands must be fully supported for the
@@ -113,44 +124,83 @@
generates (ErrorStatus status, vec<bool> supportedOperations);
/**
- * Gets whether the driver supports compilation caching.
+ * Gets the caching requirements of the driver implementation.
*
- * isCachingSupported indicates whether the driver supports compilation caching.
- * Even if so, the driver may still choose not to cache certain compiled models.
+ * There are two types of cache file descriptors provided to the driver: model cache
+ * and data cache.
*
- * If the device reports the caching is not supported, the user may avoid calling
- * IDevice::prepareModelFromCache and IPreparedModel::saveToCache.
+ * The data cache is for caching constant data, possibly including preprocessed
+ * and transformed tensor buffers. Any modification to the data cache should
+ * have no worse effect than generating bad output values at execution time.
+ *
+ * The model cache is for caching security-sensitive data such as compiled
+ * executable machine code in the device's native binary format. A modification
+ * to the model cache may affect the driver's execution behavior, and a malicious
+ * client could make use of this to execute beyond the granted permission. Thus,
+ * the driver must always check whether the model cache is corrupted before
+ * preparing the model from cache.
+ *
+ * getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver
+ * implementation needs to cache a single prepared model. Returning 0 for both types
+ * indicates compilation caching is not supported by this driver. The driver may
+ * still choose not to cache certain compiled models even if it reports that caching
+ * is supported.
+ *
+ * If the device reports that caching is not supported, the user may avoid calling
+ * IDevice::prepareModelFromCache or providing cache file descriptors to
+ * IDevice::prepareModel_1_2.
*
* @return status Error status of the call, must be:
* - NONE if successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
- * @return supported A boolean indicating whether the driver supports compilation
- * caching. Even on returning true, the driver may still choose
- * not to cache certain compiled models.
+ * @return numModelCache An unsigned integer indicating how many files for model cache
+ * the driver needs to cache a single prepared model. It must
+ * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
+ * @return numDataCache An unsigned integer indicating how many files for data cache
+ * the driver needs to cache a single prepared model. It must
+ * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
*/
- isCachingSupported() generates (ErrorStatus status, bool supported);
+ getNumberOfCacheFilesNeeded()
+ generates (ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache);
/**
- * Creates a prepared model for execution.
+ * Asynchronously creates a prepared model for execution and optionally saves it
+ * into cache files.
*
- * prepareModel is used to make any necessary transformations or alternative
+ * prepareModel is used to make any necessary transformations to or alternative
* representations to a model for execution, possibly including
* transformations on the constant data, optimization on the model's graph,
* or compilation into the device's native binary format. The model itself
* is not changed.
*
+ * Optionally, caching information may be provided for the driver to save
+ * the prepared model to cache files for faster model compilation time
+ * when the same model preparation is requested in the future. There are
+ * two types of cache file handles provided to the driver: model cache
+ * and data cache. For more information on the two types of cache handles,
+ * refer to getNumberOfCacheFilesNeeded.
+ *
+ * The file descriptors must be opened with read and write permission. A file may
+ * have any size, and the corresponding file descriptor may have any offset. The
+ * driver must truncate a file to zero size before writing to that file. The file
+ * descriptors may be closed by the client once the asynchronous preparation has
+ * finished. The driver must dup a file descriptor if it wants to get access to
+ * the cache file later.
+ *
* The model is prepared asynchronously with respect to the caller. The
- * prepareModel function must verify the inputs to the prepareModel function
- * are correct. If there is an error, prepareModel must immediately invoke
+ * prepareModel function must verify the inputs to the prepareModel function
+ * related to preparing the model (as opposed to saving the prepared model to
+ * cache) are correct. If there is an error, prepareModel must immediately invoke
* the callback with the appropriate ErrorStatus value and nullptr for the
- * IPreparedModel, then return with the same ErrorStatus. If the inputs to
- * the prepareModel function are valid and there is no error, prepareModel
- * must launch an asynchronous task to prepare the model in the background,
- * and immediately return from prepareModel with ErrorStatus::NONE. If the
- * asynchronous task fails to launch, prepareModel must immediately invoke
- * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
- * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
+ * IPreparedModel, then return with the same ErrorStatus. If the inputs to the
+ * prepareModel function that are related to preparing the model are valid and
+ * there is no error, prepareModel must launch an asynchronous task
+ * to prepare the model in the background, and immediately return from
+ * prepareModel with ErrorStatus::NONE. If the asynchronous task fails to launch,
+ * prepareModel must immediately invoke the callback with
+ * ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then return
+ * with ErrorStatus::GENERAL_FAILURE.
*
* When the asynchronous task has finished preparing the model, it must
* immediately invoke the callback function provided as an input to
@@ -160,6 +210,14 @@
* the callback object must be invoked with the appropriate ErrorStatus
* value and nullptr for the IPreparedModel.
*
+ * Optionally, the driver may save the prepared model to cache during the
+ * asynchronous preparation. Any error that occurs when saving to cache must
+ * not affect the status of preparing the model. Even if the input arguments
+ * related to the cache may be invalid, or the driver may fail to save to cache,
+ * the prepareModel function must finish preparing the model. The driver
+ * may choose not to save to cache even if the caching information is
+ * provided and valid.
+ *
* The only information that may be unknown to the model at this stage is
* the shape of the tensors, which may only be known at execution time. As
* such, some driver services may return partially prepared models, where
@@ -173,6 +231,26 @@
* @param model The model to be prepared for execution.
* @param preference Indicates the intended execution behavior of a prepared
* model.
+ * @param modelCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the security-sensitive cache. The length of
+ * the vector must either be 0 indicating that caching information is not provided,
+ * or match the numModelCache returned from getNumberOfCacheFilesNeeded. The cache
+ * handles will be provided in the same order when retrieving the
+ * preparedModel from cache files with prepareModelFromCache.
+ * @param dataCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the constants' cache. The length of
+ * the vector must either be 0 indicating that caching information is not provided,
+ * or match the numDataCache returned from getNumberOfCacheFilesNeeded. The cache
+ * handles will be provided in the same order when retrieving the
+ * preparedModel from cache files with prepareModelFromCache.
+ * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
+ * identifying the prepared model. The same token will be provided when retrieving
+ * the prepared model from the cache files with prepareModelFromCache.
+ * Tokens should be chosen to have a low rate of collision for a particular
+ * application. The driver cannot detect a collision; a collision will result
+ * in a failed execution or in a successful execution that produces incorrect
+ * output values. If both modelCache and dataCache are empty indicating that
+ * caching information is not provided, this token must be ignored.
* @param callback A callback object used to return the error status of
* preparing the model for execution and the prepared model if
* successful, nullptr otherwise. The callback object's notify function
@@ -182,9 +260,12 @@
* - NONE if preparation task is successfully launched
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
- * - INVALID_ARGUMENT if one of the input arguments is invalid
+ * - INVALID_ARGUMENT if one of the input arguments related to preparing the
+ * model is invalid
*/
prepareModel_1_2(Model model, ExecutionPreference preference,
+ vec<handle> modelCache, vec<handle> dataCache,
+ uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback)
generates (ErrorStatus status);
@@ -192,22 +273,17 @@
* Creates a prepared model from cache files for execution.
*
* prepareModelFromCache is used to retrieve a prepared model directly from
- * cache files to avoid slow model compilation time. There are exactly two
- * cache file descriptors provided to the driver: modelCache and dataCache.
+ * cache files to avoid slow model compilation time. There are
+ * two types of cache file handles provided to the driver: model cache
+ * and data cache. For more information on the two types of cache handles,
+ * refer to getNumberOfCacheFilesNeeded.
*
- * The dataCache is for caching constant data, possibly including preprocessed
- * and transformed tensor buffers. Any modification to the dataCache should
- * have no worse effect than generating bad output values at execution time.
- *
- * The modelCache is for caching security-sensitive data such as compiled
- * executable machine code in the device's native binary format. A modification
- * to the modelCache may affect the driver's execution behavior, and a malicious
- * client could make use of this to execute beyond the granted permission. Thus,
- * the driver must always check whether the modelCache is corrupted before preparing
- * the model from cache.
- *
- * The two file descriptors may be closed by the client once the asynchronous
- * preparation has finished. The driver has to copy all the data it needs.
+ * The file descriptors must be opened with read and write permission. A file may
+ * have any size, and the corresponding file descriptor may have any offset. The
+ * driver must truncate a file to zero size before writing to that file. The file
+ * descriptors may be closed by the client once the asynchronous preparation has
+ * finished. The driver must dup a file descriptor if it wants to get access to
+ * the cache file later.
*
* The model is prepared asynchronously with respect to the caller. The
* prepareModelFromCache function must verify the inputs to the
@@ -241,13 +317,17 @@
* used with different shapes of inputs on different (possibly concurrent)
* executions.
*
- * @param modelCache A handle holding exactly one cache file descriptor for the
- * security-sensitive cache.
- * @param dataCache A handle holding exactly one cache file descriptor for the
- * constants' cache.
+ * @param modelCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the security-sensitive cache. The length of
+ * the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
+ * The cache handles will be provided in the same order as with prepareModel_1_2.
+ * @param dataCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the constants' cache. The length of the vector
+ * must match the numDataCache returned from getNumberOfCacheFilesNeeded.
+ * The cache handles will be provided in the same order as with prepareModel_1_2.
* @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
* identifying the prepared model. It is the same token provided when saving
- * the cache files with IPreparedModel::saveToCache. Tokens should be chosen
+ * the cache files with prepareModel_1_2. Tokens should be chosen
* to have a low rate of collision for a particular application. The driver
* cannot detect a collision; a collision will result in a failed execution
* or in a successful execution that produces incorrect output values.
@@ -263,7 +343,7 @@
* unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid
*/
- prepareModelFromCache(handle modelCache, handle dataCache,
+ prepareModelFromCache(vec<handle> modelCache, vec<handle> dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback)
generates (ErrorStatus status);
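
Editorial note: the argument shape prepareModel_1_2 now enforces, distilled into a stand-in check (plain C++ types, not the HIDL ones): both cache vectors empty, meaning caching info is absent and the token is ignored, or both matching the counts from getNumberOfCacheFilesNeeded:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kByteSizeOfCacheToken = 32;    // Constant::BYTE_SIZE_OF_CACHE_TOKEN
    constexpr uint32_t kMaxNumberOfCacheFiles = 32; // Constant::MAX_NUMBER_OF_CACHE_FILES

    // Returns true when the caching arguments are valid: caching info is
    // either absent (both vectors empty) or complete (lengths match what
    // getNumberOfCacheFilesNeeded reported for this driver).
    bool isCachingInfoValid(const std::vector<int>& modelCacheFds,
                            const std::vector<int>& dataCacheFds,
                            uint32_t numModelCache, uint32_t numDataCache) {
        if (numModelCache > kMaxNumberOfCacheFiles || numDataCache > kMaxNumberOfCacheFiles) {
            return false;  // the driver itself misreported its requirements
        }
        if (modelCacheFds.empty() && dataCacheFds.empty()) {
            return true;   // caching information not provided; token must be ignored
        }
        return modelCacheFds.size() == numModelCache &&
               dataCacheFds.size() == numDataCache;
    }
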
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
index 757d5f1..5d2d80f 100644
--- a/neuralnetworks/1.2/IPreparedModel.hal
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -157,62 +157,4 @@
fmq_sync<FmqRequestDatum> requestChannel,
fmq_sync<FmqResultDatum> resultChannel)
generates (ErrorStatus status, IBurstContext context);
-
- /*
- * Saves the prepared model to cache files.
- *
- * saveToCache is used to save a prepared model to cache files for faster
- * model compilation time when the same model preparation is requested in
- * the future. There are exactly two cache file descriptors provided to the
- * driver: modelCache and dataCache.
- *
- * The dataCache is for caching constant data, possibly including preprocessed
- * and transformed tensor buffers. Any modification to the dataCache should
- * have no worse effect than generating bad output values at execution time.
- *
- * The modelCache is for caching security-sensitive data such as compiled
- * executable machine code in the device's native binary format. A modification
- * to the modelCache may affect the driver's execution behavior, and a malicious
- * client could make use of this to execute beyond the granted permission. Thus,
- * the driver must always check whether the modelCache is corrupted before preparing
- * the model from cache.
- *
- * The two file descriptors must point to two zero-length files with offset
- * positioned at the beginning of the file. The file descriptors may be closed
- * by the client once the method has returned.
- *
- * If the driver decides not to save the prepared model without looking at the
- * input arguments to the saveToCache function, saveToCache must return with
- * ErrorStatus::GENERAL_FAILURE. Otherwise, the saveToCache function must verify
- * the input arguments to the saveToCache function are valid, and return with
- * ErrorStatus::INVALID_ARGUMENT if not. If the inputs are valid but the driver
- * could not save the prepared model, saveToCache must return with the appropriate
- * ErrorStatus. Otherwise, it must write the cache files and return
- * ErrorStatus::NONE. Unless saveToCache returns ErrorStatus::NONE, the contents
- * of the cache files are undefined.
- *
- * @param modelCache A handle holding exactly one cache file descriptor for the
- * security-sensitive cache.
- * @param dataCache A handle holding exactly one cache file descriptor for the
- * constants' cache.
- * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
- * identifying the prepared model. The same token will be provided
- * when retrieving the prepared model from cache files with
- * IDevice::prepareModelFromCache. Tokens should be chosen to have
- * a low rate of collision for a particular application. The driver
- * cannot detect a collision; a collision will result in a failed
- * execution or in a successful execution that produces incorrect
- * output values.
- * @return status Error status of saveToCache, must be:
- * - NONE if saveToCache is performed successfully
- * - DEVICE_UNAVAILABLE if driver is offline or busy
- * - GENERAL_FAILURE if the driver could not save the
- * prepared model or if there is an unspecified error
- * - INVALID_ARGUMENT if one of the input arguments is invalid,
- * unless the driver decides not to save the prepared model
- * without looking at the input arguments
- */
- saveToCache(handle modelCache, handle dataCache,
- uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token)
- generates (ErrorStatus status);
};
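
Editorial note: with saveToCache gone, the fd-handling rules live in prepareModel_1_2/prepareModelFromCache. Descriptors may arrive at any offset on files of any size, so a driver that writes cache truncates first, and dups any descriptor it needs after the call returns. A POSIX sketch of adopting such an fd:

    #include <unistd.h>

    // Adopt a client-provided cache fd under the new contract: the client may
    // close its copy once the call returns, so dup() our own reference, and
    // the file may be non-empty with the offset anywhere, so truncate and
    // rewind before writing.
    int adoptCacheFdForWrite(int clientFd) {
        int fd = dup(clientFd);
        if (fd < 0) return -1;
        if (ftruncate(fd, 0) != 0 || lseek(fd, 0, SEEK_SET) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }
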
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index f2e02b8..8c57796 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -30,6 +30,11 @@
* The byte size of the cache token.
*/
BYTE_SIZE_OF_CACHE_TOKEN = 32,
+
+ /**
+ * The maximum number of files for each type of cache in compilation caching.
+ */
+ MAX_NUMBER_OF_CACHE_FILES = 32,
};
enum OperandType : @1.0::OperandType {
@@ -182,6 +187,10 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensors are supported. A zero
+ * dimension is only compatible with 0 or 1. The size of an output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
@@ -231,7 +240,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -257,7 +267,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -304,6 +315,7 @@
* Before API level 29, all input tensors of
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* must have the same scale and zeroPoint as the output tensor.
+ * Since API level 29, zero-sized tensors are supported.
* * n: An {@link OperandType::INT32} scalar, specifying the
* concatenation axis.
*
@@ -361,7 +373,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
- * specifying the input.
+ * specifying the input. Since API level 29, zero batches is supported
+ * for this tensor.
* * 1: A 4-D tensor, of shape
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
@@ -408,7 +421,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
- * specifying the input.
+ * specifying the input. Since API level 29, zero batches is supported
+ * for this tensor.
* * 1: A 4-D tensor, of shape
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
@@ -450,11 +464,10 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out]. Before API level 29,
+ * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
+ * following condition must be satisfied:
+ * output_scale > input_scale * filter_scale
*
* Available since API level 27.
*/
@@ -600,11 +613,10 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out]. Before API level 29,
+ * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
+ * following condition must be satisfied:
+ * output_scale > input_scale * filter_scale
*
* Available since API level 27.
*/
@@ -672,7 +684,7 @@
* Supported tensor rank: up to 4
*
* Inputs:
- * * 0: A tensor.
+ * * 0: A tensor. Since API level 29, this tensor may be zero-sized.
*
* Outputs:
* * 0: A tensor with the same shape as input0.
@@ -765,7 +777,8 @@
* [batch_size, input_size], where "input_size" corresponds to the
* number of inputs to the layer, matching the second dimension of
* weights, and "batch_size" is calculated by dividing the number of
- * elements by "input_size".
+ * elements by "input_size". Since API level 29, zero batch_size is
+ * supported for this tensor.
* * 1: A 2-D tensor, specifying the weights, of shape
* [num_units, input_size], where "num_units" corresponds to the number
* of output nodes.
@@ -780,10 +793,10 @@
* invoke on the result.
*
* Outputs:
- * * 0: The output tensor, of shape [batch_size, num_units]. For output
- * tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
- * condition must be satisfied:
- * output_scale > input_scale * filter_scale.
+ * * 0: The output tensor, of shape [batch_size, num_units]. Before API
+ * level 29, for output tensor of {@link
+ * OperandType::TENSOR_QUANT8_ASYMM}, the following condition must be
+ * satisfied: output_scale > input_scale * filter_scale.
*
* Available since API level 27.
*/
@@ -861,6 +874,7 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29)
*
* Supported tensor rank: up to 4
* Tensors with rank less than 4 are only supported since API level 29.
@@ -875,6 +889,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
*
* Available since API level 27.
*/
@@ -905,7 +921,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -931,7 +948,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -1021,7 +1039,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1333,7 +1352,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -1359,7 +1379,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -1406,6 +1427,10 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
+ * Since API level 29, generic zero-sized input tensors are supported. A zero
+ * dimension is only compatible with 0 or 1. The size of an output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor rank: up to 4
*
* Inputs:
@@ -1441,7 +1466,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1465,7 +1491,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1489,7 +1516,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1541,9 +1569,12 @@
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
*
- * Inputs:
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 2: An {@link OperandType::INT32} scalar, specifying the output
@@ -1552,6 +1583,24 @@
* Set to true to specify NCHW data layout for input0 and output0.
* Available since API level 29.
*
+ * Inputs (resizing by scale, since API level 29):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
@@ -1637,7 +1686,8 @@
* Tensors with rank other than 2 or 4 are only supported since API level 29.
*
* Inputs:
- * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
+ * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. Since
+ * API level 29, this tensor may be zero-sized.
* * 1: A scalar, specifying the positive scaling factor for the exponent,
* beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
@@ -1795,7 +1845,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1862,6 +1913,10 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either corresponding input dimension is zero.
+ *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
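(A minimal C++ sketch, not part of the HAL, making the zero-dimension
broadcasting rule above concrete.)

    // Broadcast size of one dimension pair, per the rule above: sizes must
    // match unless one of them is 1; a size of 0 is compatible with 0 or 1,
    // and the output size is 0 whenever either input size is 0.
    int broadcastDim(int a, int b) {
        if (a == b) return a;               // covers 0 with 0
        if (a == 1 || b == 1) {             // covers 0 with 1: result is 0
            return a == 1 ? b : a;
        }
        return -1;                          // incompatible sizes
    }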
@@ -2095,6 +2150,10 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either corresponding input dimension is zero.
+ *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
@@ -2135,6 +2194,7 @@
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be transposed.
+ * Since API level 29, this tensor may be zero-sized.
* * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
* the permutation of the dimensions of the input tensor.
*
@@ -2231,7 +2291,8 @@
* * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
* bounding box proposals, each line with format [x1, y1, x2, y2].
* For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
- * the zeroPoint must be 0 and the scale must be 0.125.
+ * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
+ * is supported for this tensor.
* * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
* bounding box delta for each region of interest and each class. The
* bounding box deltas are organized in the following order
@@ -2240,10 +2301,12 @@
* and height, dw and dh is the log-scale relative correction factor
* for the width and height. For input0 of type
* {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
+ * supported for this tensor.
* * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
* * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
* each image in the batch, each line with format
* [image_height, image_width].
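(A hedged C++ sketch of the axis-aligned box decoding implied by the
[dx, dy, dw, dh] delta format above, assuming the common convention that
dx and dy scale with the box width and height while dw and dh are applied
through exp(); illustrative only, not the HAL's reference implementation.)

    #include <cmath>

    struct Box { float x1, y1, x2, y2; };

    Box applyDeltas(const Box& roi, float dx, float dy, float dw, float dh) {
        const float w = roi.x2 - roi.x1, h = roi.y2 - roi.y1;
        const float cx = roi.x1 + 0.5f * w + dx * w;   // corrected center
        const float cy = roi.y1 + 0.5f * h + dy * h;
        const float nw = w * std::exp(dw);             // log-scale correction
        const float nh = h * std::exp(dh);
        return {cx - 0.5f * nw, cy - 0.5f * nh, cx + 0.5f * nw, cy + 0.5f * nh};
    }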
@@ -2272,113 +2335,113 @@
* Inputs:
* * 0: The input.
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
* where "max_time" is the number of timesteps (sequence length),
* "batch_size" corresponds to the batching dimension, and
* "input_size" is the size of the input.
* * 1: The forward input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size], where “num_units”
- * corresponds to the number of cell units.
+ * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
+ * corresponds to the number of forward cell units.
* * 2: The forward input-to-forget weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 3: The forward input-to-cell weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 4: The forward input-to-output weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 5: The forward recurrent-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, output_size], where “output_size”
- * corresponds to either the number of cell units (i.e., “num_units”),
- * or the second dimension of the “projection_weights”, if defined.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
+ * corresponds to either the number of cell units (i.e., “fw_num_units”),
+ * or the second dimension of the “fw_projection_weights”, if defined.
* * 6: The forward recurrent-to-forget weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 7: The forward recurrent-to-cell weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 8: The forward recurrent-to-output weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 9: The forward cell-to-input weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 10: The forward cell-to-forget weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 11: The forward cell-to-output weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 12: The forward input gate bias. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 13: The forward forget gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 14: The forward cell gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 15: The forward output gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 16: The forward projection weights. Optional.
- * A 2-D tensor of shape [output_size, num_units].
+ * A 2-D tensor of shape [fw_output_size, fw_num_units].
* * 17: The forward projection bias. Optional.
- * A 1-D tensor of shape [output_size].
+ * A 1-D tensor of shape [fw_output_size].
* * 18: The backward input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size], where “num_units”
- * corresponds to the number of cell units.
+ * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
+ * corresponds to the number of backward cell units.
* * 19: The backward input-to-forget weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 20: The backward input-to-cell weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 21: The backward input-to-output weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 22: The backward recurrent-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, output_size], where “output_size”
- * corresponds to either the number of cell units (i.e., “num_units”),
- * or the second dimension of the “projection_weights”, if defined.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
+ * corresponds to either the number of cell units (i.e., “bw_num_units”),
+ * or the second dimension of the “bw_projection_weights”, if defined.
* * 23: The backward recurrent-to-forget weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 24: The backward recurrent-to-cell weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 25: The backward recurrent-to-output weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 26: The backward cell-to-input weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 27: The backward cell-to-forget weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 28: The backward cell-to-output weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 29: The backward input gate bias. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 30: The backward forget gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 31: The backward cell gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 32: The backward output gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 33: The backward projection weights. Optional.
- * A 2-D tensor of shape [output_size, num_units].
+ * A 2-D tensor of shape [bw_output_size, bw_num_units].
* * 34: The backward projection bias. Optional.
- * A 1-D tensor of shape [output_size].
+ * A 1-D tensor of shape [bw_output_size].
* * 35: The forward input activation state.
- * A 2-D tensor of shape [batch_size, output_size].
+ * A 2-D tensor of shape [batch_size, fw_output_size].
* * 36: The forward input cell state.
- * A 2-D tensor of shape [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, fw_num_units].
* * 37: The backward input activation state.
- * A 2-D tensor of shape [batch_size, output_size].
+ * A 2-D tensor of shape [batch_size, bw_output_size].
* * 38: The backward input cell state.
- * A 2-D tensor of shape [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, bw_num_units].
* * 39: The auxiliary input. Optional.
* A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
* corresponds to the batching dimension, and “input_size” is the size
* of the input.
* * 40: The forward auxiliary input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 41: The forward auxiliary input-to-forget weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 42: The forward auxiliary input-to-cell weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 43: The forward auxiliary input-to-output weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 44: The backward auxiliary input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 45: The backward auxiliary input-to-forget weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 46: The backward auxiliary input-to-cell weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 47: The backward auxiliary input-to-output weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 48: The activation function.
* A value indicating the activation function:
* <ul>
@@ -2410,16 +2473,46 @@
* * 52: time_major
* An {@link OperandType::BOOL} scalar specifying the shape format
* of input and output tensors.
+ * * 53: The forward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 54: The forward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 55: The forward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 56: The forward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ * * 57: The backward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 58: The backward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 59: The backward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 60: The backward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
*
* Outputs:
* * 0: The forward output.
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
+ * If time-major and not merge_outputs:
+ * [max_time, batch_size, fw_output_size]
+ * If time-major and merge_outputs:
+ * [max_time, batch_size, fw_output_size + bw_output_size]
+ * If batch-major and not merge_outputs:
+ * [batch_size, max_time, fw_output_size]
+ * If batch-major and merge_outputs:
+ * [batch_size, max_time, fw_output_size + bw_output_size]
* * 1: The backward output. Unused if merge_outputs is true.
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
+ * If time-major: [max_time, batch_size, bw_output_size]
+ * If batch-major: [batch_size, max_time, bw_output_size]
*
* Available since API level 29.
*/
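(A small C++ illustration, not from the HAL, of the output-0 shape selection
described above; names are made up.)

    #include <array>

    std::array<int, 3> fwOutputShape(bool timeMajor, bool mergeOutputs,
                                     int maxTime, int batchSize,
                                     int fwOutputSize, int bwOutputSize) {
        // With merge_outputs, the forward and backward features are
        // concatenated along the last dimension.
        const int lastDim = mergeOutputs ? fwOutputSize + bwOutputSize
                                         : fwOutputSize;
        return timeMajor ? std::array<int, 3>{maxTime, batchSize, lastDim}
                         : std::array<int, 3>{batchSize, maxTime, lastDim};
    }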
@@ -2547,10 +2640,17 @@
/**
* Greedily selects a subset of bounding boxes in descending order of score.
*
- * This op applies hard NMS algorithm to each class. In each loop of
- * execution, the box with maximum score gets selected, and any boxes with
- * the intersection-over-union (IOU) greater than a threshold are removed
- * from the pending set.
+ * This op applies the NMS algorithm to each class. In each loop of
+ * execution, the box with the maximum score gets selected and removed
+ * from the pending set. The scores of the remaining boxes are lowered
+ * according to their intersection-over-union (IOU) overlap with the
+ * previously selected boxes and the specified NMS kernel method. Any box
+ * with a score lower than a threshold is removed from the pending set.
+ *
+ * Three NMS kernels are supported:
+ * * Hard: score_new = score_old * (1 if IoU < threshold else 0)
+ * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)
+ * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
*
 * Axis-aligned bounding boxes are represented by their upper-left corner
* coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
@@ -2564,25 +2664,34 @@
* Inputs:
* * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
* of each bounding box proposal. The boxes are grouped by batches in the
- * first dimension.
+ * first dimension. Zero num_rois is supported for this tensor.
* * 1: A 2-D Tensor specifying the bounding boxes of shape
* [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
* The boxes are grouped by batches in the first dimension. The sequential
* order of the boxes corresponds with input0. For input0 of type
* {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
* {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
- * scale of 0.125.
+ * scale of 0.125. Zero num_rois is supported for this tensor.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
* * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
* with scores lower than the threshold are filtered before sending
* to the NMS algorithm.
- * * 4: An {@link OperandType::FLOAT32} scalar, specifying the IoU
- * threshold.
- * * 5: An {@link OperandType::INT32} scalar, specifying the maximum
+ * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
* number of selected bounding boxes for each image. Set to a negative
* value for unlimited number of output bounding boxes.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
+ * kernel method, options are 0:hard, 1:linear, 2:gaussian.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold in the hard and linear NMS kernels. This field is
+ * ignored if the gaussian kernel is selected.
+ * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
+ * the gaussian NMS kernel. This field is ignored if the gaussian
+ * kernel is not selected.
+ * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
+ * Boxes with scores lower than the threshold are dropped during the
+ * score updating phase in soft NMS.
*
* Outputs:
* * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
@@ -2600,8 +2709,8 @@
* [num_output_rois], specifying the class of each output box. The
* sequential order of the boxes corresponds with output0.
* * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
- * [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
*
* Available since API level 29.
*/
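(A minimal C++ sketch of the three score-update kernels listed above;
illustrative only, with made-up names.)

    #include <cmath>

    // Soft-NMS score update for one pending box, where 'iou' is its overlap
    // with the most recently selected box.
    float updateScore(float score, float iou, int kernel,
                      float iouThreshold, float sigma) {
        switch (kernel) {
            case 0:  // hard: keep the score only if overlap is below threshold
                return iou < iouThreshold ? score : 0.0f;
            case 1:  // linear: attenuate proportionally to the overlap
                return iou < iouThreshold ? score : score * (1.0f - iou);
            case 2:  // gaussian: smooth decay; the IoU threshold is ignored
                return score * std::exp(-iou * iou / sigma);
            default:
                return score;
        }
    }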
@@ -2937,8 +3046,8 @@
* For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
* scale must be 0.125 and the zero point must be 0.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
- * [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
*
* Available since API level 29.
*/
@@ -3122,11 +3231,7 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out].
*
* Available since API level 29.
*/
@@ -3608,7 +3713,7 @@
* Supported tensor rank: from 1
*
* Inputs:
- * * 0: A tensor.
+ * * 0: A tensor, which may be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0, but with
@@ -3940,10 +4045,12 @@
* the regions of interest, each line with format [x1, y1, x2, y2].
* For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
* this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
- * with zeroPoint of 0 and scale of 0.125.
+ * with zeroPoint of 0 and scale of 0.125. Zero num_rois is
+ * supported for this tensor.
* * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
* * 3: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 4: An {@link OperandType::INT32} scalar, specifying the output
@@ -4108,7 +4215,7 @@
* Supported tensor rank: from 1
*
* Inputs:
- * * 0: An n-D tensor to take slice from.
+ * * 0: An n-D tensor to take a slice from; it may be zero-sized.
* * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
* the beginning indices of the slice in each dimension.
* * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
@@ -4331,11 +4438,7 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out].
*
* Available since API level 29.
*/
@@ -4367,9 +4470,9 @@
* Inputs:
* * 0: The input (\f$x_t\f$).
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
- * where “max_size” is the number of timesteps (sequence length),
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where “max_time” is the number of timesteps (sequence length),
* “batch_size” corresponds to the batching dimension, and
* “input_size” is the size of the input.
* * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
@@ -4429,16 +4532,16 @@
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* * 23:Time-major if true, batch-major if false.
- * * 24:The input layer normalization weights.
+ * * 24:The input layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at input gate.
- * * 25:The forget layer normalization weights.
+ * * 25:The forget layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at forget gate.
- * * 26:The cell layer normalization weights.
+ * * 26:The cell layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at cell gate.
- * * 27:The output layer normalization weights.
+ * * 27:The output layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at output gate.
*
@@ -4526,9 +4629,11 @@
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
*
- * Inputs:
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 2: An {@link OperandType::INT32} scalar, specifying the output
@@ -4536,6 +4641,24 @@
* * 3: An {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
*
+ * Inputs (resizing by scale):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
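(A hedged follow-on to the scale formula above: choosing a scale that
reproduces a given resize-by-shape target; illustrative C++ only, not from
the HAL.)

    // floor(dim * scale) == target for any scale in [target/dim, (target+1)/dim),
    // so the midpoint below maps a by-shape target onto an equivalent by-scale
    // argument (up to float rounding).
    float scaleForTarget(int dim, int target) {
        return (target + 0.5f) / dim;
    }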
@@ -4593,6 +4716,39 @@
};
/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non-extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format, for scalar and tensor operands
+ * respectively.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted in increasing order of
+ * OperandType. If a particular OperandType is not present in
+ * operandPerformance, its performance is treated as
+ * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
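(A hedged C++ sketch, not part of the interface, of how a client might query
the sorted operandPerformance vector; std::lower_bound relies on the
increasing OperandType order required above.)

    #include <algorithm>
    #include <cfloat>

    PerformanceInfo lookupPerformance(
            const hidl_vec<Capabilities::OperandPerformance>& perf,
            OperandType type) {
        auto it = std::lower_bound(
                perf.begin(), perf.end(), type,
                [](const Capabilities::OperandPerformance& a, OperandType t) {
                    return a.type < t;
                });
        if (it != perf.end() && it->type == type) return it->info;
        // Absent types default to worst-case performance, per the doc above.
        return PerformanceInfo{.execTime = FLT_MAX, .powerUsage = FLT_MAX};
    }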
+
+/**
* Describes one operation of the model's graph.
*/
struct Operation {
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 365a750..5c269df 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -25,7 +25,7 @@
namespace vts {
namespace functional {
-using V1_1::Capabilities;
+using V1_0::PerformanceInfo;
// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -37,6 +37,31 @@
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
+// initialization
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+ using OperandPerformance = Capabilities::OperandPerformance;
+ Return<void> ret = device->getCapabilities_1_2([](ErrorStatus status,
+ const Capabilities& capabilities) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+
+ auto isPositive = [](const PerformanceInfo& perf) {
+ return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
+ };
+
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto& opPerf = capabilities.operandPerformance;
+ EXPECT_TRUE(std::all_of(
+ opPerf.begin(), opPerf.end(),
+ [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
+ EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
+ [](const OperandPerformance& a, const OperandPerformance& b) {
+ return a.type < b.type;
+ }));
+ });
+ EXPECT_TRUE(ret.isOk());
+}
+
// device version test
TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
Return<void> ret = device->getVersionString([](ErrorStatus status, const hidl_string& version) {
@@ -77,10 +102,15 @@
EXPECT_TRUE(ret.isOk());
}
-// isCachingSupported test
-TEST_F(NeuralnetworksHidlTest, IsCachingSupported) {
- Return<void> ret = device->isCachingSupported(
- [](ErrorStatus status, bool) { EXPECT_EQ(ErrorStatus::NONE, status); });
+// getNumberOfCacheFilesNeeded test
+TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
+ Return<void> ret = device->getNumberOfCacheFilesNeeded(
+ [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ EXPECT_LE(numModelCache,
+ static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+ EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+ });
EXPECT_TRUE(ret.isOk());
}
} // namespace functional
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 00989e5..167fc09 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -54,29 +54,39 @@
[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
-enum class AccessMode { READ_ONLY, WRITE_ONLY };
+enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
-void createCacheHandle(const std::vector<std::string>& files, AccessMode mode,
- hidl_handle* handle) {
- std::vector<int> fds;
- for (const auto& file : files) {
- int fd;
- if (mode == AccessMode::READ_ONLY) {
- fd = open(file.c_str(), O_RDONLY);
- } else if (mode == AccessMode::WRITE_ONLY) {
- fd = open(file.c_str(), O_WRONLY | O_TRUNC | O_CREAT, S_IRUSR | S_IWUSR);
- } else {
- FAIL();
+// Creates cache handles based on provided file groups.
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
+void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups,
+ const std::vector<AccessMode>& mode, hidl_vec<hidl_handle>* handles) {
+ handles->resize(fileGroups.size());
+ for (uint32_t i = 0; i < fileGroups.size(); i++) {
+ std::vector<int> fds;
+ for (const auto& file : fileGroups[i]) {
+ int fd;
+ if (mode[i] == AccessMode::READ_ONLY) {
+ fd = open(file.c_str(), O_RDONLY);
+ } else if (mode[i] == AccessMode::WRITE_ONLY) {
+ fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
+ } else if (mode[i] == AccessMode::READ_WRITE) {
+ fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
+ } else {
+ FAIL();
+ }
+ ASSERT_GE(fd, 0);
+ fds.push_back(fd);
}
- ASSERT_GE(fd, 0);
- fds.push_back(fd);
+ native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0);
+ ASSERT_NE(cacheNativeHandle, nullptr);
+ std::copy(fds.begin(), fds.end(), &cacheNativeHandle->data[0]);
+ (*handles)[i].setTo(cacheNativeHandle, /*shouldOwn=*/true);
}
- native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0);
- ASSERT_NE(cacheNativeHandle, nullptr);
- for (uint32_t i = 0; i < fds.size(); i++) {
- cacheNativeHandle->data[i] = fds[i];
- }
- handle->setTo(cacheNativeHandle, /*shouldOwn=*/true);
+}
+
+void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups, AccessMode mode,
+ hidl_vec<hidl_handle>* handles) {
+ createCacheHandles(fileGroups, std::vector<AccessMode>(fileGroups.size(), mode), handles);
}
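(For illustration, a hypothetical call matching the comment above; the paths
are made up.)

    // One handle holding two fds for the model cache, opened read/write.
    hidl_vec<hidl_handle> handles;
    createCacheHandles({{"/data/local/tmp/model0", "/data/local/tmp/model1"}},
                       AccessMode::READ_WRITE, &handles);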
} // namespace
@@ -88,38 +98,43 @@
NeuralnetworksHidlTest::SetUp();
ASSERT_NE(device.get(), nullptr);
- // Create cache directory. The cache directory and cache files are always created to test
- // the behavior of prepareModelFromCache, even when caching is not supported.
+ // Create cache directory. The cache directory and a temporary cache file are
+ // always created to test the behavior of prepareModelFromCache, even when
+ // caching is not supported.
char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
char* cacheDir = mkdtemp(cacheDirTemp);
ASSERT_NE(cacheDir, nullptr);
mCacheDir = cacheDir;
+ mCacheDir.push_back('/');
- // Create empty cache files.
- mCache1 = mCacheDir + "/cache1";
- mCache2 = mCacheDir + "/cache2";
- mCache3 = mCacheDir + "/cache3";
- // A dummy handle, use AccessMode::WRITE_ONLY for createCacheHandle to create files.
- hidl_handle handle;
- createCacheHandle({mCache1, mCache2, mCache3}, AccessMode::WRITE_ONLY, &handle);
-
- // Check if caching is supported.
- bool isCachingSupported;
- Return<void> ret = device->isCachingSupported(
- [&isCachingSupported](ErrorStatus status, bool supported) {
+ Return<void> ret = device->getNumberOfCacheFilesNeeded(
+ [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
EXPECT_EQ(ErrorStatus::NONE, status);
- isCachingSupported = supported;
+ mNumModelCache = numModelCache;
+ mNumDataCache = numDataCache;
});
EXPECT_TRUE(ret.isOk());
- if (isCachingSupported) {
- mIsCachingSupported = true;
- } else {
+ mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0;
+
+ // Create empty cache files.
+ mTmpCache = mCacheDir + "tmp";
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ mModelCache.push_back({mCacheDir + "model" + std::to_string(i)});
+ }
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ mDataCache.push_back({mCacheDir + "data" + std::to_string(i)});
+ }
+ // Dummy handles, use AccessMode::WRITE_ONLY for createCacheHandles to create files.
+ hidl_vec<hidl_handle> modelHandle, dataHandle, tmpHandle;
+ createCacheHandles(mModelCache, AccessMode::WRITE_ONLY, &modelHandle);
+ createCacheHandles(mDataCache, AccessMode::WRITE_ONLY, &dataHandle);
+ createCacheHandles({{mTmpCache}}, AccessMode::WRITE_ONLY, &tmpHandle);
+
+ if (!mIsCachingSupported) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service does not "
"support compilation caching.";
std::cout << "[ ] Early termination of test because vendor service does not "
"support compilation caching."
<< std::endl;
- mIsCachingSupported = false;
}
}
@@ -127,22 +142,49 @@
// The tmp directory is only removed when the driver reports caching not supported,
// otherwise it is kept for debugging purpose.
if (!mIsCachingSupported) {
- remove(mCache1.c_str());
- remove(mCache2.c_str());
- remove(mCache3.c_str());
+ remove(mTmpCache.c_str());
rmdir(mCacheDir.c_str());
}
NeuralnetworksHidlTest::TearDown();
}
- void saveModelToCache(sp<IPreparedModel> preparedModel, const hidl_handle& cache1,
- const hidl_handle& cache2, ErrorStatus* status) {
- // Save IPreparedModel to cache.
+ void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache, bool* supported,
+ sp<IPreparedModel>* preparedModel = nullptr) {
+ if (preparedModel != nullptr) *preparedModel = nullptr;
+
+ // See if service can handle model.
+ bool fullySupportsModel = false;
+ Return<void> supportedCall = device->getSupportedOperations_1_2(
+ model,
+ [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_EQ(supported.size(), model.operations.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedCall.isOk());
+ *supported = fullySupportsModel;
+ if (!fullySupportsModel) return;
+
+ // Launch prepare model.
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
- Return<ErrorStatus> saveToCacheStatus =
- preparedModel->saveToCache(cache1, cache2, cacheToken);
- ASSERT_TRUE(saveToCacheStatus.isOk());
- *status = static_cast<ErrorStatus>(saveToCacheStatus);
+ Return<ErrorStatus> prepareLaunchStatus =
+ device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache,
+ dataCache, cacheToken, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
+
+ // Retrieve prepared model.
+ preparedModelCallback->wait();
+ ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ if (preparedModel != nullptr) {
+ *preparedModel =
+ V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
+ .withDefault(nullptr);
+ }
}
bool checkEarlyTermination(ErrorStatus status) {
@@ -157,14 +199,27 @@
return false;
}
- void prepareModelFromCache(const hidl_handle& cache1, const hidl_handle& cache2,
+ bool checkEarlyTermination(bool supported) {
+ if (!supported) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ return true;
+ }
+ return false;
+ }
+
+ void prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
sp<IPreparedModel>* preparedModel, ErrorStatus* status) {
// Launch prepare model from cache.
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
- Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModelFromCache(cache1, cache2, cacheToken, preparedModelCallback);
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModelFromCache(
+ modelCache, dataCache, cacheToken, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
*preparedModel = nullptr;
@@ -179,49 +234,54 @@
.withDefault(nullptr);
}
+ // Absolute path to the temporary cache directory.
std::string mCacheDir;
- std::string mCache1;
- std::string mCache2;
- std::string mCache3;
+
+ // Groups of file paths for model and data cache in the tmp cache directory, initialized with
+ // outer_size = mNum{Model|Data}Cache, inner_size = 1. The outer vector corresponds to handles
+ // and the inner vector is for fds held by each handle.
+ std::vector<std::vector<std::string>> mModelCache;
+ std::vector<std::vector<std::string>> mDataCache;
+
+ // A separate temporary file path in the tmp cache directory.
+ std::string mTmpCache;
+
uint8_t mToken[static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {};
- bool mIsCachingSupported;
+ uint32_t mNumModelCache;
+ uint32_t mNumDataCache;
+ bool mIsCachingSupported;
};
TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (!mIsCachingSupported) {
- EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- } else {
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
// Retrieve preparedModel from cache.
{
preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (!mIsCachingSupported) {
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
return;
+ } else if (checkEarlyTermination(status)) {
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
} else {
ASSERT_EQ(status, ErrorStatus::NONE);
ASSERT_NE(preparedModel, nullptr);
@@ -238,41 +298,54 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (!mIsCachingSupported) {
- EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- } else {
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ uint8_t dummyBytes[] = {0, 0};
+ // Write dummy bytes to the cache.
+ // The driver should be able to handle non-empty cache and non-zero fd offset.
+ for (uint32_t i = 0; i < modelCache.size(); i++) {
+ ASSERT_EQ(write(modelCache[i].getNativeHandle()->data[0], &dummyBytes,
+ sizeof(dummyBytes)),
+ sizeof(dummyBytes));
}
+ for (uint32_t i = 0; i < dataCache.size(); i++) {
+ ASSERT_EQ(
+ write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
+ sizeof(dummyBytes));
+ }
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
// Retrieve preparedModel from cache.
{
preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
uint8_t dummyByte = 0;
- // Advance offset by one byte.
- ASSERT_GE(read(cache1.getNativeHandle()->data[0], &dummyByte, 1), 0);
- ASSERT_GE(read(cache2.getNativeHandle()->data[0], &dummyByte, 1), 0);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ // Advance the offset of each handle by one byte.
+ // The driver should be able to handle non-zero fd offset.
+ for (uint32_t i = 0; i < modelCache.size(); i++) {
+ ASSERT_GE(read(modelCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
+ }
+ for (uint32_t i = 0; i < dataCache.size(); i++) {
+ ASSERT_GE(read(dataCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
+ }
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (!mIsCachingSupported) {
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
return;
+ } else if (checkEarlyTermination(status)) {
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
} else {
ASSERT_EQ(status, ErrorStatus::NONE);
ASSERT_NE(preparedModel, nullptr);
@@ -285,234 +358,512 @@
/*testDynamicOutputShape=*/false);
}
+TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+
+ // Test with number of model cache files greater than mNumModelCache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an additional cache file for model cache.
+ mModelCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of model cache files smaller than mNumModelCache.
+ if (mModelCache.size() > 0) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pop out the last cache file.
+ auto tmp = mModelCache.back();
+ mModelCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files greater than mNumDataCache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an additional cache file for data cache.
+ mDataCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files smaller than mNumDataCache.
+ if (mDataCache.size() > 0) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pop out the last cache file.
+ auto tmp = mDataCache.back();
+ mDataCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+
+ // Save the compilation to cache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
+ }
+
+ // Test with number of model cache files greater than mNumModelCache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mModelCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of model cache files smaller than mNumModelCache.
+ if (mModelCache.size() > 0) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mModelCache.back();
+ mModelCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files greater than mNumDataCache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mDataCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files smaller than mNumDataCache.
+ if (mDataCache.size() > 0) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mDataCache.back();
+ mDataCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
- // cache1 with invalid NumFd.
- {
+ // Go through each handle in model cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ mModelCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1, mCache3}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
}
+ ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid NumFd.
- {
+ // Go through each handle in model cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ auto tmp = mModelCache[i].back();
+ mModelCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2, mCache3}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
}
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ mDataCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ auto tmp = mDataCache[i].back();
+ mDataCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
}
TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
- // cache1 with invalid NumFd.
- {
- preparedModel = nullptr;
+ // Go through each handle in model cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1, mCache3}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mModelCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (status != ErrorStatus::GENERAL_FAILURE) {
ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
- ASSERT_EQ(preparedModel, nullptr);
}
+ ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid NumFd.
- {
- preparedModel = nullptr;
+ // Go through each handle in model cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2, mCache3}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mModelCache[i].back();
+ mModelCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (status != ErrorStatus::GENERAL_FAILURE) {
ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
- ASSERT_EQ(preparedModel, nullptr);
}
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mDataCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mDataCache[i].back();
+ mDataCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
}

TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
+ std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+ std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
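+ // Baseline: every handle is READ_WRITE; each loop below downgrades one handle at a time.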
- // cache1 with invalid access mode.
- {
+ // Go through each handle in model cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ modelCacheMode[i] = AccessMode::READ_ONLY;
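+ // READ_ONLY is invalid for saving: the driver needs write access to fill the cache file.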
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ modelCacheMode[i] = AccessMode::READ_WRITE;
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid access mode.
- {
+ // Go through each handle in data cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ dataCacheMode[i] = AccessMode::READ_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ dataCacheMode[i] = AccessMode::READ_WRITE;
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
}

TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
+ std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+ std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
- // cache1 with invalid access mode.
- {
- preparedModel = nullptr;
+ // Go through each handle in model cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ modelCacheMode[i] = AccessMode::WRITE_ONLY;
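+ // WRITE_ONLY is invalid for preparing from cache: the driver needs read access.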
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ modelCacheMode[i] = AccessMode::READ_WRITE;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid access mode.
- {
- preparedModel = nullptr;
+ // Go through each handle in data cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ dataCacheMode[i] = AccessMode::WRITE_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ dataCacheMode[i] = AccessMode::READ_WRITE;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
}
}
-TEST_F(CompilationCachingTest, SaveToCacheInvalidOffset) {
- // Create test HIDL model and compile.
- Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
-
- // cache1 with invalid file descriptor offset.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Advance offset by one byte.
- ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-
- // cache2 with invalid file descriptor offset.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Advance offset by one byte.
- ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-}
-
-TEST_F(CompilationCachingTest, SaveToCacheInvalidFileSize) {
- // Create test HIDL model and compile.
- Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
-
- // cache1 with invalid file size.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Write one byte and seek back to the beginning.
- ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
- ASSERT_EQ(lseek(cache1.getNativeHandle()->data[0], 0, SEEK_SET), 0);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-
- // cache2 with invalid file size.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Write one byte and seek back to the beginning.
- ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
- ASSERT_EQ(lseek(cache2.getNativeHandle()->data[0], 0, SEEK_SET), 0);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-}
-
class CompilationCachingSecurityTest : public CompilationCachingTest,
public ::testing::WithParamInterface<uint32_t> {
protected:
@@ -537,44 +888,44 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
- // Save the compilation to cache.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ // Save the compilation to cache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
+ }
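+ // Each iteration rewrites a clean cache set, so file i is corrupted from a known-good state.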
- // Randomly flip one single bit of the cache entry.
- FILE* pFile = fopen(mCache1.c_str(), "r+");
- ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
- long int fileSize = ftell(pFile);
- ASSERT_GT(fileSize, 0);
- ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
- int readByte = fgetc(pFile);
- ASSERT_NE(readByte, EOF);
- ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
- ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
- fclose(pFile);
+ // Randomly flip a single bit of the cache entry.
+ FILE* pFile = fopen(mModelCache[i][0].c_str(), "r+");
+ ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+ long int fileSize = ftell(pFile);
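+ // A driver may leave a given cache file unused; an empty file has no bit to flip, so skip it.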
+ if (fileSize == 0) {
+ fclose(pFile);
+ continue;
+ }
+ ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+ int readByte = fgetc(pFile);
+ ASSERT_NE(readByte, EOF);
+ ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
+ ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+ fclose(pFile);
- // Retrieve preparedModel from cache, expect failure.
- {
- preparedModel = nullptr;
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- ASSERT_EQ(preparedModel, nullptr);
+ // Retrieve preparedModel from cache, expect failure.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
}
}
@@ -583,40 +934,37 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
- // Save the compilation to cache.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ // Save the compilation to cache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
+ }
- // Randomly append bytes to the cache entry.
- FILE* pFile = fopen(mCache1.c_str(), "a");
- uint32_t appendLength = getRandomInt(1, 256);
- for (uint32_t i = 0; i < appendLength; i++) {
- ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
- }
- fclose(pFile);
+ // Randomly append bytes to the cache entry.
+ FILE* pFile = fopen(mModelCache[i][0].c_str(), "a");
+ uint32_t appendLength = getRandomInt(1, 256);
+ for (uint32_t k = 0; k < appendLength; k++) {
+ ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
+ }
+ fclose(pFile);
- // Retrieve preparedModel from cache, expect failure.
- {
- preparedModel = nullptr;
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- ASSERT_EQ(preparedModel, nullptr);
+ // Retrieve preparedModel from cache, expect failure.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
}
}
@@ -625,20 +973,15 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
// Randomly flip one single bit in mToken.
@@ -647,12 +990,12 @@
// Retrieve the preparedModel from cache, expect failure.
{
- preparedModel = nullptr;
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
}
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 7f4d385..dc452e9 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -33,6 +33,7 @@
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -54,7 +55,8 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
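+ // Caching is not exercised here: pass empty cache handle vectors and a default-constructed
+ // token so the call targets the validation path only.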
Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModel_1_2(model, preference, preparedModelCallback);
+ device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -388,8 +390,9 @@
case OperationType::GROUPED_CONV_2D:
case OperationType::DEPTHWISE_CONV_2D:
case OperationType::CONV_2D: {
- if (operand == 1 && (type == OperandType::TENSOR_QUANT8_ASYMM ||
- type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
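+ // Match the filter by its operand index in operation.inputs rather than assuming
+ // it is operand 1 of the model.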
+ if (operand == operation.inputs[1] &&
+ (type == OperandType::TENSOR_QUANT8_ASYMM ||
+ type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
return true;
}
} break;
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index d411da4..b15f657 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -37,6 +37,7 @@
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;
@@ -66,7 +67,8 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
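+ // As in ValidateModel.cpp, the caching arguments are left empty for this validation-only call.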
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
- model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));