Merge "ocsp: add getOcsp and setOcsp methods to control ocsp behavior"
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 213c93a..0140275 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -3,3 +3,8 @@
[Builtin Hooks]
clang_format = true
+
+[Hook Scripts]
+aosp_hook_confirmationui = ${REPO_ROOT}/frameworks/base/tools/aosp/aosp_sha.sh ${PREUPLOAD_COMMIT} confirmationui
+aosp_hook_gatekeeper = ${REPO_ROOT}/frameworks/base/tools/aosp/aosp_sha.sh ${PREUPLOAD_COMMIT} gatekeeper
+aosp_hook_keymaster = ${REPO_ROOT}/frameworks/base/tools/aosp/aosp_sha.sh ${PREUPLOAD_COMMIT} keymaster
diff --git a/audio/common/all-versions/default/service/android.hardware.audio@2.0-service.rc b/audio/common/all-versions/default/service/android.hardware.audio@2.0-service.rc
index 6e91bcc..72b4d19 100644
--- a/audio/common/all-versions/default/service/android.hardware.audio@2.0-service.rc
+++ b/audio/common/all-versions/default/service/android.hardware.audio@2.0-service.rc
@@ -6,9 +6,4 @@
capabilities BLOCK_SUSPEND
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
- # audioflinger restarts itself when it loses connection with the hal
- # and its .rc file has an "onrestart restart audio-hal" rule, thus
- # an additional auto-restart from the init process isn't needed.
- oneshot
- interface android.hardware.audio@4.0::IDevicesFactory default
- interface android.hardware.audio@2.0::IDevicesFactory default
+ onrestart restart audioserver
diff --git a/audio/core/all-versions/vts/functional/Android.bp b/audio/core/all-versions/vts/functional/Android.bp
index 88fdb5a..5e17ade 100644
--- a/audio/core/all-versions/vts/functional/Android.bp
+++ b/audio/core/all-versions/vts/functional/Android.bp
@@ -24,6 +24,7 @@
"libxml2",
],
shared_libs: [
+ "libaudiofoundation",
"libfmq",
],
header_libs: [
diff --git a/automotive/evs/1.0/IEvsCamera.hal b/automotive/evs/1.0/IEvsCamera.hal
index dbcaf92..464dafb 100644
--- a/automotive/evs/1.0/IEvsCamera.hal
+++ b/automotive/evs/1.0/IEvsCamera.hal
@@ -16,7 +16,6 @@
package android.hardware.automotive.evs@1.0;
-import types;
import IEvsCameraStream;
@@ -28,8 +27,8 @@
/**
* Returns the ID of this camera.
*
- * Returns the description of this camera. This must be the same value as reported
- * by EvsEnumerator::getCamerList().
+ * @return info The description of this camera. This must be the same value as
+ * reported by EvsEnumerator::getCameraList().
*/
getCameraInfo() generates (CameraDesc info);
@@ -43,16 +42,20 @@
* in which case buffers should be added or removed from the chain as appropriate.
* If no call is made to this entry point, the IEvsCamera must support at least one
* frame by default. More is acceptable.
- * BUFFER_NOT_AVAILABLE is returned if the implementation cannot support the
- * requested number of concurrent frames.
+ *
+ * @param bufferCount Number of buffers the client of IEvsCamera may hold concurrently.
+ * @return result EvsResult::OK is returned if this call is successful.
*/
setMaxFramesInFlight(uint32_t bufferCount) generates (EvsResult result);
/**
- * Request delivery of EVS camera frames from this camera.
+ * Request to start EVS camera stream from this camera.
*
- * The IEvsCameraStream must begin receiving periodic calls with new image
- * frames until stopVideoStream() is called.
+     * The IEvsCameraStream must begin receiving calls for various events,
+     * including the delivery of new image frames, until stopVideoStream() is
+     * called.
+ *
+ * @param receiver IEvsCameraStream implementation.
+ * @return result EvsResult::OK is returned if this call is successful.
*/
startVideoStream(IEvsCameraStream receiver) generates (EvsResult result);
@@ -64,6 +67,8 @@
* A small, finite number of buffers are available (possibly as small
* as one), and if the supply is exhausted, no further frames may be
* delivered until a buffer is returned.
+ *
+ * @param buffer A buffer to be returned.
*/
oneway doneWithFrame(BufferDesc buffer);
@@ -83,6 +88,11 @@
* The values allowed for opaqueIdentifier are driver specific,
* but no value passed in may crash the driver. The driver should
* return 0 for any unrecognized opaqueIdentifier.
+ *
+     * @param opaqueIdentifier A unique identifier of the information to
+ * request.
+ * @return value Requested information. Zero is returned if the
+ * driver does not recognize a given identifier.
*/
getExtendedInfo(uint32_t opaqueIdentifier) generates (int32_t value);
@@ -94,6 +104,11 @@
* in order to function in a default state.
* INVALID_ARG is returned if the opaqueValue is not meaningful to
* the driver implementation.
+ *
+     * @param opaqueIdentifier A unique identifier of the information to
+     *                         program.
+     * @param opaqueValue A value to program.
+ * @return result EvsResult::OK is returned if this call is successful.
*/
setExtendedInfo(uint32_t opaqueIdentifier, int32_t opaqueValue) generates (EvsResult result);
};
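
A minimal client-side sketch of the call sequence documented above: set a buffer budget, start the stream, return frames, and stop the stream. This is illustrative only; the helper name and error handling are assumptions and not part of the interface.

    // Hypothetical client flow for IEvsCamera 1.0 (sketch, not part of this change).
    #include <android/hardware/automotive/evs/1.0/IEvsCamera.h>

    using ::android::sp;
    using ::android::hardware::automotive::evs::V1_0::EvsResult;
    using ::android::hardware::automotive::evs::V1_0::IEvsCamera;
    using ::android::hardware::automotive::evs::V1_0::IEvsCameraStream;

    // 'cam' comes from IEvsEnumerator::openCamera(); 'handler' is the client's
    // IEvsCameraStream implementation receiving deliverFrame() callbacks.
    bool runCameraOnce(const sp<IEvsCamera>& cam, const sp<IEvsCameraStream>& handler) {
        // Request a small buffer budget; BUFFER_NOT_AVAILABLE means it cannot be met.
        EvsResult result = cam->setMaxFramesInFlight(2);
        if (result != EvsResult::OK) {
            return false;
        }

        // Frames are delivered to handler->deliverFrame() until stopVideoStream().
        result = cam->startVideoStream(handler);
        if (result != EvsResult::OK) {
            return false;
        }

        // ... every delivered BufferDesc must be handed back via cam->doneWithFrame() ...

        // Stop the stream; a NULL bufferHandle marks the end of the stream.
        cam->stopVideoStream();
        return true;
    }
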
diff --git a/automotive/evs/1.0/IEvsCameraStream.hal b/automotive/evs/1.0/IEvsCameraStream.hal
index 4e743b2..ec18f6a 100644
--- a/automotive/evs/1.0/IEvsCameraStream.hal
+++ b/automotive/evs/1.0/IEvsCameraStream.hal
@@ -31,6 +31,8 @@
* When the last frame in the stream has been delivered, a NULL bufferHandle
* must be delivered, signifying the end of the stream. No further frame
* deliveries may happen thereafter.
+ *
+ * @param buffer a buffer descriptor of a delivered image frame.
*/
oneway deliverFrame(BufferDesc buffer);
};
diff --git a/automotive/evs/1.0/IEvsDisplay.hal b/automotive/evs/1.0/IEvsDisplay.hal
index 12541f3..72f767e 100644
--- a/automotive/evs/1.0/IEvsDisplay.hal
+++ b/automotive/evs/1.0/IEvsDisplay.hal
@@ -16,8 +16,6 @@
package android.hardware.automotive.evs@1.0;
-import types;
-
/**
* Represents a single camera and is the primary interface for capturing images.
@@ -28,6 +26,9 @@
* Returns basic information about the EVS display provided by the system.
*
* See the description of the DisplayDesc structure for details.
+ *
+ * @return info The description of this display. Please see the description
+ * of the DisplayDesc structure for details.
*/
getDisplayInfo() generates (DisplayDesc info);
@@ -42,6 +43,9 @@
* video. When the display is no longer required, the client is expected to request
* the NOT_VISIBLE state after passing the last video frame.
* Returns INVALID_ARG if the requested state is not a recognized value.
+ *
+ * @param state Desired new DisplayState.
+ * @return result EvsResult::OK is returned if this call is successful.
*/
setDisplayState(DisplayState state) generates (EvsResult result);
@@ -54,6 +58,8 @@
* the logic responsible for changing display states should generally live above
* the device layer, making it undesirable for the HAL implementation to spontaneously
* change display states.
+ *
+ * @return state Current DisplayState of this Display.
*/
getDisplayState() generates (DisplayState state);
@@ -61,9 +67,11 @@
/**
* This call returns a handle to a frame buffer associated with the display.
*
- * The returned buffer may be locked and written to by software and/or GL. This buffer
- * must be returned via a call to returnTargetBufferForDisplay() even if the
- * display is no longer visible.
+ * @return buffer A handle to a frame buffer. The returned buffer may be
+ * locked and written to by software and/or GL. This buffer
+ * must be returned via a call to
+ * returnTargetBufferForDisplay() even if the display is no
+ * longer visible.
*/
getTargetBuffer() generates (BufferDesc buffer);
@@ -75,6 +83,9 @@
* There is no maximum time the caller may hold onto the buffer before making this
* call. The buffer may be returned at any time and in any DisplayState, but all
* buffers are expected to be returned before the IEvsDisplay interface is destroyed.
+ *
+ * @param buffer A buffer handle to the frame that is ready for display.
+ * @return result EvsResult::OK is returned if this call is successful.
*/
returnTargetBufferForDisplay(BufferDesc buffer) generates (EvsResult result);
};
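
A minimal sketch of the display round trip described above: request visibility, fetch the target buffer, and hand it back for presentation. The rendering step is elided, and the helper name is an assumption.

    // Hypothetical client flow for IEvsDisplay (sketch, not part of this change).
    #include <android/hardware/automotive/evs/1.0/IEvsDisplay.h>

    using ::android::sp;
    using ::android::hardware::automotive::evs::V1_0::BufferDesc;
    using ::android::hardware::automotive::evs::V1_0::DisplayState;
    using ::android::hardware::automotive::evs::V1_0::EvsResult;
    using ::android::hardware::automotive::evs::V1_0::IEvsDisplay;

    // 'display' comes from IEvsEnumerator::openDisplay().
    bool presentOneFrame(const sp<IEvsDisplay>& display) {
        // The display starts NOT_VISIBLE; ask it to become visible with the next frame.
        EvsResult result = display->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME);
        if (result != EvsResult::OK) {
            return false;
        }

        // Fetch the target buffer through the generated synchronous callback.
        BufferDesc target = {};
        display->getTargetBuffer([&target](const BufferDesc& buffer) { target = buffer; });
        if (target.memHandle.getNativeHandle() == nullptr) {
            return false;   // No buffer available, e.g. display ownership was lost
        }

        // ... render into 'target' via gralloc or GL (not shown) ...

        // Hand the buffer back; it is no longer valid for the client after this call.
        result = display->returnTargetBufferForDisplay(target);
        return result == EvsResult::OK;
    }
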
diff --git a/automotive/evs/1.0/IEvsEnumerator.hal b/automotive/evs/1.0/IEvsEnumerator.hal
index ee51e7e..e5633df 100644
--- a/automotive/evs/1.0/IEvsEnumerator.hal
+++ b/automotive/evs/1.0/IEvsEnumerator.hal
@@ -16,7 +16,6 @@
package android.hardware.automotive.evs@1.0;
-import types;
import IEvsCamera;
import IEvsDisplay;
@@ -28,6 +27,8 @@
/**
* Returns a list of all EVS cameras available to the system
+ *
+     * @return cameras A list of cameras available for the EVS service.
*/
getCameraList() generates (vec<CameraDesc> cameras);
@@ -37,9 +38,9 @@
* Given a camera's unique cameraId from CameraDesc, returns the
* IEvsCamera interface associated with the specified camera. When
* done using the camera, the caller may release it by calling closeCamera().
- * Note: Reliance on the sp<> going out of scope is not recommended
- * because the resources may not be released right away due to asynchronos
- * behavior in the hardware binder (ref b/36122635).
+ *
+ * @param cameraId A unique identifier of the camera.
+ * @return carCamera EvsCamera object associated with a given cameraId.
*/
openCamera(string cameraId) generates (IEvsCamera carCamera);
@@ -48,6 +49,8 @@
*
* When the IEvsCamera object is no longer required, it must be released.
* NOTE: Video streaming must be cleanly stopped before making this call.
+ *
+ * @param carCamera EvsCamera object to be closed.
*/
closeCamera(IEvsCamera carCamera);
@@ -60,8 +63,8 @@
* the old instance shall be closed and give the new caller exclusive
* access.
* When done using the display, the caller may release it by calling closeDisplay().
- * TODO(b/36122635) Reliance on the sp<> going out of scope is not recommended because the
- * resources may not be released right away due to asynchronos behavior in the hardware binder.
+ *
+ * @return display EvsDisplay object to be used.
*/
openDisplay() generates (IEvsDisplay display);
@@ -70,6 +73,8 @@
*
* When the IEvsDisplay object is no longer required, it must be released.
* NOTE: All buffers must have been returned to the display before making this call.
+ *
+ * @param display EvsDisplay object to be closed.
*/
closeDisplay(IEvsDisplay display);
@@ -80,6 +85,8 @@
* the actual state of the active display. This call is replicated on the IEvsEnumerator
* interface in order to allow secondary clients to monitor the state of the EVS display
* without acquiring exclusive ownership of the display.
+ *
+ * @return state Current DisplayState of this Display.
*/
getDisplayState() generates (DisplayState state);
};
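
A minimal discovery sketch tying the calls above together: bind to an enumerator instance, list the cameras, and open one by its ID. The instance name and helper name are assumptions; the matching closeCamera() call is left to the caller.

    // Hypothetical discovery flow for IEvsEnumerator (sketch, not part of this change).
    #include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>

    #include <string>
    #include <vector>

    using ::android::sp;
    using ::android::hardware::automotive::evs::V1_0::CameraDesc;
    using ::android::hardware::automotive::evs::V1_0::IEvsCamera;
    using ::android::hardware::automotive::evs::V1_0::IEvsEnumerator;

    sp<IEvsCamera> openFirstCamera(const std::string& serviceName) {
        // Bind to a registered enumerator; the instance name is deployment specific.
        sp<IEvsEnumerator> enumerator = IEvsEnumerator::getService(serviceName);
        if (enumerator == nullptr) {
            return nullptr;
        }

        // Ask which cameras are available to the EVS service.
        std::vector<CameraDesc> cameras;
        enumerator->getCameraList([&cameras](const auto& list) {
            cameras.assign(list.begin(), list.end());
        });
        if (cameras.empty()) {
            return nullptr;
        }

        // Open the first camera by its unique cameraId. When done, the caller must
        // stop any video stream and release it with enumerator->closeCamera().
        sp<IEvsCamera> cam = enumerator->openCamera(cameras[0].cameraId);
        return cam;
    }
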
diff --git a/automotive/evs/1.0/default/android.hardware.automotive.evs@1.0-service.rc b/automotive/evs/1.0/default/android.hardware.automotive.evs@1.0-service.rc
index 117c249..8dcd969 100644
--- a/automotive/evs/1.0/default/android.hardware.automotive.evs@1.0-service.rc
+++ b/automotive/evs/1.0/default/android.hardware.automotive.evs@1.0-service.rc
@@ -2,3 +2,4 @@
class hal
user automotive_evs
group automotive_evs
+ disabled # do not start automatically
diff --git a/automotive/evs/1.0/types.hal b/automotive/evs/1.0/types.hal
index 7cebf6d..1efd5eb 100644
--- a/automotive/evs/1.0/types.hal
+++ b/automotive/evs/1.0/types.hal
@@ -24,8 +24,15 @@
* EVS camera in the system.
*/
struct CameraDesc {
+    /* Unique identifier for this camera device. This may be a path to the
+     * detected camera device; for example, "/dev/video0".
+ */
string cameraId;
- uint32_t vendorFlags; // Opaque value from driver
+
+ /* Opaque value from driver. Vendor may use this field to store additional
+ * information; for example, sensor and bridge chip id.
+ */
+ uint32_t vendorFlags;
};
@@ -38,8 +45,11 @@
* presentation device.
*/
struct DisplayDesc {
+ /* Unique identifier for the display */
string displayId;
- uint32_t vendorFlags; // Opaque value from driver
+
+ /* Opaque value from driver */
+ uint32_t vendorFlags;
};
@@ -56,14 +66,31 @@
* Specifically consider if format and/or usage should become enumerated types.
*/
struct BufferDesc {
- uint32_t width; // Units of pixels
- uint32_t height; // Units of pixels
- uint32_t stride; // Units of pixels to match gralloc
- uint32_t pixelSize; // Units of bytes
- uint32_t format; // May contain values from android_pixel_format_t
- uint32_t usage; // May contain values from from Gralloc.h
- uint32_t bufferId; // Opaque value from driver
- handle memHandle; // gralloc memory buffer handle
+ /* A frame width in the units of pixels */
+ uint32_t width;
+
+ /* A frame height in the units of pixels */
+ uint32_t height;
+
+ /* A frame stride in the units of pixels, to match gralloc */
+ uint32_t stride;
+
+ /* The size of a pixel in the units of bytes */
+ uint32_t pixelSize;
+
+ /* The image format of the frame; may contain values from
+ * android_pixel_format_t
+ */
+ uint32_t format;
+
+ /* May contain values from Gralloc.h */
+ uint32_t usage;
+
+ /* Opaque value from driver */
+ uint32_t bufferId;
+
+ /* Gralloc memory buffer handle */
+ handle memHandle;
};
@@ -77,12 +104,23 @@
* presentation device.
*/
enum DisplayState : uint32_t {
- NOT_OPEN = 0, // Display has not been requested by any application
- NOT_VISIBLE, // Display is inhibited
- VISIBLE_ON_NEXT_FRAME, // Will become visible with next frame
- VISIBLE, // Display is currently active
- DEAD, // Driver is in an undefined state. Interface should be closed.
- NUM_STATES // Must be last
+ /* Display has not been requested by any application yet */
+ NOT_OPEN = 0,
+
+ /* Display is inhibited */
+ NOT_VISIBLE,
+
+ /* Will become visible with next frame */
+ VISIBLE_ON_NEXT_FRAME,
+
+ /* Display is currently active */
+ VISIBLE,
+
+ /* Driver is in an undefined state. Interface should be closed. */
+ DEAD,
+
+    /* Must be the last entry */
+ NUM_STATES
};
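
Since the BufferDesc fields above mix pixel and byte units, a short sketch of the implied arithmetic: width and stride are pixel counts and pixelSize is bytes per pixel, so one row spans stride * pixelSize bytes. The helper names are illustrative only.

    // Illustrative helpers for the BufferDesc units documented above (not part of this change).
    #include <android/hardware/automotive/evs/1.0/types.h>

    #include <cstddef>

    using ::android::hardware::automotive::evs::V1_0::BufferDesc;

    // Bytes spanned by one row, including padding implied by the stride
    // (stride and width are in pixels; pixelSize is in bytes).
    inline std::size_t rowSizeInBytes(const BufferDesc& desc) {
        return static_cast<std::size_t>(desc.stride) * desc.pixelSize;
    }

    // Total bytes covered when walking every row of the image.
    inline std::size_t imageSizeInBytes(const BufferDesc& desc) {
        return rowSizeInBytes(desc) * desc.height;
    }
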
diff --git a/automotive/evs/1.0/vts/functional/Android.bp b/automotive/evs/1.0/vts/functional/Android.bp
index 2ef33fd..8988bfd 100644
--- a/automotive/evs/1.0/vts/functional/Android.bp
+++ b/automotive/evs/1.0/vts/functional/Android.bp
@@ -19,13 +19,15 @@
srcs: [
"VtsHalEvsV1_0TargetTest.cpp",
"FrameHandler.cpp",
- "FormatConvert.cpp"
],
defaults: ["VtsHalTargetTestDefaults"],
shared_libs: [
"libui",
],
- static_libs: ["android.hardware.automotive.evs@1.0"],
+ static_libs: [
+ "android.hardware.automotive.evs@1.0",
+ "android.hardware.automotive.evs@common-default-lib",
+ ],
test_suites: ["general-tests"],
cflags: [
"-O0",
diff --git a/automotive/evs/1.0/vts/functional/FormatConvert.h b/automotive/evs/1.0/vts/functional/FormatConvert.h
deleted file mode 100644
index 4a94f99..0000000
--- a/automotive/evs/1.0/vts/functional/FormatConvert.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef EVS_VTS_FORMATCONVERT_H
-#define EVS_VTS_FORMATCONVERT_H
-
-#include <queue>
-#include <stdint.h>
-
-
-// Given an image buffer in NV21 format (HAL_PIXEL_FORMAT_YCRCB_420_SP), output 32bit RGBx/BGRx
-// values. The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
-// U/V array. It assumes an even width and height for the overall image, and a horizontal
-// stride that is an even multiple of 16 bytes for both the Y and UV arrays.
-void copyNV21toRGB32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels,
- bool bgrxFormat = false);
-
-void copyNV21toBGR32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels);
-
-
-// Given an image buffer in YV12 format (HAL_PIXEL_FORMAT_YV12), output 32bit RGBx/BGRx values.
-// The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
-// by another 1/2 x 1/2 V array. It assumes an even width and height for the overall image,
-// and a horizontal stride that is an even multiple of 16 bytes for each of the Y, U,
-// and V arrays.
-void copyYV12toRGB32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels,
- bool bgrxFormat = false);
-
-void copyYV12toBGR32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels);
-
-// Given an image buffer in YUYV format (HAL_PIXEL_FORMAT_YCBCR_422_I), output 32bit RGBx/BGRx
-// values. The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
-// U/V array. It assumes an even width and height for the overall image, and a horizontal
-// stride that is an even multiple of 16 bytes for both the Y and UV arrays.
-void copyYUYVtoRGB32(unsigned width, unsigned height,
- uint8_t* src, unsigned srcStrideBytes,
- uint32_t* dst, unsigned dstStrideBytes,
- bool bgrxFormat = false);
-
-void copyYUYVtoBGR32(unsigned width, unsigned height,
- uint8_t* src, unsigned srcStrideBytes,
- uint32_t* dst, unsigned dstStrideBytes);
-
-
-// Given an simple rectangular image buffer with an integer number of bytes per pixel,
-// copy the pixel values into a new rectangular buffer (potentially with a different stride).
-// This is typically used to copy RGBx data into an RGBx output buffer.
-void copyMatchedInterleavedFormats(unsigned width, unsigned height,
- void* src, unsigned srcStridePixels,
- void* dst, unsigned dstStridePixels,
- unsigned pixelSize);
-
-#endif // EVS_VTS_FORMATCONVERT_H
diff --git a/automotive/evs/1.0/vts/functional/FrameHandler.cpp b/automotive/evs/1.0/vts/functional/FrameHandler.cpp
index bc3790f..6a01a44 100644
--- a/automotive/evs/1.0/vts/functional/FrameHandler.cpp
+++ b/automotive/evs/1.0/vts/functional/FrameHandler.cpp
@@ -240,46 +240,47 @@
tgt->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)&tgtPixels);
if (srcPixels && tgtPixels) {
+ using namespace ::android::hardware::automotive::evs::common;
if (tgtBuffer.format == HAL_PIXEL_FORMAT_RGBA_8888) {
if (srcBuffer.format == HAL_PIXEL_FORMAT_YCRCB_420_SP) { // 420SP == NV21
- copyNV21toRGB32(width, height,
- srcPixels,
- tgtPixels, tgtBuffer.stride);
+ Utils::copyNV21toRGB32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
} else if (srcBuffer.format == HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
- copyYV12toRGB32(width, height,
- srcPixels,
- tgtPixels, tgtBuffer.stride);
+ Utils::copyYV12toRGB32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
} else if (srcBuffer.format == HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
- copyYUYVtoRGB32(width, height,
- srcPixels, srcBuffer.stride,
- tgtPixels, tgtBuffer.stride);
+ Utils::copyYUYVtoRGB32(width, height,
+ srcPixels, srcBuffer.stride,
+ tgtPixels, tgtBuffer.stride);
} else if (srcBuffer.format == tgtBuffer.format) { // 32bit RGBA
- copyMatchedInterleavedFormats(width, height,
- srcPixels, srcBuffer.stride,
- tgtPixels, tgtBuffer.stride,
- tgtBuffer.pixelSize);
+ Utils::copyMatchedInterleavedFormats(width, height,
+ srcPixels, srcBuffer.stride,
+ tgtPixels, tgtBuffer.stride,
+ tgtBuffer.pixelSize);
} else {
ALOGE("Camera buffer format is not supported");
success = false;
}
} else if (tgtBuffer.format == HAL_PIXEL_FORMAT_BGRA_8888) {
if (srcBuffer.format == HAL_PIXEL_FORMAT_YCRCB_420_SP) { // 420SP == NV21
- copyNV21toBGR32(width, height,
- srcPixels,
- tgtPixels, tgtBuffer.stride);
+ Utils::copyNV21toBGR32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
} else if (srcBuffer.format == HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
- copyYV12toBGR32(width, height,
- srcPixels,
- tgtPixels, tgtBuffer.stride);
+ Utils::copyYV12toBGR32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
} else if (srcBuffer.format == HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
- copyYUYVtoBGR32(width, height,
- srcPixels, srcBuffer.stride,
- tgtPixels, tgtBuffer.stride);
+ Utils::copyYUYVtoBGR32(width, height,
+ srcPixels, srcBuffer.stride,
+ tgtPixels, tgtBuffer.stride);
} else if (srcBuffer.format == tgtBuffer.format) { // 32bit RGBA
- copyMatchedInterleavedFormats(width, height,
- srcPixels, srcBuffer.stride,
- tgtPixels, tgtBuffer.stride,
- tgtBuffer.pixelSize);
+ Utils::copyMatchedInterleavedFormats(width, height,
+ srcPixels, srcBuffer.stride,
+ tgtPixels, tgtBuffer.stride,
+ tgtBuffer.pixelSize);
} else {
ALOGE("Camera buffer format is not supported");
success = false;
diff --git a/automotive/evs/1.1/Android.bp b/automotive/evs/1.1/Android.bp
new file mode 100644
index 0000000..d2e85f1
--- /dev/null
+++ b/automotive/evs/1.1/Android.bp
@@ -0,0 +1,22 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.automotive.evs@1.1",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IEvsCamera.hal",
+ "IEvsCameraStream.hal",
+ ],
+ interfaces: [
+ "android.hardware.automotive.evs@1.0",
+ "android.hardware.graphics.common@1.0",
+ "android.hardware.graphics.common@1.1",
+ "android.hardware.graphics.common@1.2",
+ "android.hidl.base@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/automotive/evs/1.1/IEvsCamera.hal b/automotive/evs/1.1/IEvsCamera.hal
new file mode 100644
index 0000000..e7f6bb7
--- /dev/null
+++ b/automotive/evs/1.1/IEvsCamera.hal
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs@1.1;
+
+import @1.0::IEvsCamera;
+import @1.0::EvsResult;
+import IEvsCameraStream;
+
+/**
+ * Represents a single camera and is the primary interface for capturing images.
+ */
+interface IEvsCamera extends @1.0::IEvsCamera {
+ /**
+ * Requests to pause EVS camera stream events.
+ *
+ * Like stopVideoStream(), events may continue to arrive for some time
+ * after this call returns. Delivered frame buffers must be returned.
+ *
+ * @return result EvsResult::OK is returned if this call is successful.
+ */
+ pauseVideoStream() generates (EvsResult result);
+
+ /**
+ * Requests to resume EVS camera stream.
+ *
+ * @return result EvsResult::OK is returned if this call is successful.
+ */
+ resumeVideoStream() generates (EvsResult result);
+
+ /**
+     * Returns a frame that was delivered to the IEvsCameraStream.
+ *
+ * When done consuming a frame delivered to the IEvsCameraStream
+ * interface, it must be returned to the IEvsCamera for reuse.
+ * A small, finite number of buffers are available (possibly as small
+ * as one), and if the supply is exhausted, no further frames may be
+ * delivered until a buffer is returned.
+ *
+ * @param buffer A buffer to be returned.
+ * @return result Return EvsResult::OK if this call is successful.
+     * @return result EvsResult::OK is returned if this call is successful.
+ doneWithFrame_1_1(BufferDesc buffer) generates (EvsResult result);
+};
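
A minimal sketch of how a v1.1 client might use the methods above: doneWithFrame_1_1() now reports a result, and pausing may legitimately be unsupported (the default implementation in this change returns UNDERLYING_SERVICE_ERROR). Helper names are assumptions.

    // Hypothetical v1.1 client snippets (sketch, not part of this change).
    #include <android/hardware/automotive/evs/1.1/IEvsCamera.h>
    #include <android/hardware/automotive/evs/1.1/types.h>

    using ::android::sp;
    using ::android::hardware::automotive::evs::V1_0::EvsResult;
    using BufferDesc_1_1 = ::android::hardware::automotive::evs::V1_1::BufferDesc;
    using IEvsCamera_1_1 = ::android::hardware::automotive::evs::V1_1::IEvsCamera;

    // Return a consumed frame and surface any error that the 1.0 oneway
    // doneWithFrame() could not report back to the caller.
    bool returnFrame(const sp<IEvsCamera_1_1>& cam, const BufferDesc_1_1& buffer) {
        EvsResult result = cam->doneWithFrame_1_1(buffer);
        return result == EvsResult::OK;
    }

    // Pausing is optional; a non-OK result simply means the stream keeps running.
    bool tryPause(const sp<IEvsCamera_1_1>& cam) {
        EvsResult result = cam->pauseVideoStream();
        return result == EvsResult::OK;
    }
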
diff --git a/automotive/evs/1.1/IEvsCameraStream.hal b/automotive/evs/1.1/IEvsCameraStream.hal
new file mode 100644
index 0000000..cd058a5
--- /dev/null
+++ b/automotive/evs/1.1/IEvsCameraStream.hal
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs@1.1;
+
+import @1.0::IEvsCameraStream;
+
+/**
+ * Implemented on client side to receive asynchronous video frame deliveries.
+ */
+interface IEvsCameraStream extends @1.0::IEvsCameraStream {
+ /**
+ * Receives calls from the HAL each time an event happens.
+ *
+ * @param event EVS event with possible event information.
+ */
+ oneway notifyEvent(EvsEvent event);
+};
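
A skeleton of a client-side receiver that satisfies both generations of the callback interface. The EvsEvent payload handling is left as comments because the 1.1 types.hal that defines it is not part of this hunk; the class name is an assumption.

    // Hypothetical client-side stream receiver (sketch, not part of this change).
    #include <android/hardware/automotive/evs/1.1/IEvsCameraStream.h>

    using ::android::hardware::Return;
    using ::android::hardware::Void;
    using BufferDesc_1_0 = ::android::hardware::automotive::evs::V1_0::BufferDesc;
    using ::android::hardware::automotive::evs::V1_1::EvsEvent;
    using IEvsCameraStream_1_1 = ::android::hardware::automotive::evs::V1_1::IEvsCameraStream;

    class StreamReceiver : public IEvsCameraStream_1_1 {
    public:
        // v1.0 entry point: raw frame deliveries from 1.0 producers.
        Return<void> deliverFrame(const BufferDesc_1_0& buffer) override {
            // ... queue the frame and return it later via IEvsCamera::doneWithFrame() ...
            (void)buffer;
            return Void();
        }

        // v1.1 entry point: carries both new-frame and informational events.
        Return<void> notifyEvent(const EvsEvent& event) override {
            // ... inspect the event (e.g. a delivered buffer or STREAM_STOPPED) and
            // return any delivered buffer via IEvsCamera::doneWithFrame_1_1() ...
            (void)event;
            return Void();
        }
    };
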
diff --git a/automotive/evs/1.1/default/Android.bp b/automotive/evs/1.1/default/Android.bp
new file mode 100644
index 0000000..411f0ff
--- /dev/null
+++ b/automotive/evs/1.1/default/Android.bp
@@ -0,0 +1,32 @@
+cc_binary {
+ name: "android.hardware.automotive.evs@1.1-service",
+ defaults: ["hidl_defaults"],
+ proprietary: true,
+ relative_install_path: "hw",
+ srcs: [
+ "service.cpp",
+ "EvsCamera.cpp",
+ "EvsEnumerator.cpp",
+ "EvsDisplay.cpp"
+ ],
+ init_rc: ["android.hardware.automotive.evs@1.1-service.rc"],
+
+ shared_libs: [
+ "android.hardware.automotive.evs@1.0",
+ "android.hardware.automotive.evs@1.1",
+ "libbase",
+ "libbinder",
+ "libcutils",
+ "libhardware",
+ "libhidlbase",
+ "libhidltransport",
+ "liblog",
+ "libui",
+ "libutils",
+ ],
+
+ cflags: [
+ "-O0",
+ "-g",
+ ],
+}
diff --git a/automotive/evs/1.1/default/EvsCamera.cpp b/automotive/evs/1.1/default/EvsCamera.cpp
new file mode 100644
index 0000000..62d9826
--- /dev/null
+++ b/automotive/evs/1.1/default/EvsCamera.cpp
@@ -0,0 +1,584 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.automotive.evs@1.1-service"
+
+#include "EvsCamera.h"
+#include "EvsEnumerator.h"
+
+#include <ui/GraphicBufferAllocator.h>
+#include <ui/GraphicBufferMapper.h>
+
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace V1_1 {
+namespace implementation {
+
+
+// Special camera names for which we'll initialize alternate test data
+const char EvsCamera::kCameraName_Backup[] = "backup";
+
+
+// Arbitrary limit on number of graphics buffers allowed to be allocated
+// Safeguards against unreasonable resource consumption and provides a testable limit
+const unsigned MAX_BUFFERS_IN_FLIGHT = 100;
+
+
+EvsCamera::EvsCamera(const char *id) :
+ mFramesAllowed(0),
+ mFramesInUse(0),
+ mStreamState(STOPPED) {
+
+ ALOGD("EvsCamera instantiated");
+
+ mDescription.cameraId = id;
+
+ // Set up dummy data for testing
+ if (mDescription.cameraId == kCameraName_Backup) {
+ mWidth = 640; // full NTSC/VGA
+ mHeight = 480; // full NTSC/VGA
+ mDescription.vendorFlags = 0xFFFFFFFF; // Arbitrary value
+ } else {
+ mWidth = 320; // 1/2 NTSC/VGA
+ mHeight = 240; // 1/2 NTSC/VGA
+ }
+
+ mFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+ mUsage = GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_CAMERA_WRITE |
+ GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_RARELY;
+}
+
+
+EvsCamera::~EvsCamera() {
+ ALOGD("EvsCamera being destroyed");
+ forceShutdown();
+}
+
+
+//
+// This gets called if another caller "steals" ownership of the camera
+//
+void EvsCamera::forceShutdown()
+{
+ ALOGD("EvsCamera forceShutdown");
+
+ // Make sure our output stream is cleaned up
+ // (It really should be already)
+ stopVideoStream();
+
+ // Claim the lock while we work on internal state
+ std::lock_guard <std::mutex> lock(mAccessLock);
+
+ // Drop all the graphics buffers we've been using
+ if (mBuffers.size() > 0) {
+ GraphicBufferAllocator& alloc(GraphicBufferAllocator::get());
+ for (auto&& rec : mBuffers) {
+ if (rec.inUse) {
+ ALOGE("Error - releasing buffer despite remote ownership");
+ }
+ alloc.free(rec.handle);
+ rec.handle = nullptr;
+ }
+ mBuffers.clear();
+ }
+
+ // Put this object into an unrecoverable error state since somebody else
+ // is going to own the underlying camera now
+ mStreamState = DEAD;
+}
+
+
+// Methods from ::android::hardware::automotive::evs::V1_0::IEvsCamera follow.
+Return<void> EvsCamera::getCameraInfo(getCameraInfo_cb _hidl_cb) {
+ ALOGD("getCameraInfo");
+
+ // Send back our self description
+ _hidl_cb(mDescription);
+ return Void();
+}
+
+
+Return<EvsResult> EvsCamera::setMaxFramesInFlight(uint32_t bufferCount) {
+ ALOGD("setMaxFramesInFlight");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ // If we've been displaced by another owner of the camera, then we can't do anything else
+ if (mStreamState == DEAD) {
+ ALOGE("ignoring setMaxFramesInFlight call when camera has been lost.");
+ return EvsResult::OWNERSHIP_LOST;
+ }
+
+ // We cannot function without at least one video buffer to send data
+ if (bufferCount < 1) {
+ ALOGE("Ignoring setMaxFramesInFlight with less than one buffer requested");
+ return EvsResult::INVALID_ARG;
+ }
+
+ // Update our internal state
+ if (setAvailableFrames_Locked(bufferCount)) {
+ return EvsResult::OK;
+ } else {
+ return EvsResult::BUFFER_NOT_AVAILABLE;
+ }
+}
+
+
+Return<EvsResult> EvsCamera::startVideoStream(const ::android::sp<IEvsCameraStream_1_0>& stream) {
+ ALOGD("startVideoStream");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ // If we've been displaced by another owner of the camera, then we can't do anything else
+ if (mStreamState == DEAD) {
+ ALOGE("ignoring startVideoStream call when camera has been lost.");
+ return EvsResult::OWNERSHIP_LOST;
+ }
+ if (mStreamState != STOPPED) {
+ ALOGE("ignoring startVideoStream call when a stream is already running.");
+ return EvsResult::STREAM_ALREADY_RUNNING;
+ }
+
+ // If the client never indicated otherwise, configure ourselves for a single streaming buffer
+ if (mFramesAllowed < 1) {
+ if (!setAvailableFrames_Locked(1)) {
+ ALOGE("Failed to start stream because we couldn't get a graphics buffer");
+ return EvsResult::BUFFER_NOT_AVAILABLE;
+ }
+ }
+
+ // Record the user's callback for use when we have a frame ready
+ mStream = IEvsCameraStream_1_1::castFrom(stream).withDefault(nullptr);
+ if (mStream == nullptr) {
+ ALOGE("Default implementation does not support v1.0 IEvsCameraStream");
+ return EvsResult::INVALID_ARG;
+ }
+
+ // Start the frame generation thread
+ mStreamState = RUNNING;
+ mCaptureThread = std::thread([this](){ generateFrames(); });
+
+ return EvsResult::OK;
+}
+
+
+Return<void> EvsCamera::doneWithFrame(const BufferDesc_1_0& buffer) {
+ std::lock_guard <std::mutex> lock(mAccessLock);
+ returnBuffer(buffer.bufferId, buffer.memHandle);
+
+ return Void();
+}
+
+
+Return<void> EvsCamera::stopVideoStream() {
+ ALOGD("stopVideoStream");
+ std::unique_lock <std::mutex> lock(mAccessLock);
+
+ if (mStreamState == RUNNING) {
+ // Tell the GenerateFrames loop we want it to stop
+ mStreamState = STOPPING;
+
+ // Block outside the mutex until the "stop" flag has been acknowledged
+ // We won't send any more frames, but the client might still get some already in flight
+ ALOGD("Waiting for stream thread to end..");
+ lock.unlock();
+ mCaptureThread.join();
+ lock.lock();
+
+ mStreamState = STOPPED;
+ mStream = nullptr;
+ ALOGD("Stream marked STOPPED.");
+ }
+
+ return Void();
+}
+
+
+Return<int32_t> EvsCamera::getExtendedInfo(uint32_t opaqueIdentifier) {
+ ALOGD("getExtendedInfo");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ // For any single digit value, return the index itself as a test value
+ if (opaqueIdentifier <= 9) {
+ return opaqueIdentifier;
+ }
+
+ // Return zero by default as required by the spec
+ return 0;
+}
+
+
+Return<EvsResult> EvsCamera::setExtendedInfo(uint32_t /*opaqueIdentifier*/, int32_t /*opaqueValue*/) {
+ ALOGD("setExtendedInfo");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ // If we've been displaced by another owner of the camera, then we can't do anything else
+ if (mStreamState == DEAD) {
+ ALOGE("ignoring setExtendedInfo call when camera has been lost.");
+ return EvsResult::OWNERSHIP_LOST;
+ }
+
+ // We don't store any device specific information in this implementation
+ return EvsResult::INVALID_ARG;
+}
+
+
+// Methods from ::android::hardware::automotive::evs::V1_1::IEvsCamera follow.
+Return<EvsResult> EvsCamera::doneWithFrame_1_1(const BufferDesc_1_1& bufDesc) {
+ std::lock_guard <std::mutex> lock(mAccessLock);
+ returnBuffer(bufDesc.bufferId, bufDesc.buffer.nativeHandle);
+
+ return EvsResult::OK;
+}
+
+
+Return<EvsResult> EvsCamera::pauseVideoStream() {
+ // Default implementation does not support this.
+ return EvsResult::UNDERLYING_SERVICE_ERROR;
+}
+
+
+Return<EvsResult> EvsCamera::resumeVideoStream() {
+ // Default implementation does not support this.
+ return EvsResult::UNDERLYING_SERVICE_ERROR;
+}
+
+
+bool EvsCamera::setAvailableFrames_Locked(unsigned bufferCount) {
+ if (bufferCount < 1) {
+ ALOGE("Ignoring request to set buffer count to zero");
+ return false;
+ }
+ if (bufferCount > MAX_BUFFERS_IN_FLIGHT) {
+ ALOGE("Rejecting buffer request in excess of internal limit");
+ return false;
+ }
+
+ // Is an increase required?
+ if (mFramesAllowed < bufferCount) {
+ // An increase is required
+ unsigned needed = bufferCount - mFramesAllowed;
+ ALOGI("Allocating %d buffers for camera frames", needed);
+
+ unsigned added = increaseAvailableFrames_Locked(needed);
+ if (added != needed) {
+ // If we didn't add all the frames we needed, then roll back to the previous state
+ ALOGE("Rolling back to previous frame queue size");
+ decreaseAvailableFrames_Locked(added);
+ return false;
+ }
+ } else if (mFramesAllowed > bufferCount) {
+ // A decrease is required
+ unsigned framesToRelease = mFramesAllowed - bufferCount;
+ ALOGI("Returning %d camera frame buffers", framesToRelease);
+
+ unsigned released = decreaseAvailableFrames_Locked(framesToRelease);
+ if (released != framesToRelease) {
+ // This shouldn't happen with a properly behaving client because the client
+ // should only make this call after returning sufficient outstanding buffers
+ // to allow a clean resize.
+ ALOGE("Buffer queue shrink failed -- too many buffers currently in use?");
+ }
+ }
+
+ return true;
+}
+
+
+unsigned EvsCamera::increaseAvailableFrames_Locked(unsigned numToAdd) {
+ // Acquire the graphics buffer allocator
+ GraphicBufferAllocator &alloc(GraphicBufferAllocator::get());
+
+ unsigned added = 0;
+
+ while (added < numToAdd) {
+ buffer_handle_t memHandle = nullptr;
+ status_t result = alloc.allocate(mWidth, mHeight, mFormat, 1, mUsage,
+ &memHandle, &mStride, 0, "EvsCamera");
+ if (result != NO_ERROR) {
+ ALOGE("Error %d allocating %d x %d graphics buffer", result, mWidth, mHeight);
+ break;
+ }
+ if (!memHandle) {
+ ALOGE("We didn't get a buffer handle back from the allocator");
+ break;
+ }
+
+ // Find a place to store the new buffer
+ bool stored = false;
+ for (auto&& rec : mBuffers) {
+ if (rec.handle == nullptr) {
+ // Use this existing entry
+ rec.handle = memHandle;
+ rec.inUse = false;
+ stored = true;
+ break;
+ }
+ }
+ if (!stored) {
+ // Add a BufferRecord wrapping this handle to our set of available buffers
+ mBuffers.emplace_back(memHandle);
+ }
+
+ mFramesAllowed++;
+ added++;
+ }
+
+ return added;
+}
+
+
+unsigned EvsCamera::decreaseAvailableFrames_Locked(unsigned numToRemove) {
+ // Acquire the graphics buffer allocator
+ GraphicBufferAllocator &alloc(GraphicBufferAllocator::get());
+
+ unsigned removed = 0;
+
+ for (auto&& rec : mBuffers) {
+ // Is this record not in use, but holding a buffer that we can free?
+ if ((rec.inUse == false) && (rec.handle != nullptr)) {
+ // Release buffer and update the record so we can recognize it as "empty"
+ alloc.free(rec.handle);
+ rec.handle = nullptr;
+
+ mFramesAllowed--;
+ removed++;
+
+ if (removed == numToRemove) {
+ break;
+ }
+ }
+ }
+
+ return removed;
+}
+
+
+// This is the asynchronous frame generation thread that runs in parallel with the
+// main serving thread. There is one for each active camera instance.
+void EvsCamera::generateFrames() {
+ ALOGD("Frame generation loop started");
+
+ unsigned idx;
+
+ while (true) {
+ bool timeForFrame = false;
+ nsecs_t startTime = systemTime(SYSTEM_TIME_MONOTONIC);
+
+ // Lock scope for updating shared state
+ {
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ if (mStreamState != RUNNING) {
+ // Break out of our main thread loop
+ break;
+ }
+
+ // Are we allowed to issue another buffer?
+ if (mFramesInUse >= mFramesAllowed) {
+ // Can't do anything right now -- skip this frame
+ ALOGW("Skipped a frame because too many are in flight\n");
+ } else {
+ // Identify an available buffer to fill
+ for (idx = 0; idx < mBuffers.size(); idx++) {
+ if (!mBuffers[idx].inUse) {
+ if (mBuffers[idx].handle != nullptr) {
+ // Found an available record, so stop looking
+ break;
+ }
+ }
+ }
+ if (idx >= mBuffers.size()) {
+ // This shouldn't happen since we already checked mFramesInUse vs mFramesAllowed
+ ALOGE("Failed to find an available buffer slot\n");
+ } else {
+ // We're going to make the frame busy
+ mBuffers[idx].inUse = true;
+ mFramesInUse++;
+ timeForFrame = true;
+ }
+ }
+ }
+
+ if (timeForFrame) {
+ // Assemble the buffer description we'll transmit below
+ BufferDesc_1_1 newBuffer = {};
+ AHardwareBuffer_Desc* pDesc =
+ reinterpret_cast<AHardwareBuffer_Desc *>(&newBuffer.buffer.description);
+ pDesc->width = mWidth;
+ pDesc->height = mHeight;
+ pDesc->layers = 1;
+ pDesc->format = mFormat;
+ pDesc->usage = mUsage;
+ pDesc->stride = mStride;
+ newBuffer.buffer.nativeHandle = mBuffers[idx].handle;
+ newBuffer.pixelSize = sizeof(uint32_t);
+ newBuffer.bufferId = idx;
+
+ // Write test data into the image buffer
+ fillTestFrame(newBuffer);
+
+ // Issue the (asynchronous) callback to the client -- can't be holding the lock
+ EvsEvent event;
+ event.buffer(newBuffer);
+ auto result = mStream->notifyEvent(event);
+ if (result.isOk()) {
+ ALOGD("Delivered %p as id %d",
+ newBuffer.buffer.nativeHandle.getNativeHandle(), newBuffer.bufferId);
+ } else {
+ // This can happen if the client dies and is likely unrecoverable.
+ // To avoid consuming resources generating failing calls, we stop sending
+                // frames. Note, however, that the stream remains in the "RUNNING" state
+ // until cleaned up on the main thread.
+ ALOGE("Frame delivery call failed in the transport layer.");
+
+ // Since we didn't actually deliver it, mark the frame as available
+ std::lock_guard<std::mutex> lock(mAccessLock);
+ mBuffers[idx].inUse = false;
+ mFramesInUse--;
+
+ break;
+ }
+ }
+
+ // We arbitrarily choose to generate frames at 12 fps to ensure we pass the 10fps test requirement
+ static const int kTargetFrameRate = 12;
+ static const nsecs_t kTargetFrameTimeUs = 1000*1000 / kTargetFrameRate;
+ const nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+ const nsecs_t workTimeUs = (now - startTime) / 1000;
+ const nsecs_t sleepDurationUs = kTargetFrameTimeUs - workTimeUs;
+ if (sleepDurationUs > 0) {
+ usleep(sleepDurationUs);
+ }
+ }
+
+ // If we've been asked to stop, send an event to signal the actual end of stream
+ EvsEvent event;
+ event.info(EvsEventType::STREAM_STOPPED);
+ auto result = mStream->notifyEvent(event);
+ if (!result.isOk()) {
+ ALOGE("Error delivering end of stream marker");
+ }
+
+ return;
+}
+
+
+void EvsCamera::fillTestFrame(const BufferDesc_1_1& buff) {
+ // Lock our output buffer for writing
+ uint32_t *pixels = nullptr;
+ const AHardwareBuffer_Desc* pDesc =
+ reinterpret_cast<const AHardwareBuffer_Desc *>(&buff.buffer.description);
+ GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+ mapper.lock(buff.buffer.nativeHandle,
+ GRALLOC_USAGE_SW_WRITE_OFTEN | GRALLOC_USAGE_SW_READ_NEVER,
+ android::Rect(pDesc->width, pDesc->height),
+ (void **) &pixels);
+
+ // If we failed to lock the pixel buffer, we're about to crash, but log it first
+ if (!pixels) {
+ ALOGE("Camera failed to gain access to image buffer for writing");
+ }
+
+ // Fill in the test pixels
+ for (unsigned row = 0; row < pDesc->height; row++) {
+ for (unsigned col = 0; col < pDesc->width; col++) {
+ // Index into the row to check the pixel at this column.
+ // We expect 0xFF in the LSB channel, a vertical gradient in the
+            // second channel, a horizontal gradient in the third channel, and
+ // 0xFF in the MSB.
+ // The exception is the very first 32 bits which is used for the
+ // time varying frame signature to avoid getting fooled by a static image.
+ uint32_t expectedPixel = 0xFF0000FF | // MSB and LSB
+ ((row & 0xFF) << 8) | // vertical gradient
+ ((col & 0xFF) << 16); // horizontal gradient
+ if ((row | col) == 0) {
+ static uint32_t sFrameTicker = 0;
+ expectedPixel = (sFrameTicker) & 0xFF;
+ sFrameTicker++;
+ }
+ pixels[col] = expectedPixel;
+ }
+ // Point to the next row
+ // NOTE: stride retrieved from gralloc is in units of pixels
+ pixels = pixels + pDesc->stride;
+ }
+
+ // Release our output buffer
+ mapper.unlock(buff.buffer.nativeHandle);
+}
+
+
+void EvsCamera::fillTestFrame(const BufferDesc_1_0& buff) {
+ BufferDesc_1_1 newBufDesc = {};
+ AHardwareBuffer_Desc desc = {
+ buff.width, // width
+ buff.height, // height
+ 1, // layers, always 1 for EVS
+ buff.format, // One of AHardwareBuffer_Format
+ buff.usage, // Combination of AHardwareBuffer_UsageFlags
+ buff.stride, // Row stride in pixels
+ 0, // Reserved
+ 0 // Reserved
+ };
+    memcpy(&newBufDesc.buffer.description, &desc, sizeof(desc));
+ newBufDesc.buffer.nativeHandle = buff.memHandle;
+ newBufDesc.pixelSize = buff.pixelSize;
+ newBufDesc.bufferId = buff.bufferId;
+
+ return fillTestFrame(newBufDesc);
+}
+
+
+void EvsCamera::returnBuffer(const uint32_t bufferId, const buffer_handle_t memHandle) {
+ std::lock_guard <std::mutex> lock(mAccessLock);
+
+ if (memHandle == nullptr) {
+ ALOGE("ignoring doneWithFrame called with null handle");
+ } else if (bufferId >= mBuffers.size()) {
+ ALOGE("ignoring doneWithFrame called with invalid bufferId %d (max is %zu)",
+ bufferId, mBuffers.size()-1);
+ } else if (!mBuffers[bufferId].inUse) {
+ ALOGE("ignoring doneWithFrame called on frame %d which is already free",
+ bufferId);
+ } else {
+ // Mark the frame as available
+ mBuffers[bufferId].inUse = false;
+ mFramesInUse--;
+
+ // If this frame's index is high in the array, try to move it down
+ // to improve locality after mFramesAllowed has been reduced.
+ if (bufferId >= mFramesAllowed) {
+ // Find an empty slot lower in the array (which should always exist in this case)
+ for (auto&& rec : mBuffers) {
+ if (rec.handle == nullptr) {
+ rec.handle = mBuffers[bufferId].handle;
+ mBuffers[bufferId].handle = nullptr;
+ break;
+ }
+ }
+ }
+ }
+}
+
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/evs/1.1/default/EvsCamera.h b/automotive/evs/1.1/default/EvsCamera.h
new file mode 100644
index 0000000..0982464
--- /dev/null
+++ b/automotive/evs/1.1/default/EvsCamera.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERA_H
+#define ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERA_H
+
+#include <android/hardware/automotive/evs/1.1/types.h>
+#include <android/hardware/automotive/evs/1.1/IEvsCamera.h>
+#include <android/hardware/automotive/evs/1.1/IEvsCameraStream.h>
+#include <ui/GraphicBuffer.h>
+
+#include <thread>
+
+using BufferDesc_1_0 = ::android::hardware::automotive::evs::V1_0::BufferDesc;
+using BufferDesc_1_1 = ::android::hardware::automotive::evs::V1_1::BufferDesc;
+using IEvsCameraStream_1_0 = ::android::hardware::automotive::evs::V1_0::IEvsCameraStream;
+using IEvsCameraStream_1_1 = ::android::hardware::automotive::evs::V1_1::IEvsCameraStream;
+using ::android::hardware::automotive::evs::V1_0::EvsResult;
+using ::android::hardware::automotive::evs::V1_0::CameraDesc;
+
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace V1_1 {
+namespace implementation {
+
+
+// From EvsEnumerator.h
+class EvsEnumerator;
+
+
+class EvsCamera : public IEvsCamera {
+public:
+ // Methods from ::android::hardware::automotive::evs::V1_0::IEvsCamera follow.
+ Return<void> getCameraInfo(getCameraInfo_cb _hidl_cb) override;
+ Return<EvsResult> setMaxFramesInFlight(uint32_t bufferCount) override;
+ Return<EvsResult> startVideoStream(const ::android::sp<IEvsCameraStream_1_0>& stream) override;
+ Return<void> stopVideoStream() override;
+ Return<void> doneWithFrame(const BufferDesc_1_0& buffer) override;
+
+ Return<int32_t> getExtendedInfo(uint32_t opaqueIdentifier) override;
+ Return<EvsResult> setExtendedInfo(uint32_t opaqueIdentifier, int32_t opaqueValue) override;
+
+ // Methods from ::android::hardware::automotive::evs::V1_1::IEvsCamera follow.
+ Return<EvsResult> pauseVideoStream() override;
+ Return<EvsResult> resumeVideoStream() override;
+ Return<EvsResult> doneWithFrame_1_1(const BufferDesc_1_1& buffer) override;
+
+ // Implementation details
+ EvsCamera(const char *id);
+ virtual ~EvsCamera() override;
+ void forceShutdown(); // This gets called if another caller "steals" ownership of the camera
+
+ const CameraDesc& getDesc() { return mDescription; };
+
+ static const char kCameraName_Backup[];
+
+private:
+ // These three functions are expected to be called while mAccessLock is held
+ bool setAvailableFrames_Locked(unsigned bufferCount);
+ unsigned increaseAvailableFrames_Locked(unsigned numToAdd);
+ unsigned decreaseAvailableFrames_Locked(unsigned numToRemove);
+
+ void generateFrames();
+ void fillTestFrame(const BufferDesc_1_0& buff);
+ void fillTestFrame(const BufferDesc_1_1& buff);
+ void returnBuffer(const uint32_t bufferId, const buffer_handle_t memHandle);
+
+ sp<EvsEnumerator> mEnumerator; // The enumerator object that created this camera
+
+ CameraDesc mDescription = {}; // The properties of this camera
+
+ std::thread mCaptureThread; // The thread we'll use to synthesize frames
+
+ uint32_t mWidth = 0; // Horizontal pixel count in the buffers
+ uint32_t mHeight = 0; // Vertical pixel count in the buffers
+ uint32_t mFormat = 0; // Values from android_pixel_format_t
+    uint64_t mUsage  = 0;               // Values from Gralloc.h
+    uint32_t mStride = 0;               // Pixels per line in the buffers (gralloc stride is in pixels)
+
+ sp<IEvsCameraStream_1_1> mStream = nullptr; // The callback used to deliver each frame
+
+ struct BufferRecord {
+ buffer_handle_t handle;
+ bool inUse;
+
+ explicit BufferRecord(buffer_handle_t h) : handle(h), inUse(false) {};
+ };
+
+ std::vector <BufferRecord> mBuffers; // Graphics buffers to transfer images
+    unsigned mFramesAllowed;                    // How many buffers we are currently allowed to use
+ unsigned mFramesInUse; // How many buffers are currently outstanding
+
+ enum StreamStateValues {
+ STOPPED,
+ RUNNING,
+ STOPPING,
+ DEAD,
+ };
+ StreamStateValues mStreamState;
+
+ // Synchronization necessary to deconflict mCaptureThread from the main service thread
+ std::mutex mAccessLock;
+};
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERA_H
diff --git a/automotive/evs/1.1/default/EvsDisplay.cpp b/automotive/evs/1.1/default/EvsDisplay.cpp
new file mode 100644
index 0000000..74c099a
--- /dev/null
+++ b/automotive/evs/1.1/default/EvsDisplay.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.automotive.evs@1.1-service"
+
+#include "EvsDisplay.h"
+
+#include <ui/GraphicBufferAllocator.h>
+#include <ui/GraphicBufferMapper.h>
+
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace V1_1 {
+namespace implementation {
+
+
+EvsDisplay::EvsDisplay() {
+ ALOGD("EvsDisplay instantiated");
+
+ // Set up our self description
+ // NOTE: These are arbitrary values chosen for testing
+ mInfo.displayId = "Mock Display";
+ mInfo.vendorFlags = 3870;
+
+ // Assemble the buffer description we'll use for our render target
+ mBuffer.width = 320;
+ mBuffer.height = 240;
+ mBuffer.format = HAL_PIXEL_FORMAT_RGBA_8888;
+ mBuffer.usage = GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER;
+ mBuffer.bufferId = 0x3870; // Arbitrary magic number for self recognition
+ mBuffer.pixelSize = 4;
+}
+
+
+EvsDisplay::~EvsDisplay() {
+ ALOGD("EvsDisplay being destroyed");
+ forceShutdown();
+}
+
+
+/**
+ * This gets called if another caller "steals" ownership of the display
+ */
+void EvsDisplay::forceShutdown()
+{
+ ALOGD("EvsDisplay forceShutdown");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ // If the buffer isn't being held by a remote client, release it now as an
+ // optimization to release the resources more quickly than the destructor might
+ // get called.
+ if (mBuffer.memHandle) {
+ // Report if we're going away while a buffer is outstanding
+ if (mFrameBusy) {
+ ALOGE("EvsDisplay going down while client is holding a buffer");
+ }
+
+ // Drop the graphics buffer we've been using
+ GraphicBufferAllocator& alloc(GraphicBufferAllocator::get());
+ alloc.free(mBuffer.memHandle);
+ mBuffer.memHandle = nullptr;
+ }
+
+ // Put this object into an unrecoverable error state since somebody else
+ // is going to own the display now.
+ mRequestedState = DisplayState::DEAD;
+}
+
+
+/**
+ * Returns basic information about the EVS display provided by the system.
+ * See the description of the DisplayDesc structure for details.
+ */
+Return<void> EvsDisplay::getDisplayInfo(getDisplayInfo_cb _hidl_cb) {
+ ALOGD("getDisplayInfo");
+
+ // Send back our self description
+ _hidl_cb(mInfo);
+ return Void();
+}
+
+
+/**
+ * Clients may set the display state to express their desired state.
+ * The HAL implementation must gracefully accept a request for any state
+ * while in any other state, although the response may be to ignore the request.
+ * The display is defined to start in the NOT_VISIBLE state upon initialization.
+ * The client is then expected to request the VISIBLE_ON_NEXT_FRAME state, and
+ * then begin providing video. When the display is no longer required, the client
+ * is expected to request the NOT_VISIBLE state after passing the last video frame.
+ */
+Return<EvsResult> EvsDisplay::setDisplayState(DisplayState state) {
+ ALOGD("setDisplayState");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ if (mRequestedState == DisplayState::DEAD) {
+        // This object no longer owns the display -- it's been superseded!
+ return EvsResult::OWNERSHIP_LOST;
+ }
+
+ // Ensure we recognize the requested state so we don't go off the rails
+ if (state < DisplayState::NUM_STATES) {
+ // Record the requested state
+ mRequestedState = state;
+ return EvsResult::OK;
+ }
+ else {
+ // Turn off the display if asked for an unrecognized state
+ mRequestedState = DisplayState::NOT_VISIBLE;
+ return EvsResult::INVALID_ARG;
+ }
+}
+
+
+/**
+ * The HAL implementation should report the actual current state, which might
+ * transiently differ from the most recently requested state. Note, however, that
+ * the logic responsible for changing display states should generally live above
+ * the device layer, making it undesirable for the HAL implementation to
+ * spontaneously change display states.
+ */
+Return<DisplayState> EvsDisplay::getDisplayState() {
+ ALOGD("getDisplayState");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ return mRequestedState;
+}
+
+
+/**
+ * This call returns a handle to a frame buffer associated with the display.
+ * This buffer may be locked and written to by software and/or GL. This buffer
+ * must be returned via a call to returnTargetBufferForDisplay() even if the
+ * display is no longer visible.
+ */
+// TODO: We need to know if/when our client dies so we can get the buffer back! (blocked b/31632518)
+Return<void> EvsDisplay::getTargetBuffer(getTargetBuffer_cb _hidl_cb) {
+ ALOGD("getTargetBuffer");
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ if (mRequestedState == DisplayState::DEAD) {
+ ALOGE("Rejecting buffer request from object that lost ownership of the display.");
+ BufferDesc_1_0 nullBuff = {};
+ _hidl_cb(nullBuff);
+ return Void();
+ }
+
+ // If we don't already have a buffer, allocate one now
+ if (!mBuffer.memHandle) {
+ // Allocate the buffer that will hold our displayable image
+ buffer_handle_t handle = nullptr;
+ GraphicBufferAllocator& alloc(GraphicBufferAllocator::get());
+ status_t result = alloc.allocate(
+ mBuffer.width, mBuffer.height, mBuffer.format, 1, mBuffer.usage,
+ &handle, &mBuffer.stride, 0, "EvsDisplay");
+ if (result != NO_ERROR) {
+ ALOGE("Error %d allocating %d x %d graphics buffer",
+ result, mBuffer.width, mBuffer.height);
+ BufferDesc_1_0 nullBuff = {};
+ _hidl_cb(nullBuff);
+ return Void();
+ }
+ if (!handle) {
+ ALOGE("We didn't get a buffer handle back from the allocator");
+ BufferDesc_1_0 nullBuff = {};
+ _hidl_cb(nullBuff);
+ return Void();
+ }
+
+ mBuffer.memHandle = handle;
+ mFrameBusy = false;
+ ALOGD("Allocated new buffer %p with stride %u",
+ mBuffer.memHandle.getNativeHandle(), mBuffer.stride);
+ }
+
+ // Do we have a frame available?
+ if (mFrameBusy) {
+ // This means either we have a 2nd client trying to compete for buffers
+ // (an unsupported mode of operation) or else the client hasn't returned
+ // a previously issued buffer yet (they're behaving badly).
+ // NOTE: We have to make the callback even if we have nothing to provide
+ ALOGE("getTargetBuffer called while no buffers available.");
+ BufferDesc_1_0 nullBuff = {};
+ _hidl_cb(nullBuff);
+ return Void();
+ } else {
+ // Mark our buffer as busy
+ mFrameBusy = true;
+
+ // Send the buffer to the client
+ ALOGD("Providing display buffer handle %p as id %d",
+ mBuffer.memHandle.getNativeHandle(), mBuffer.bufferId);
+ _hidl_cb(mBuffer);
+ return Void();
+ }
+}
+
+
+/**
+ * This call tells the display that the buffer is ready for display.
+ * The buffer is no longer valid for use by the client after this call.
+ */
+Return<EvsResult> EvsDisplay::returnTargetBufferForDisplayImpl(const uint32_t bufferId, const buffer_handle_t memHandle) {
+ ALOGD("returnTargetBufferForDisplay %p", memHandle);
+ std::lock_guard<std::mutex> lock(mAccessLock);
+
+ // Nobody should call us with a null handle
+ if (!memHandle) {
+ ALOGE ("returnTargetBufferForDisplay called without a valid buffer handle.\n");
+ return EvsResult::INVALID_ARG;
+ }
+ if (bufferId != mBuffer.bufferId) {
+ ALOGE ("Got an unrecognized frame returned.\n");
+ return EvsResult::INVALID_ARG;
+ }
+ if (!mFrameBusy) {
+ ALOGE ("A frame was returned with no outstanding frames.\n");
+ return EvsResult::BUFFER_NOT_AVAILABLE;
+ }
+
+ mFrameBusy = false;
+
+ // If we've been displaced by another owner of the display, then we can't do anything else
+ if (mRequestedState == DisplayState::DEAD) {
+ return EvsResult::OWNERSHIP_LOST;
+ }
+
+ // If we were waiting for a new frame, this is it!
+ if (mRequestedState == DisplayState::VISIBLE_ON_NEXT_FRAME) {
+ mRequestedState = DisplayState::VISIBLE;
+ }
+
+ // Validate we're in an expected state
+ if (mRequestedState != DisplayState::VISIBLE) {
+ // We shouldn't get frames back when we're not visible.
+ ALOGE ("Got an unexpected frame returned while not visible - ignoring.\n");
+ } else {
+ // This is where the buffer would be made visible.
+ // For now we simply validate it has the data we expect in it by reading it back
+
+ // Lock our display buffer for reading
+ uint32_t* pixels = nullptr;
+ GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+ mapper.lock(mBuffer.memHandle,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_NEVER,
+ android::Rect(mBuffer.width, mBuffer.height),
+ (void **)&pixels);
+
+ // If we failed to lock the pixel buffer, we're about to crash, but log it first
+ if (!pixels) {
+ ALOGE("Display failed to gain access to image buffer for reading");
+ }
+
+ // Capture the time-varying frame signature stored in the first pixel
+ // before the row pointer below is advanced past it
+ const uint32_t signature = pixels[0] & 0xFF;
+
+ // Check the test pixels
+ bool frameLooksGood = true;
+ for (unsigned row = 0; row < mBuffer.height; row++) {
+ for (unsigned col = 0; col < mBuffer.width; col++) {
+ // Index into the row to check the pixel at this column.
+ // We expect 0xFF in the LSB channel, a vertical gradient in the
+ // second channel, a horizontal gradient in the third channel, and
+ // 0xFF in the MSB.
+ // The exception is the very first 32 bits which is used for the
+ // time varying frame signature to avoid getting fooled by a static image.
+ uint32_t expectedPixel = 0xFF0000FF | // MSB and LSB
+ ((row & 0xFF) << 8) | // vertical gradient
+ ((col & 0xFF) << 16); // horizontal gradient
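+ // e.g. row 3, col 5 -> 0xFF0000FF | (3 << 8) | (5 << 16) = 0xFF0503FF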
+ if ((row | col) == 0) {
+ // we'll check the "uniqueness" of the frame signature below
+ continue;
+ }
+ // Walk across this row (we'll step rows below)
+ uint32_t receivedPixel = pixels[col];
+ if (receivedPixel != expectedPixel) {
+ ALOGE("Pixel check mismatch in frame buffer");
+ frameLooksGood = false;
+ break;
+ }
+ }
+
+ if (!frameLooksGood) {
+ break;
+ }
+
+ // Point to the next row (NOTE: gralloc reports stride in units of pixels)
+ pixels = pixels + mBuffer.stride;
+ }
+
+ // Ensure we don't see the same buffer twice without it being rewritten,
+ // using the frame signature captured above
+ static uint32_t prevSignature = ~0;
+ if (prevSignature == signature) {
+ frameLooksGood = false;
+ ALOGE("Duplicate, likely stale frame buffer detected");
+ }
+ prevSignature = signature;
+
+
+ // Release our output buffer
+ mapper.unlock(mBuffer.memHandle);
+
+ if (!frameLooksGood) {
+ return EvsResult::UNDERLYING_SERVICE_ERROR;
+ }
+ }
+
+ return EvsResult::OK;
+}
+
+
+Return<EvsResult> EvsDisplay::returnTargetBufferForDisplay(const BufferDesc_1_0& buffer) {
+ return returnTargetBufferForDisplayImpl(buffer.bufferId, buffer.memHandle);
+}
+
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/evs/1.1/default/EvsDisplay.h b/automotive/evs/1.1/default/EvsDisplay.h
new file mode 100644
index 0000000..2a56535
--- /dev/null
+++ b/automotive/evs/1.1/default/EvsDisplay.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSDISPLAY_H
+#define ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSDISPLAY_H
+
+#include <android/hardware/automotive/evs/1.0/IEvsDisplay.h>
+#include <ui/GraphicBuffer.h>
+
+using ::android::hardware::automotive::evs::V1_0::IEvsDisplay;
+using ::android::hardware::automotive::evs::V1_0::DisplayDesc;
+using ::android::hardware::automotive::evs::V1_0::DisplayState;
+using ::android::hardware::automotive::evs::V1_0::EvsResult;
+using BufferDesc_1_0 = ::android::hardware::automotive::evs::V1_0::BufferDesc;
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace V1_1 {
+namespace implementation {
+
+
+class EvsDisplay : public IEvsDisplay {
+public:
+ // Methods from ::android::hardware::automotive::evs::V1_0::IEvsDisplay follow.
+ Return<void> getDisplayInfo(getDisplayInfo_cb _hidl_cb) override;
+ Return<EvsResult> setDisplayState(DisplayState state) override;
+ Return<DisplayState> getDisplayState() override;
+ Return<void> getTargetBuffer(getTargetBuffer_cb _hidl_cb) override;
+ Return<EvsResult> returnTargetBufferForDisplay(const BufferDesc_1_0& buffer) override;
+
+
+ // Implementation details
+ EvsDisplay();
+ virtual ~EvsDisplay() override;
+
+ void forceShutdown(); // This gets called if another caller "steals" ownership of the display
+ Return<EvsResult> returnTargetBufferForDisplayImpl(const uint32_t bufferId,
+ const buffer_handle_t memHandle);
+
+private:
+ DisplayDesc mInfo = {};
+ BufferDesc_1_0 mBuffer = {}; // A graphics buffer into which we'll store images
+
+ bool mFrameBusy = false; // A flag telling us our buffer is in use
+ DisplayState mRequestedState = DisplayState::NOT_VISIBLE;
+
+ std::mutex mAccessLock;
+};
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSDISPLAY_H
diff --git a/automotive/evs/1.1/default/EvsEnumerator.cpp b/automotive/evs/1.1/default/EvsEnumerator.cpp
new file mode 100644
index 0000000..b324907
--- /dev/null
+++ b/automotive/evs/1.1/default/EvsEnumerator.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.automotive.evs@1.1-service"
+
+#include "EvsEnumerator.h"
+#include "EvsCamera.h"
+#include "EvsDisplay.h"
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace V1_1 {
+namespace implementation {
+
+
+// NOTE: All member values are static so that all clients operate on the same state
+// That is to say, this is effectively a singleton despite the fact that HIDL
+// constructs a new instance for each client.
+std::list<EvsEnumerator::CameraRecord> EvsEnumerator::sCameraList;
+wp<EvsDisplay> EvsEnumerator::sActiveDisplay;
+
+
+EvsEnumerator::EvsEnumerator() {
+ ALOGD("EvsEnumerator created");
+
+ // Add sample camera data to our list of cameras
+ // In a real driver, this would be expected to scan the available hardware
+ sCameraList.emplace_back(EvsCamera::kCameraName_Backup);
+ sCameraList.emplace_back("LaneView");
+ sCameraList.emplace_back("right turn");
+}
+
+
+// Methods from ::android::hardware::automotive::evs::V1_0::IEvsEnumerator follow.
+Return<void> EvsEnumerator::getCameraList(getCameraList_cb _hidl_cb) {
+ ALOGD("getCameraList");
+
+ const unsigned numCameras = sCameraList.size();
+
+ // Build up a packed array of CameraDesc for return
+ // NOTE: Only has to live until the callback returns
+ std::vector<CameraDesc_1_0> descriptions;
+ descriptions.reserve(numCameras);
+ for (const auto& cam : sCameraList) {
+ descriptions.push_back(cam.desc);
+ }
+
+ // Encapsulate our camera descriptions in the HIDL vec type
+ hidl_vec<CameraDesc_1_0> hidlCameras(descriptions);
+
+ // Send back the results
+ ALOGD("reporting %zu cameras available", hidlCameras.size());
+ _hidl_cb(hidlCameras);
+
+ // HIDL convention says we return Void if we sent our result back via callback
+ return Void();
+}
+
+
+Return<sp<IEvsCamera_1_0>> EvsEnumerator::openCamera(const hidl_string& cameraId) {
+ ALOGD("openCamera");
+
+ // Find the named camera
+ CameraRecord *pRecord = nullptr;
+ for (auto &&cam : sCameraList) {
+ if (cam.desc.cameraId == cameraId) {
+ // Found a match!
+ pRecord = &cam;
+ break;
+ }
+ }
+
+ // Is this a recognized camera id?
+ if (!pRecord) {
+ ALOGE("Requested camera %s not found", cameraId.c_str());
+ return nullptr;
+ }
+
+ // Has this camera already been instantiated by another caller?
+ sp<EvsCamera> pActiveCamera = pRecord->activeInstance.promote();
+ if (pActiveCamera != nullptr) {
+ ALOGW("Killing previous camera because of new caller");
+ closeCamera(pActiveCamera);
+ }
+
+ // Construct a camera instance for the caller
+ pActiveCamera = new EvsCamera(cameraId.c_str());
+ pRecord->activeInstance = pActiveCamera;
+ if (pActiveCamera == nullptr) {
+ ALOGE("Failed to allocate new EvsCamera object for %s\n", cameraId.c_str());
+ }
+
+ return pActiveCamera;
+}
+
+
+Return<void> EvsEnumerator::closeCamera(const ::android::sp<IEvsCamera_1_0>& pCamera) {
+ ALOGD("closeCamera");
+
+ auto pCamera_1_1 = IEvsCamera_1_1::castFrom(pCamera).withDefault(nullptr);
+ if (pCamera_1_1 == nullptr) {
+ ALOGE("Ignoring call to closeCamera with null camera ptr");
+ return Void();
+ }
+
+ // Get the camera id so we can find it in our list
+ std::string cameraId;
+ pCamera_1_1->getCameraInfo([&cameraId](CameraDesc desc) {
+ cameraId = desc.cameraId;
+ }
+ );
+
+ // Find the named camera
+ CameraRecord *pRecord = nullptr;
+ for (auto &&cam : sCameraList) {
+ if (cam.desc.cameraId == cameraId) {
+ // Found a match!
+ pRecord = &cam;
+ break;
+ }
+ }
+
+ // Is this a camera we know about?
+ if (!pRecord) {
+ ALOGE("Asked to close a camera whose name isn't recognized");
+ } else {
+ sp<EvsCamera> pActiveCamera = pRecord->activeInstance.promote();
+
+ if (pActiveCamera == nullptr) {
+ ALOGE("Somehow a camera is being destroyed when the enumerator didn't know one existed");
+ } else if (pActiveCamera != pCamera_1_1) {
+ // This can happen if the camera was aggressively reopened, orphaning this previous instance
+ ALOGW("Ignoring close of previously orphaned camera - why did a client steal?");
+ } else {
+ // Drop the active camera
+ pActiveCamera->forceShutdown();
+ pRecord->activeInstance = nullptr;
+ }
+ }
+
+ return Void();
+}
+
+
+Return<sp<IEvsDisplay>> EvsEnumerator::openDisplay() {
+ ALOGD("openDisplay");
+
+ // If we already have a display active, then we need to shut it down so we can
+ // give exclusive access to the new caller.
+ sp<EvsDisplay> pActiveDisplay = sActiveDisplay.promote();
+ if (pActiveDisplay != nullptr) {
+ ALOGW("Killing previous display because of new caller");
+ closeDisplay(pActiveDisplay);
+ }
+
+ // Create a new display interface and return it
+ pActiveDisplay = new EvsDisplay();
+ sActiveDisplay = pActiveDisplay;
+
+ ALOGD("Returning new EvsDisplay object %p", pActiveDisplay.get());
+ return pActiveDisplay;
+}
+
+
+Return<void> EvsEnumerator::closeDisplay(const ::android::sp<IEvsDisplay>& pDisplay) {
+ ALOGD("closeDisplay");
+
+ // Do we still have a display object we think should be active?
+ sp<EvsDisplay> pActiveDisplay = sActiveDisplay.promote();
+ if (pActiveDisplay == nullptr) {
+ ALOGE("Somehow a display is being destroyed when the enumerator didn't know one existed");
+ } else if (sActiveDisplay != pDisplay) {
+ ALOGW("Ignoring close of previously orphaned display - why did a client steal?");
+ } else {
+ // Drop the active display
+ pActiveDisplay->forceShutdown();
+ sActiveDisplay = nullptr;
+ }
+
+ return Void();
+}
+
+
+Return<DisplayState> EvsEnumerator::getDisplayState() {
+ ALOGD("getDisplayState");
+
+ // Do we still have a display object we think should be active?
+ sp<IEvsDisplay> pActiveDisplay = sActiveDisplay.promote();
+ if (pActiveDisplay != nullptr) {
+ return pActiveDisplay->getDisplayState();
+ } else {
+ return DisplayState::NOT_OPEN;
+ }
+}
+
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/evs/1.1/default/EvsEnumerator.h b/automotive/evs/1.1/default/EvsEnumerator.h
new file mode 100644
index 0000000..11c2170
--- /dev/null
+++ b/automotive/evs/1.1/default/EvsEnumerator.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERAENUMERATOR_H
+#define ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERAENUMERATOR_H
+
+#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+#include <android/hardware/automotive/evs/1.1/IEvsCamera.h>
+
+#include <list>
+
+using ::android::hardware::automotive::evs::V1_0::EvsResult;
+using ::android::hardware::automotive::evs::V1_0::IEvsDisplay;
+using ::android::hardware::automotive::evs::V1_0::DisplayState;
+using ::android::hardware::automotive::evs::V1_0::IEvsEnumerator;
+using IEvsCamera_1_0 = ::android::hardware::automotive::evs::V1_0::IEvsCamera;
+using IEvsCamera_1_1 = ::android::hardware::automotive::evs::V1_1::IEvsCamera;
+using CameraDesc_1_0 = ::android::hardware::automotive::evs::V1_0::CameraDesc;
+
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace V1_1 {
+namespace implementation {
+
+
+class EvsCamera; // from EvsCamera.h
+class EvsDisplay; // from EvsDisplay.h
+
+
+class EvsEnumerator : public IEvsEnumerator {
+public:
+ // Methods from ::android::hardware::automotive::evs::V1_0::IEvsEnumerator follow.
+ Return<void> getCameraList(getCameraList_cb _hidl_cb) override;
+ Return<sp<IEvsCamera_1_0>> openCamera(const hidl_string& cameraId) override;
+ Return<void> closeCamera(const ::android::sp<IEvsCamera_1_0>& carCamera) override;
+ Return<sp<IEvsDisplay>> openDisplay() override;
+ Return<void> closeDisplay(const ::android::sp<IEvsDisplay>& display) override;
+ Return<DisplayState> getDisplayState() override;
+
+ // Implementation details
+ EvsEnumerator();
+
+private:
+ // NOTE: All member values are static so that all clients operate on the same state
+ // That is to say, this is effectively a singleton despite the fact that HIDL
+ // constructs a new instance for each client.
+ struct CameraRecord {
+ CameraDesc_1_0 desc;
+ wp<EvsCamera> activeInstance;
+
+ CameraRecord(const char *cameraId) : desc() { desc.cameraId = cameraId; }
+ };
+ static std::list<CameraRecord> sCameraList;
+
+ static wp<EvsDisplay> sActiveDisplay; // Weak pointer. Object destructs if client dies.
+};
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERAENUMERATOR_H
diff --git a/automotive/evs/1.1/default/ServiceNames.h b/automotive/evs/1.1/default/ServiceNames.h
new file mode 100644
index 0000000..1178da5
--- /dev/null
+++ b/automotive/evs/1.1/default/ServiceNames.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const static char kEnumeratorServiceName[] = "EvsEnumeratorHw";
diff --git a/automotive/evs/1.1/default/android.hardware.automotive.evs@1.1-service.rc b/automotive/evs/1.1/default/android.hardware.automotive.evs@1.1-service.rc
new file mode 100644
index 0000000..284b3fd
--- /dev/null
+++ b/automotive/evs/1.1/default/android.hardware.automotive.evs@1.1-service.rc
@@ -0,0 +1,5 @@
+service vendor.evs-hal-mock /vendor/bin/hw/android.hardware.automotive.evs@1.1-service
+ class hal
+ user automotive_evs
+ group automotive_evs
+ disabled
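+ # 'disabled' keeps this mock service from starting automatically with its
+ # class; it must be started explicitly (e.g. by a test harness).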
diff --git a/automotive/evs/1.1/default/service.cpp b/automotive/evs/1.1/default/service.cpp
new file mode 100644
index 0000000..128a14a
--- /dev/null
+++ b/automotive/evs/1.1/default/service.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.automotive.evs@1.1-service"
+
+#include <unistd.h>
+
+#include <hidl/HidlTransportSupport.h>
+#include <log/log.h>
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+
+#include "ServiceNames.h"
+#include "EvsEnumerator.h"
+#include "EvsDisplay.h"
+
+
+// libhidl:
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+
+// Generated HIDL files
+using android::hardware::automotive::evs::V1_0::IEvsEnumerator;
+using android::hardware::automotive::evs::V1_0::IEvsDisplay;
+
+// The namespace in which all our implementation code lives
+using namespace android::hardware::automotive::evs::V1_1::implementation;
+using namespace android;
+
+
+int main() {
+ ALOGI("EVS Hardware Enumerator service is starting");
+ android::sp<IEvsEnumerator> service = new EvsEnumerator();
+
+ configureRpcThreadpool(1, true /* callerWillJoin */);
+
+ // Register our service -- if somebody else is already registered under our name,
+ // they will be killed (their thread pool will throw an exception).
+ status_t status = service->registerAsService(kEnumeratorServiceName);
+ if (status == OK) {
+ ALOGD("%s is ready.", kEnumeratorServiceName);
+ joinRpcThreadpool();
+ } else {
+ ALOGE("Could not register service %s (%d).", kEnumeratorServiceName, status);
+ }
+
+ // In normal operation, we don't expect the thread pool to exit
+ ALOGE("EVS Hardware Enumerator is shutting down");
+ return 1;
+}
diff --git a/automotive/evs/1.1/types.hal b/automotive/evs/1.1/types.hal
new file mode 100644
index 0000000..ff6ab4e
--- /dev/null
+++ b/automotive/evs/1.1/types.hal
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs@1.1;
+
+import @1.0::CameraDesc;
+import @1.0::DisplayDesc;
+import @1.0::DisplayState;
+import @1.0::EvsResult;
+import android.hardware.graphics.common@1.2::HardwareBuffer;
+
+/**
+ * Structure representing an image buffer through our APIs
+ *
+ * In addition to the handle to the graphics memory, we need to retain
+ * the properties of the buffer for easy reference and reconstruction of
+ * an ANativeWindowBuffer object on the remote side of API calls.
+ * (Not least because OpenGL expects an ANativeWindowBuffer* for use as a
+ * texture via eglCreateImageKHR().)
+ */
+struct BufferDesc {
+ /**
+ * HIDL counterpart of `AHardwareBuffer_Desc`. Please see
+ * hardware/interfaces/graphics/common/1.2/types.hal for more details.
+ */
+ HardwareBuffer buffer;
+ /**
+ * The size of a pixel in the units of bytes
+ */
+ uint32_t pixelSize;
+ /**
+ * Opaque value from driver
+ */
+ uint32_t bufferId;
+};
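+
+/*
+ * Note: `buffer.description` above is the HIDL counterpart of
+ * AHardwareBuffer_Desc, so native clients may reinterpret it directly; the
+ * VTS FrameHandler added by this change does exactly that via
+ * reinterpret_cast<const AHardwareBuffer_Desc*>(&bufDesc.buffer.description).
+ */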
+
+/**
+ * EVS event types
+ */
+enum EvsEventType : uint32_t {
+ /**
+ * Video stream is started
+ */
+ STREAM_STARTED = 0,
+ /**
+ * Video stream is stopped
+ */
+ STREAM_STOPPED,
+ /**
+ * Video frame is dropped
+ */
+ FRAME_DROPPED,
+ /**
+ * Timeout happens
+ */
+ TIMEOUT,
+};
+
+/**
+ * EVS event definition
+ */
+safe_union EvsEvent {
+ /**
+ * A buffer descriptor of an image frame
+ */
+ BufferDesc buffer;
+ /**
+ * General streaming events
+ */
+ EvsEventType info;
+};
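+
+/*
+ * Note: receivers switch on this safe_union's discriminator to tell frame
+ * deliveries apart from stream events -- see FrameHandler::notifyEvent() in
+ * the VTS tests added by this change.
+ */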
diff --git a/automotive/evs/1.1/vts/functional/Android.bp b/automotive/evs/1.1/vts/functional/Android.bp
new file mode 100644
index 0000000..55c50a4
--- /dev/null
+++ b/automotive/evs/1.1/vts/functional/Android.bp
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+ name: "VtsHalEvsV1_1TargetTest",
+ srcs: [
+ "FrameHandler.cpp",
+ "VtsHalEvsV1_1TargetTest.cpp",
+ ],
+ defaults: ["VtsHalTargetTestDefaults"],
+ shared_libs: [
+ "libui",
+ ],
+ static_libs: [
+ "android.hardware.automotive.evs@1.0",
+ "android.hardware.automotive.evs@1.1",
+ "android.hardware.automotive.evs@common-default-lib",
+ "android.hardware.graphics.common@1.0",
+ "android.hardware.graphics.common@1.1",
+ "android.hardware.graphics.common@1.2",
+ ],
+ test_suites: ["general-tests"],
+ cflags: [
+ "-O0",
+ "-g",
+ ],
+}
diff --git a/automotive/evs/1.1/vts/functional/FrameHandler.cpp b/automotive/evs/1.1/vts/functional/FrameHandler.cpp
new file mode 100644
index 0000000..b7c7f21
--- /dev/null
+++ b/automotive/evs/1.1/vts/functional/FrameHandler.cpp
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VtsHalEvsTest"
+
+#include "FrameHandler.h"
+#include "FormatConvert.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <android/log.h>
+#include <cutils/native_handle.h>
+#include <ui/GraphicBuffer.h>
+
+FrameHandler::FrameHandler(android::sp <IEvsCamera> pCamera, CameraDesc cameraInfo,
+ android::sp <IEvsDisplay> pDisplay,
+ BufferControlFlag mode) :
+ mCamera(pCamera),
+ mCameraInfo(cameraInfo),
+ mDisplay(pDisplay),
+ mReturnMode(mode) {
+ // Nothing but member initialization here...
+}
+
+
+void FrameHandler::shutdown()
+{
+ // Make sure we're not still streaming
+ blockingStopStream();
+
+ // At this point, the receiver thread is no longer running, so we can safely drop
+ // our remote object references so they can be freed
+ mCamera = nullptr;
+ mDisplay = nullptr;
+}
+
+
+bool FrameHandler::startStream() {
+ // Tell the camera to start streaming
+ Return<EvsResult> result = mCamera->startVideoStream(this);
+ if (result != EvsResult::OK) {
+ return false;
+ }
+
+ // Mark ourselves as running
+ mLock.lock();
+ mRunning = true;
+ mLock.unlock();
+
+ return true;
+}
+
+
+void FrameHandler::asyncStopStream() {
+ // Tell the camera to stop streaming.
+ // This will result in a STREAM_STOPPED event being delivered when the stream actually stops.
+ mCamera->stopVideoStream();
+}
+
+
+void FrameHandler::blockingStopStream() {
+ // Tell the stream to stop
+ asyncStopStream();
+
+ // Wait until the stream has actually stopped
+ std::unique_lock<std::mutex> lock(mLock);
+ if (mRunning) {
+ mSignal.wait(lock, [this]() { return !mRunning; });
+ }
+}
+
+
+bool FrameHandler::returnHeldBuffer() {
+ std::unique_lock<std::mutex> lock(mLock);
+
+ // Return the oldest buffer we're holding
+ if (mHeldBuffers.empty()) {
+ // No buffers are currently held
+ return false;
+ }
+
+ BufferDesc_1_1 buffer = mHeldBuffers.front();
+ mHeldBuffers.pop();
+ mCamera->doneWithFrame_1_1(buffer);
+
+ return true;
+}
+
+
+bool FrameHandler::isRunning() {
+ std::unique_lock<std::mutex> lock(mLock);
+ return mRunning;
+}
+
+
+void FrameHandler::waitForFrameCount(unsigned frameCount) {
+ // Wait until we've seen at least the requested number of frames (could be more)
+ std::unique_lock<std::mutex> lock(mLock);
+ mSignal.wait(lock, [this, frameCount](){ return mFramesReceived >= frameCount; });
+}
+
+
+void FrameHandler::getFramesCounters(unsigned* received, unsigned* displayed) {
+ std::unique_lock<std::mutex> lock(mLock);
+
+ if (received) {
+ *received = mFramesReceived;
+ }
+ if (displayed) {
+ *displayed = mFramesDisplayed;
+ }
+}
+
+
+Return<void> FrameHandler::deliverFrame(const BufferDesc_1_0& bufferArg) {
+ ALOGW("A frame delivered via v1.0 method is rejected.");
+ mCamera->doneWithFrame(bufferArg);
+ return Void();
+}
+
+
+Return<void> FrameHandler::notifyEvent(const EvsEvent& event) {
+ // Local flag we use to keep track of when the stream is stopping
+ bool timeToStop = false;
+
+ auto type = event.getDiscriminator();
+ if (type == EvsEvent::hidl_discriminator::info) {
+ if (event.info() == EvsEventType::STREAM_STOPPED) {
+ // Signal that the last frame has been received and the stream is stopped
+ timeToStop = true;
+ } else {
+ ALOGD("Received an event 0x%X", event.info());
+ }
+ } else {
+ auto bufDesc = event.buffer();
+ const AHardwareBuffer_Desc* pDesc =
+ reinterpret_cast<const AHardwareBuffer_Desc *>(&bufDesc.buffer.description);
+ ALOGD("Received a frame from the camera (%p)",
+ bufDesc.buffer.nativeHandle.getNativeHandle());
+
+ // Store the dimensions of the received frame.
+ mFrameWidth = pDesc->width;
+ mFrameHeight = pDesc->height;
+
+ // If we were given an opened display at construction time, then forward the
+ // received image to that display.
+ if (mDisplay.get()) {
+ // Get the output buffer we'll use to display the imagery
+ BufferDesc_1_0 tgtBuffer = {};
+ mDisplay->getTargetBuffer([&tgtBuffer](const BufferDesc_1_0& buff) {
+ tgtBuffer = buff;
+ }
+ );
+
+ if (tgtBuffer.memHandle == nullptr) {
+ printf("Didn't get target buffer - frame lost\n");
+ ALOGE("Didn't get requested output buffer -- skipping this frame.");
+ } else {
+ // Copy the contents of the source camera buffer into tgtBuffer
+ copyBufferContents(tgtBuffer, bufDesc);
+
+ // Send the target buffer back for display
+ Return<EvsResult> result = mDisplay->returnTargetBufferForDisplay(tgtBuffer);
+ if (!result.isOk()) {
+ printf("HIDL error on display buffer (%s) - frame lost\n",
+ result.description().c_str());
+ ALOGE("Error making the remote function call. HIDL said %s",
+ result.description().c_str());
+ } else if (result != EvsResult::OK) {
+ printf("Display reported error - frame lost\n");
+ ALOGE("We encountered error %d when returning a buffer to the display!",
+ (EvsResult) result);
+ } else {
+ // Everything looks good!
+ // Keep track so tests or watchdogs can monitor progress
+ mLock.lock();
+ mFramesDisplayed++;
+ mLock.unlock();
+ }
+ }
+ }
+
+
+ switch (mReturnMode) {
+ case eAutoReturn:
+ // Send the camera buffer back now that the client has seen it
+ ALOGD("Calling doneWithFrame");
+ // TODO: Why is it that we get a HIDL crash if we pass back the cloned buffer?
+ mCamera->doneWithFrame_1_1(bufDesc);
+ break;
+ case eNoAutoReturn:
+ // Hang onto the buffer handle for now -- the client will return it explicitly later
+ mHeldBuffers.push(bufDesc);
+ }
+
+
+ ALOGD("Frame handling complete");
+ }
+
+
+ // Update our received frame count and notify anybody who cares that things have changed
+ mLock.lock();
+ if (timeToStop) {
+ mRunning = false;
+ } else {
+ mFramesReceived++;
+ }
+ mLock.unlock();
+ mSignal.notify_all();
+
+ return Void();
+}
+
+
+bool FrameHandler::copyBufferContents(const BufferDesc_1_0& tgtBuffer,
+ const BufferDesc_1_1& srcBuffer) {
+ bool success = true;
+ const AHardwareBuffer_Desc* pSrcDesc =
+ reinterpret_cast<const AHardwareBuffer_Desc *>(&srcBuffer.buffer.description);
+
+ // Make sure we don't run off the end of either buffer
+ const unsigned width = std::min(tgtBuffer.width,
+ pSrcDesc->width);
+ const unsigned height = std::min(tgtBuffer.height,
+ pSrcDesc->height);
+
+ sp<android::GraphicBuffer> tgt = new android::GraphicBuffer(tgtBuffer.memHandle,
+ android::GraphicBuffer::CLONE_HANDLE,
+ tgtBuffer.width,
+ tgtBuffer.height,
+ tgtBuffer.format,
+ 1,
+ tgtBuffer.usage,
+ tgtBuffer.stride);
+ sp<android::GraphicBuffer> src = new android::GraphicBuffer(srcBuffer.buffer.nativeHandle,
+ android::GraphicBuffer::CLONE_HANDLE,
+ pSrcDesc->width,
+ pSrcDesc->height,
+ pSrcDesc->format,
+ pSrcDesc->layers,
+ pSrcDesc->usage,
+ pSrcDesc->stride);
+
+ // Lock our source buffer for reading (the current expectation is for this to be NV21 format)
+ uint8_t* srcPixels = nullptr;
+ src->lock(GRALLOC_USAGE_SW_READ_OFTEN, (void**)&srcPixels);
+
+ // Lock our target buffer for writing (should be either RGBA8888 or BGRA8888 format)
+ uint32_t* tgtPixels = nullptr;
+ tgt->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)&tgtPixels);
+
+ if (srcPixels && tgtPixels) {
+ using namespace ::android::hardware::automotive::evs::common;
+ if (tgtBuffer.format == HAL_PIXEL_FORMAT_RGBA_8888) {
+ if (pSrcDesc->format == HAL_PIXEL_FORMAT_YCRCB_420_SP) { // 420SP == NV21
+ Utils::copyNV21toRGB32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
+ } else if (pSrcDesc->format == HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
+ Utils::copyYV12toRGB32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
+ } else if (pSrcDesc->format == HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
+ Utils::copyYUYVtoRGB32(width, height,
+ srcPixels, pSrcDesc->stride,
+ tgtPixels, tgtBuffer.stride);
+ } else if (pSrcDesc->format == tgtBuffer.format) { // 32bit RGBA
+ Utils::copyMatchedInterleavedFormats(width, height,
+ srcPixels, pSrcDesc->stride,
+ tgtPixels, tgtBuffer.stride,
+ tgtBuffer.pixelSize);
+ } else {
+ ALOGE("Camera buffer format is not supported");
+ success = false;
+ }
+ } else if (tgtBuffer.format == HAL_PIXEL_FORMAT_BGRA_8888) {
+ if (pSrcDesc->format == HAL_PIXEL_FORMAT_YCRCB_420_SP) { // 420SP == NV21
+ Utils::copyNV21toBGR32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
+ } else if (pSrcDesc->format == HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
+ Utils::copyYV12toBGR32(width, height,
+ srcPixels,
+ tgtPixels, tgtBuffer.stride);
+ } else if (pSrcDesc->format == HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
+ Utils::copyYUYVtoBGR32(width, height,
+ srcPixels, pSrcDesc->stride,
+ tgtPixels, tgtBuffer.stride);
+ } else if (pSrcDesc->format == tgtBuffer.format) { // 32bit RGBA
+ Utils::copyMatchedInterleavedFormats(width, height,
+ srcPixels, pSrcDesc->stride,
+ tgtPixels, tgtBuffer.stride,
+ tgtBuffer.pixelSize);
+ } else {
+ ALOGE("Camera buffer format is not supported");
+ success = false;
+ }
+ } else {
+ // We always expect 32 bit RGB for the display output for now. Is there a need for 565?
+ ALOGE("Display buffer is always expected to be 32bit RGBA");
+ success = false;
+ }
+ } else {
+ ALOGE("Failed to lock buffer contents for contents transfer");
+ success = false;
+ }
+
+ if (srcPixels) {
+ src->unlock();
+ }
+ if (tgtPixels) {
+ tgt->unlock();
+ }
+
+ return success;
+}
+
+void FrameHandler::getFrameDimension(unsigned* width, unsigned* height) {
+ if (width) {
+ *width = mFrameWidth;
+ }
+
+ if (height) {
+ *height = mFrameHeight;
+ }
+}
diff --git a/automotive/evs/1.1/vts/functional/FrameHandler.h b/automotive/evs/1.1/vts/functional/FrameHandler.h
new file mode 100644
index 0000000..49fa736
--- /dev/null
+++ b/automotive/evs/1.1/vts/functional/FrameHandler.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EVS_VTS_FRAMEHANDLER_H
+#define EVS_VTS_FRAMEHANDLER_H
+
+#include <queue>
+
+#include <android/hardware/automotive/evs/1.1/IEvsCameraStream.h>
+#include <android/hardware/automotive/evs/1.1/IEvsCamera.h>
+#include <android/hardware/automotive/evs/1.0/IEvsDisplay.h>
+
+using namespace ::android::hardware::automotive::evs::V1_1;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::sp;
+using ::android::hardware::automotive::evs::V1_0::IEvsDisplay;
+using ::android::hardware::automotive::evs::V1_0::EvsResult;
+using ::android::hardware::automotive::evs::V1_0::CameraDesc;
+using BufferDesc_1_0 = ::android::hardware::automotive::evs::V1_0::BufferDesc;
+using BufferDesc_1_1 = ::android::hardware::automotive::evs::V1_1::BufferDesc;
+
+
+/*
+ * FrameHandler:
+ * This class can be used to receive camera imagery from an IEvsCamera implementation. Given an
+ * IEvsDisplay instance at startup, it will forward the received imagery to the display,
+ * providing a trivial implementation of a rear view camera type application.
+ * Note that the video frames are delivered on a background thread, while the control interface
+ * is actuated from the application's foreground thread.
+ */
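+//
+// A rough usage sketch, as exercised by VtsHalEvsV1_1TargetTest.cpp in this change:
+//     sp<FrameHandler> handler = new FrameHandler(pCam, camDesc, pDisplay,
+//                                                 FrameHandler::eAutoReturn);
+//     handler->startStream();          // begin receiving frames on a background thread
+//     handler->waitForFrameCount(1);   // block until the first frame arrives
+//     ...                              // query getFramesCounters(), etc.
+//     handler->shutdown();             // stops the stream and drops remote refs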
+class FrameHandler : public IEvsCameraStream {
+public:
+ enum BufferControlFlag {
+ eAutoReturn,
+ eNoAutoReturn,
+ };
+
+ FrameHandler(android::sp <IEvsCamera> pCamera, CameraDesc cameraInfo,
+ android::sp <IEvsDisplay> pDisplay = nullptr,
+ BufferControlFlag mode = eAutoReturn);
+ void shutdown();
+
+ bool startStream();
+ void asyncStopStream();
+ void blockingStopStream();
+
+ bool returnHeldBuffer();
+
+ bool isRunning();
+
+ void waitForFrameCount(unsigned frameCount);
+ void getFramesCounters(unsigned* received, unsigned* displayed);
+ void getFrameDimension(unsigned* width, unsigned* height);
+
+private:
+ // Implementation for ::android::hardware::automotive::evs::V1_1::IEvsCameraStream
+ Return<void> deliverFrame(const BufferDesc_1_0& buffer) override;
+ Return<void> notifyEvent(const EvsEvent& event) override;
+
+ // Local implementation details
+ bool copyBufferContents(const BufferDesc_1_0& tgtBuffer, const BufferDesc_1_1& srcBuffer);
+
+ // Values initialized at startup
+ android::sp <IEvsCamera> mCamera;
+ CameraDesc mCameraInfo;
+ android::sp <IEvsDisplay> mDisplay;
+ BufferControlFlag mReturnMode;
+
+ // Since we get frames delivered to us asynchronously via the IEvsCameraStream interface,
+ // we need to protect all member variables that may be modified while we're streaming
+ // (i.e. those below)
+ std::mutex mLock;
+ std::condition_variable mSignal;
+
+ std::queue<BufferDesc_1_1> mHeldBuffers;
+ bool mRunning = false;
+ unsigned mFramesReceived = 0; // Simple counter -- rolls over eventually!
+ unsigned mFramesDisplayed = 0; // Simple counter -- rolls over eventually!
+ unsigned mFrameWidth = 0;
+ unsigned mFrameHeight = 0;
+};
+
+
+#endif //EVS_VTS_FRAMEHANDLER_H
diff --git a/automotive/evs/1.1/vts/functional/VtsHalEvsV1_1TargetTest.cpp b/automotive/evs/1.1/vts/functional/VtsHalEvsV1_1TargetTest.cpp
new file mode 100644
index 0000000..4f7082a
--- /dev/null
+++ b/automotive/evs/1.1/vts/functional/VtsHalEvsV1_1TargetTest.cpp
@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VtsHalEvsTest"
+
+
+// Note: We haven't got a great way to indicate which target
+// should be tested, so we'll leave the interface served by the
+// default (mock) EVS driver here for easy reference. All
+// actual EVS drivers should serve on the EvsEnumeratorHw name,
+// however, so the code is checked in that way.
+//const static char kEnumeratorName[] = "EvsEnumeratorHw-Mock";
+const static char kEnumeratorName[] = "EvsEnumeratorHw";
+
+
+// These values are called out in the EVS design doc (as of Mar 8, 2017)
+static const int kMaxStreamStartMilliseconds = 500;
+static const int kMinimumFramesPerSecond = 10;
+
+static const int kSecondsToMilliseconds = 1000;
+static const int kMillisecondsToMicroseconds = 1000;
+static const float kNanoToMilliseconds = 0.000001f;
+static const float kNanoToSeconds = 0.000000001f;
+
+
+#include "FrameHandler.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <hidl/HidlTransportSupport.h>
+#include <hwbinder/ProcessState.h>
+#include <log/log.h>
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+
+#include <android/log.h>
+#include <android/hardware/automotive/evs/1.1/IEvsCamera.h>
+#include <android/hardware/automotive/evs/1.1/IEvsCameraStream.h>
+#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+#include <android/hardware/automotive/evs/1.0/IEvsDisplay.h>
+
+#include <VtsHalHidlTargetTestBase.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
+
+using namespace ::android::hardware::automotive::evs::V1_1;
+
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::hidl_string;
+using ::android::sp;
+using ::android::hardware::automotive::evs::V1_0::CameraDesc;
+using ::android::hardware::automotive::evs::V1_0::DisplayDesc;
+using ::android::hardware::automotive::evs::V1_0::DisplayState;
+using ::android::hardware::automotive::evs::V1_0::IEvsEnumerator;
+using IEvsCamera_1_0 = ::android::hardware::automotive::evs::V1_0::IEvsCamera;
+using IEvsCamera_1_1 = ::android::hardware::automotive::evs::V1_1::IEvsCamera;
+
+// Test environment for Evs HIDL HAL.
+class EvsHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+ public:
+ // get the test environment singleton
+ static EvsHidlEnvironment* Instance() {
+ static EvsHidlEnvironment* instance = new EvsHidlEnvironment;
+ return instance;
+ }
+
+ virtual void registerTestServices() override { registerTestService<IEvsEnumerator>(); }
+
+ private:
+ EvsHidlEnvironment() {}
+};
+
+// The main test class for EVS
+class EvsHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+public:
+ virtual void SetUp() override {
+ // Make sure we can connect to the enumerator
+ string service_name =
+ EvsHidlEnvironment::Instance()->getServiceName<IEvsEnumerator>(kEnumeratorName);
+ pEnumerator = getService<IEvsEnumerator>(service_name);
+ ASSERT_NE(pEnumerator.get(), nullptr);
+
+ mIsHwModule = !service_name.compare(kEnumeratorName);
+ }
+
+ virtual void TearDown() override {}
+
+protected:
+ void loadCameraList() {
+ // SetUp() must run first!
+ assert(pEnumerator != nullptr);
+
+ // Get the camera list
+ pEnumerator->getCameraList([this](hidl_vec <CameraDesc> cameraList) {
+ ALOGI("Camera list callback received %zu cameras",
+ cameraList.size());
+ cameraInfo.reserve(cameraList.size());
+ for (auto&& cam: cameraList) {
+ ALOGI("Found camera %s", cam.cameraId.c_str());
+ cameraInfo.push_back(cam);
+ }
+ }
+ );
+
+ // We insist on at least one camera for EVS to pass any camera tests
+ ASSERT_GE(cameraInfo.size(), 1u);
+ }
+
+ sp<IEvsEnumerator> pEnumerator; // Every test needs access to the service
+ std::vector <CameraDesc> cameraInfo; // Empty unless/until loadCameraList() is called
+ bool mIsHwModule; // True if the module under test is a HW implementation
+};
+
+
+// Test cases, their implementations, and corresponding requirements are
+// documented at go/aae-evs-public-api-test.
+
+/*
+ * CameraOpenClean:
+ * Opens each camera reported by the enumerator and then explicitly closes it via a
+ * call to closeCamera. Then repeats the test to ensure all cameras can be reopened.
+ */
+TEST_F(EvsHidlTest, CameraOpenClean) {
+ ALOGI("Starting CameraOpenClean test");
+
+ // Get the camera list
+ loadCameraList();
+
+ // Open and close each camera twice
+ for (auto&& cam: cameraInfo) {
+ for (int pass = 0; pass < 2; pass++) {
+ sp<IEvsCamera_1_1> pCam =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam, nullptr);
+
+ // Verify that this camera self-identifies correctly
+ pCam->getCameraInfo([&cam](CameraDesc desc) {
+ ALOGD("Found camera %s", desc.cameraId.c_str());
+ EXPECT_EQ(cam.cameraId, desc.cameraId);
+ }
+ );
+
+ // Explicitly close the camera so resources are released right away
+ pEnumerator->closeCamera(pCam);
+ }
+ }
+}
+
+
+/*
+ * CameraOpenAggressive:
+ * Opens each camera reported by the enumerator twice in a row without an intervening closeCamera
+ * call. This ensures that the intended "aggressive open" behavior works. This is necessary for
+ * the system to be tolerant of shutdown/restart race conditions.
+ */
+TEST_F(EvsHidlTest, CameraOpenAggressive) {
+ ALOGI("Starting CameraOpenAggressive test");
+
+ // Get the camera list
+ loadCameraList();
+
+ // Open each camera twice in a row, without an intervening close
+ for (auto&& cam: cameraInfo) {
+ sp<IEvsCamera_1_1> pCam =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam, nullptr);
+
+ // Verify that this camera self-identifies correctly
+ pCam->getCameraInfo([&cam](CameraDesc desc) {
+ ALOGD("Found camera %s", desc.cameraId.c_str());
+ EXPECT_EQ(cam.cameraId, desc.cameraId);
+ }
+ );
+
+ sp<IEvsCamera_1_1> pCam2 =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam, pCam2);
+ ASSERT_NE(pCam2, nullptr);
+
+ Return<EvsResult> result = pCam->setMaxFramesInFlight(2);
+ if (mIsHwModule) {
+ // Verify that the old camera rejects calls via HW module.
+ EXPECT_EQ(EvsResult::OWNERSHIP_LOST, EvsResult(result));
+ } else {
+ // The default implementation supports multiple clients.
+ EXPECT_EQ(EvsResult::OK, EvsResult(result));
+ }
+
+ // Close the superseded camera
+ pEnumerator->closeCamera(pCam);
+
+ // Verify that the second camera instance self-identifies correctly
+ pCam2->getCameraInfo([&cam](CameraDesc desc) {
+ ALOGD("Found camera %s", desc.cameraId.c_str());
+ EXPECT_EQ(cam.cameraId, desc.cameraId);
+ }
+ );
+
+ // Close the second camera instance
+ pEnumerator->closeCamera(pCam2);
+ }
+
+ // Sleep here to ensure the destructor cleanup has time to run so we don't break follow-on tests
+ sleep(1); // I hate that this is an arbitrary time to wait. :( b/36122635
+}
+
+
+/*
+ * CameraStreamPerformance:
+ * Measure and qualify the stream start up time and streaming frame rate of each reported camera
+ */
+TEST_F(EvsHidlTest, CameraStreamPerformance) {
+ ALOGI("Starting CameraStreamPerformance test");
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam: cameraInfo) {
+ sp<IEvsCamera_1_1> pCam =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam, nullptr);
+
+ // Set up a frame receiver object which will fire up its own thread
+ sp<FrameHandler> frameHandler = new FrameHandler(pCam, cam,
+ nullptr,
+ FrameHandler::eAutoReturn);
+
+ // Start the camera's video stream
+ nsecs_t start = systemTime(SYSTEM_TIME_MONOTONIC);
+ bool startResult = frameHandler->startStream();
+ ASSERT_TRUE(startResult);
+
+ // Ensure the first frame arrived within the expected time
+ frameHandler->waitForFrameCount(1);
+ nsecs_t firstFrame = systemTime(SYSTEM_TIME_MONOTONIC);
+ nsecs_t timeToFirstFrame = firstFrame - start;
+ EXPECT_LE(nanoseconds_to_milliseconds(timeToFirstFrame), kMaxStreamStartMilliseconds);
+ printf("Measured time to first frame %0.2f ms\n", timeToFirstFrame * kNanoToMilliseconds);
+ ALOGI("Measured time to first frame %0.2f ms", timeToFirstFrame * kNanoToMilliseconds);
+
+ // Check aspect ratio
+ unsigned width = 0, height = 0;
+ frameHandler->getFrameDimension(&width, &height);
+ EXPECT_GE(width, height);
+
+ // Wait a bit, then ensure we get at least the required minimum number of frames
+ sleep(5);
+ nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
+ unsigned framesReceived = 0;
+ frameHandler->getFramesCounters(&framesReceived, nullptr);
+ framesReceived = framesReceived - 1; // Back out the first frame we already waited for
+ nsecs_t runTime = end - firstFrame;
+ float framesPerSecond = framesReceived / (runTime * kNanoToSeconds);
+ printf("Measured camera rate %3.2f fps\n", framesPerSecond);
+ ALOGI("Measured camera rate %3.2f fps", framesPerSecond);
+ EXPECT_GE(framesPerSecond, kMinimumFramesPerSecond);
+
+ // Even when the camera pointer goes out of scope, the FrameHandler object will
+ // keep the stream alive unless we tell it to shutdown.
+ // Also note that the FrameHandler and the Camera have a mutual circular reference, so
+ // we have to break that cycle in order for either of them to get cleaned up.
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ pEnumerator->closeCamera(pCam);
+ }
+}
+
+
+/*
+ * CameraStreamBuffering:
+ * Ensure the camera implementation behaves properly when the client holds onto buffers for more
+ * than one frame time. The camera must cleanly skip frames until the client is ready again.
+ */
+TEST_F(EvsHidlTest, CameraStreamBuffering) {
+ ALOGI("Starting CameraStreamBuffering test");
+
+ // Arbitrary constant (should be > 1 and less than crazy)
+ static const unsigned int kBuffersToHold = 6;
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam: cameraInfo) {
+
+ sp<IEvsCamera_1_1> pCam =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam, nullptr);
+
+ // Ask for a crazy number of buffers in flight to ensure it errors correctly
+ Return<EvsResult> badResult = pCam->setMaxFramesInFlight(0xFFFFFFFF);
+ EXPECT_EQ(EvsResult::BUFFER_NOT_AVAILABLE, badResult);
+
+ // Now ask for exactly kBuffersToHold buffers in flight, as we'll test behavior at that limit
+ Return<EvsResult> goodResult = pCam->setMaxFramesInFlight(kBuffersToHold);
+ EXPECT_EQ(EvsResult::OK, goodResult);
+
+
+ // Set up a frame receiver object which will fire up its own thread.
+ sp<FrameHandler> frameHandler = new FrameHandler(pCam, cam,
+ nullptr,
+ FrameHandler::eNoAutoReturn);
+
+ // Start the camera's video stream
+ bool startResult = frameHandler->startStream();
+ ASSERT_TRUE(startResult);
+
+ // Check that the video stream stalls once we've gotten exactly the number of buffers
+ // we requested since we told the frameHandler not to return them.
+ sleep(2); // 2 seconds should be enough for at least kBuffersToHold frames to be delivered, worst case
+ unsigned framesReceived = 0;
+ frameHandler->getFramesCounters(&framesReceived, nullptr);
+ ASSERT_EQ(kBuffersToHold, framesReceived) << "Stream didn't stall at expected buffer limit";
+
+
+ // Give back one buffer
+ bool didReturnBuffer = frameHandler->returnHeldBuffer();
+ EXPECT_TRUE(didReturnBuffer);
+
+ // Once we return a buffer, it shouldn't take more than 1/10 second to get a new one
+ // filled since we require 10fps minimum -- but give a 10% allowance just in case.
+ usleep(110 * kMillisecondsToMicroseconds);
+ frameHandler->getFramesCounters(&framesReceived, nullptr);
+ EXPECT_EQ(kBuffersToHold+1, framesReceived) << "Stream should've resumed";
+
+ // Even when the camera pointer goes out of scope, the FrameHandler object will
+ // keep the stream alive unless we tell it to shutdown.
+ // Also note that the FrameHandler and the Camera have a mutual circular reference, so
+ // we have to break that cycle in order for either of them to get cleaned up.
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ pEnumerator->closeCamera(pCam);
+ }
+}
+
+
+/*
+ * CameraToDisplayRoundTrip:
+ * End to end test of data flowing from the camera to the display. Each delivered frame of camera
+ * imagery is simply copied to the display buffer and presented on screen. This is the one test
+ * which a human could observe to see the operation of the system on the physical display.
+ */
+TEST_F(EvsHidlTest, CameraToDisplayRoundTrip) {
+ ALOGI("Starting CameraToDisplayRoundTrip test");
+
+ // Get the camera list
+ loadCameraList();
+
+ // Request exclusive access to the EVS display
+ sp<IEvsDisplay> pDisplay = pEnumerator->openDisplay();
+ ASSERT_NE(pDisplay, nullptr);
+
+ // Test each reported camera
+ for (auto&& cam: cameraInfo) {
+ sp<IEvsCamera_1_1> pCam =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam, nullptr);
+
+ // Set up a frame receiver object which will fire up its own thread.
+ sp<FrameHandler> frameHandler = new FrameHandler(pCam, cam,
+ pDisplay,
+ FrameHandler::eAutoReturn);
+
+
+ // Activate the display
+ pDisplay->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME);
+
+ // Start the camera's video stream
+ bool startResult = frameHandler->startStream();
+ ASSERT_TRUE(startResult);
+
+ // Wait a while to let the data flow
+ static const int kSecondsToWait = 5;
+ const int streamTimeMs = kSecondsToWait * kSecondsToMilliseconds -
+ kMaxStreamStartMilliseconds;
+ const unsigned minimumFramesExpected = streamTimeMs * kMinimumFramesPerSecond /
+ kSecondsToMilliseconds;
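+ // (with the constants above: (5*1000 - 500) ms * 10 fps / 1000 = 45 frames minimum)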
+ sleep(kSecondsToWait);
+ unsigned framesReceived = 0;
+ unsigned framesDisplayed = 0;
+ frameHandler->getFramesCounters(&framesReceived, &framesDisplayed);
+ EXPECT_EQ(framesReceived, framesDisplayed);
+ EXPECT_GE(framesDisplayed, minimumFramesExpected);
+
+ // Turn off the display (yes, before the stream stops -- it should be handled)
+ pDisplay->setDisplayState(DisplayState::NOT_VISIBLE);
+
+ // Shut down the streamer
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ pEnumerator->closeCamera(pCam);
+ }
+
+ // Explicitly release the display
+ pEnumerator->closeDisplay(pDisplay);
+}
+
+
+/*
+ * MultiCameraStream:
+ * Verify that each client can start and stop video streams on the same
+ * underlying camera.
+ */
+TEST_F(EvsHidlTest, MultiCameraStream) {
+ ALOGI("Starting MultiCameraStream test");
+
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam: cameraInfo) {
+ // Create two camera clients.
+ sp<IEvsCamera_1_1> pCam0 =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam0, nullptr);
+
+ sp<IEvsCamera_1_1> pCam1 =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam1, nullptr);
+
+ // Set up per-client frame receiver objects, each of which will fire up its own thread
+ sp<FrameHandler> frameHandler0 = new FrameHandler(pCam0, cam,
+ nullptr,
+ FrameHandler::eAutoReturn);
+ ASSERT_NE(frameHandler0, nullptr);
+
+ sp<FrameHandler> frameHandler1 = new FrameHandler(pCam1, cam,
+ nullptr,
+ FrameHandler::eAutoReturn);
+ ASSERT_NE(frameHandler1, nullptr);
+
+ // Start the camera's video stream via both clients
+ bool startResult = false;
+ startResult = frameHandler0->startStream() &&
+ frameHandler1->startStream();
+ ASSERT_TRUE(startResult);
+
+ // Ensure the stream starts
+ frameHandler0->waitForFrameCount(1);
+ frameHandler1->waitForFrameCount(1);
+
+ nsecs_t firstFrame = systemTime(SYSTEM_TIME_MONOTONIC);
+
+ // Wait a bit, then ensure both clients get at least the required minimum number of frames
+ sleep(5);
+ nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
+ unsigned framesReceived0 = 0, framesReceived1 = 0;
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+ framesReceived0 = framesReceived0 - 1; // Back out the first frame we already waited for
+ framesReceived1 = framesReceived1 - 1; // Back out the first frame we already waited for
+ nsecs_t runTime = end - firstFrame;
+ float framesPerSecond0 = framesReceived0 / (runTime * kNanoToSeconds);
+ float framesPerSecond1 = framesReceived1 / (runTime * kNanoToSeconds);
+ printf("Measured camera rate %3.2f fps and %3.2f fps\n", framesPerSecond0, framesPerSecond1);
+ ALOGI("Measured camera rate %3.2f fps and %3.2f fps", framesPerSecond0, framesPerSecond1);
+ EXPECT_GE(framesPerSecond0, kMinimumFramesPerSecond);
+ EXPECT_GE(framesPerSecond1, kMinimumFramesPerSecond);
+
+ // Shut down one client
+ frameHandler0->shutdown();
+
+ // Read frame counters again
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+
+ // Wait a bit again
+ sleep(5);
+ unsigned framesReceivedAfterStop0 = 0, framesReceivedAfterStop1 = 0;
+ frameHandler0->getFramesCounters(&framesReceivedAfterStop0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceivedAfterStop1, nullptr);
+ EXPECT_EQ(framesReceived0, framesReceivedAfterStop0);
+ EXPECT_LT(framesReceived1, framesReceivedAfterStop1);
+
+ // Shut down the other client
+ frameHandler1->shutdown();
+
+ // Explicitly release both cameras
+ pEnumerator->closeCamera(pCam0);
+ pEnumerator->closeCamera(pCam1);
+ }
+}
+
+
+int main(int argc, char** argv) {
+ ::testing::AddGlobalTestEnvironment(EvsHidlEnvironment::Instance());
+ ::testing::InitGoogleTest(&argc, argv);
+ EvsHidlEnvironment::Instance()->init(&argc, argv);
+ int status = RUN_ALL_TESTS();
+ ALOGI("Test result = %d", status);
+ return status;
+}
diff --git a/automotive/evs/common/utils/default/Android.bp b/automotive/evs/common/utils/default/Android.bp
new file mode 100644
index 0000000..7734f5c
--- /dev/null
+++ b/automotive/evs/common/utils/default/Android.bp
@@ -0,0 +1,31 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library_static {
+ name: "android.hardware.automotive.evs@common-default-lib",
+ vendor_available: true,
+ relative_install_path: "hw",
+ cflags: [
+ "-O0",
+ "-g",
+ ],
+ srcs: [
+ "FormatConvert.cpp"
+ ],
+ export_include_dirs: ["include"],
+ shared_libs: [
+ ],
+}
diff --git a/automotive/evs/1.0/vts/functional/FormatConvert.cpp b/automotive/evs/common/utils/default/FormatConvert.cpp
similarity index 74%
rename from automotive/evs/1.0/vts/functional/FormatConvert.cpp
rename to automotive/evs/common/utils/default/FormatConvert.cpp
index 3d82d32..d4c7da0 100644
--- a/automotive/evs/1.0/vts/functional/FormatConvert.cpp
+++ b/automotive/evs/common/utils/default/FormatConvert.cpp
@@ -18,10 +18,15 @@
#include "FormatConvert.h"
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace common {
// Round up to the nearest multiple of the given alignment value
template<unsigned alignment>
-int align(int value) {
+int Utils::align(int value) {
static_assert((alignment && !(alignment & (alignment - 1))),
"alignment must be a power of 2");
@@ -31,15 +36,17 @@
// Limit the given value to the provided range. :)
-static inline float clamp(float v, float min, float max) {
+inline float Utils::clamp(float v, float min, float max) {
if (v < min) return min;
if (v > max) return max;
return v;
}
-static uint32_t yuvToRgbx(const unsigned char Y, const unsigned char Uin, const unsigned char Vin,
- bool bgrxFormat = false) {
+uint32_t Utils::yuvToRgbx(const unsigned char Y,
+ const unsigned char Uin,
+ const unsigned char Vin,
+ bool bgrxFormat) {
// Don't use this if you want to see the best performance. :)
// Better to do this in a pixel shader if we really have to, but on actual
// embedded hardware we expect to be able to texture directly from the YUV data
@@ -67,10 +74,10 @@
}
-void copyNV21toRGB32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels,
- bool bgrxFormat)
+void Utils::copyNV21toRGB32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels,
+ bool bgrxFormat)
{
// The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
// U/V array. It assumes an even width and height for the overall image, and a horizontal
@@ -99,10 +106,10 @@
}
-void copyYV12toRGB32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels,
- bool bgrxFormat)
+void Utils::copyYV12toRGB32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels,
+ bool bgrxFormat)
{
// The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
// by another 1/2 x 1/2 V array. It assumes an even width and height for the overall image,
@@ -134,10 +141,10 @@
}
-void copyYUYVtoRGB32(unsigned width, unsigned height,
- uint8_t* src, unsigned srcStridePixels,
- uint32_t* dst, unsigned dstStridePixels,
- bool bgrxFormat)
+void Utils::copyYUYVtoRGB32(unsigned width, unsigned height,
+ uint8_t* src, unsigned srcStridePixels,
+ uint32_t* dst, unsigned dstStridePixels,
+ bool bgrxFormat)
{
uint32_t* srcWords = (uint32_t*)src;
@@ -167,34 +174,34 @@
}
-void copyNV21toBGR32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels)
+void Utils::copyNV21toBGR32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels)
{
return copyNV21toRGB32(width, height, src, dst, dstStridePixels, true);
}
-void copyYV12toBGR32(unsigned width, unsigned height,
- uint8_t* src,
- uint32_t* dst, unsigned dstStridePixels)
+void Utils::copyYV12toBGR32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels)
{
return copyYV12toRGB32(width, height, src, dst, dstStridePixels, true);
}
-void copyYUYVtoBGR32(unsigned width, unsigned height,
- uint8_t* src, unsigned srcStridePixels,
- uint32_t* dst, unsigned dstStridePixels)
+void Utils::copyYUYVtoBGR32(unsigned width, unsigned height,
+ uint8_t* src, unsigned srcStridePixels,
+ uint32_t* dst, unsigned dstStridePixels)
{
return copyYUYVtoRGB32(width, height, src, srcStridePixels, dst, dstStridePixels, true);
}
-void copyMatchedInterleavedFormats(unsigned width, unsigned height,
- void* src, unsigned srcStridePixels,
- void* dst, unsigned dstStridePixels,
- unsigned pixelSize) {
+void Utils::copyMatchedInterleavedFormats(unsigned width, unsigned height,
+ void* src, unsigned srcStridePixels,
+ void* dst, unsigned dstStridePixels,
+ unsigned pixelSize) {
for (unsigned row = 0; row < height; row++) {
// Copy the entire row of pixel data
memcpy(dst, src, width * pixelSize);
@@ -204,3 +211,10 @@
dst = (uint8_t*)dst + dstStridePixels * pixelSize;
}
}
+
+} // namespace common
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
diff --git a/automotive/evs/common/utils/default/include/FormatConvert.h b/automotive/evs/common/utils/default/include/FormatConvert.h
new file mode 100644
index 0000000..2bb8955
--- /dev/null
+++ b/automotive/evs/common/utils/default/include/FormatConvert.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EVS_VTS_FORMATCONVERT_H
+#define EVS_VTS_FORMATCONVERT_H
+
+#include <queue>
+#include <stdint.h>
+
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace evs {
+namespace common {
+
+class Utils {
+public:
+ // Given an image buffer in NV21 format (HAL_PIXEL_FORMAT_YCRCB_420_SP), output 32bit RGBx/BGRx
+ // values. The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
+ // U/V array. It assumes an even width and height for the overall image, and a horizontal
+ // stride that is an even multiple of 16 bytes for both the Y and UV arrays.
+ static void copyNV21toRGB32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels,
+ bool bgrxFormat = false);
+
+ static void copyNV21toBGR32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels);
+
+
+ // Given an image buffer in YV12 format (HAL_PIXEL_FORMAT_YV12), output 32bit RGBx/BGRx values.
+ // The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
+ // by another 1/2 x 1/2 V array. It assumes an even width and height for the overall image,
+ // and a horizontal stride that is an even multiple of 16 bytes for each of the Y, U,
+ // and V arrays.
+ static void copyYV12toRGB32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels,
+ bool bgrxFormat = false);
+
+ static void copyYV12toBGR32(unsigned width, unsigned height,
+ uint8_t* src,
+ uint32_t* dst, unsigned dstStridePixels);
+
+ // Given an image buffer in YUYV format (HAL_PIXEL_FORMAT_YCBCR_422_I), output 32bit RGBx/BGRx
+ // values. The YUYV format interleaves luma and chroma, packing each pair of pixels into 32 bits
+ // as Y0/U/Y1/V, so there are no separate Y and U/V planes. It assumes an even width for the
+ // overall image, with explicit source and destination strides.
+ static void copyYUYVtoRGB32(unsigned width, unsigned height,
+ uint8_t* src, unsigned srcStrideBytes,
+ uint32_t* dst, unsigned dstStrideBytes,
+ bool bgrxFormat = false);
+
+ static void copyYUYVtoBGR32(unsigned width, unsigned height,
+ uint8_t* src, unsigned srcStrideBytes,
+ uint32_t* dst, unsigned dstStrideBytes);
+
+
+ // Given a simple rectangular image buffer with an integer number of bytes per pixel,
+ // copy the pixel values into a new rectangular buffer (potentially with a different stride).
+ // This is typically used to copy RGBx data into an RGBx output buffer.
+ static void copyMatchedInterleavedFormats(unsigned width, unsigned height,
+ void* src, unsigned srcStridePixels,
+ void* dst, unsigned dstStridePixels,
+ unsigned pixelSize);
+
+private:
+ template<unsigned alignment>
+ static int align(int value);
+
+ static inline float clamp(float v, float min, float max);
+ static uint32_t yuvToRgbx(const unsigned char Y,
+ const unsigned char Uin,
+ const unsigned char Vin,
+ bool bgrxFormat = false);
+};
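+
+// Usage sketch (illustrative only; width, height and the buffer sizes below are assumptions of
+// the caller, and the destination stride is given in pixels):
+//   std::vector<uint8_t> nv21(width * height * 3 / 2);  // Y plane followed by interleaved V/U
+//   std::vector<uint32_t> rgbx(width * height);
+//   Utils::copyNV21toRGB32(width, height, nv21.data(), rgbx.data(), width);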
+
+} // namespace common
+} // namespace evs
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // EVS_VTS_FORMATCONVERT_H
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
index 7bafd2c..bc9b94d 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
@@ -422,6 +422,17 @@
.areaId = toInt(VehicleAreaWindow::REAR_WINDSHIELD)}}},
.initialValue = {.int32Values = {0}} // Will be used for all areas.
},
+ {
+ .config = {.prop = toInt(VehicleProperty::HVAC_ELECTRIC_DEFROSTER_ON),
+ .access = VehiclePropertyAccess::READ_WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .areaConfigs =
+ {VehicleAreaConfig{
+ .areaId = toInt(VehicleAreaWindow::FRONT_WINDSHIELD)},
+ VehicleAreaConfig{
+ .areaId = toInt(VehicleAreaWindow::REAR_WINDSHIELD)}}},
+ .initialValue = {.int32Values = {0}} // Will be used for all areas.
+ },
{.config = {.prop = toInt(VehicleProperty::HVAC_MAX_DEFROST_ON),
.access = VehiclePropertyAccess::READ_WRITE,
diff --git a/automotive/vehicle/2.0/types.hal b/automotive/vehicle/2.0/types.hal
index f6ebcdd..6232dd5 100644
--- a/automotive/vehicle/2.0/types.hal
+++ b/automotive/vehicle/2.0/types.hal
@@ -799,7 +799,7 @@
| VehicleArea:SEAT),
/**
- * On/off defrost for designated window
+ * Fan-based defrost for designated window.
*
* @change_mode VehiclePropertyChangeMode:ON_CHANGE
* @access VehiclePropertyAccess:READ_WRITE
@@ -1076,6 +1076,7 @@
*
* @change_mode VehiclePropertyChangeMode:STATIC
* @access VehiclePropertyAccess:READ
+ * @data_enum VehicleHvacFanDirection
*/
HVAC_FAN_DIRECTION_AVAILABLE = (
0x0511
@@ -1118,6 +1119,18 @@
| VehiclePropertyType:INT32
| VehicleArea:SEAT),
+ /**
+ * Electric defrosters' status
+ *
+ * @change_mode VehiclePropertyChangeMode:ON_CHANGE
+ * @access VehiclePropertyAccess:READ_WRITE
+ */
+ HVAC_ELECTRIC_DEFROSTER_ON = (
+ 0x0514
+ | VehiclePropertyGroup:SYSTEM
+ | VehiclePropertyType:BOOLEAN
+ | VehicleArea:WINDOW),
+
/**
* Distance units for display
*
@@ -2458,9 +2471,19 @@
* Bit flags for fan direction
*/
enum VehicleHvacFanDirection : int32_t {
+ UNKNOWN = 0x0,
+
FACE = 0x1,
FLOOR = 0x2,
+ /**
+ * FACE_AND_FLOOR = FACE | FLOOR
+ */
+ FACE_AND_FLOOR = 0x3,
DEFROST = 0x4,
+ /**
+ * DEFROST_AND_FLOOR = DEFROST | FLOOR
+ */
+ DEFROST_AND_FLOOR = 0x06,
};
enum VehicleOilLevel : int32_t {
@@ -2723,6 +2746,8 @@
* Various gears which can be selected by user and chosen in system.
*/
enum VehicleGear : int32_t {
+ GEAR_UNKNOWN = 0x0000,
+
GEAR_NEUTRAL = 0x0001,
GEAR_REVERSE = 0x0002,
GEAR_PARK = 0x0004,
diff --git a/gnss/1.1/vts/functional/gnss_hal_test.cpp b/gnss/1.1/vts/functional/gnss_hal_test.cpp
index f3b376e..61a2ce4 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test.cpp
+++ b/gnss/1.1/vts/functional/gnss_hal_test.cpp
@@ -28,18 +28,9 @@
using ::android::hardware::gnss::common::Utils;
-// Implementations for the main test class for GNSS HAL
-GnssHalTest::GnssHalTest()
- : info_called_count_(0),
- capabilities_called_count_(0),
- location_called_count_(0),
- name_called_count_(0),
- notify_count_(0) {}
-
void GnssHalTest::SetUp() {
gnss_hal_ = ::testing::VtsHalHidlTargetTestBase::getService<IGnss>(
GnssHidlEnvironment::Instance()->getServiceName<IGnss>());
- list_gnss_sv_status_.clear();
ASSERT_NE(gnss_hal_, nullptr);
SetUpGnssCallback();
@@ -48,14 +39,15 @@
void GnssHalTest::TearDown() {
if (gnss_hal_ != nullptr) {
gnss_hal_->cleanup();
+ gnss_hal_ = nullptr;
}
- if (notify_count_ > 0) {
- ALOGW("%d unprocessed callbacks discarded", notify_count_);
- }
+
+ // Set to nullptr to destruct the callback event queues and warn of any unprocessed events.
+ gnss_cb_ = nullptr;
}
void GnssHalTest::SetUpGnssCallback() {
- gnss_cb_ = new GnssCallback(*this);
+ gnss_cb_ = new GnssCallback();
ASSERT_NE(gnss_cb_, nullptr);
auto result = gnss_hal_->setCallback_1_1(gnss_cb_);
@@ -69,13 +61,13 @@
/*
* All capabilities, name and systemInfo callbacks should trigger
*/
- EXPECT_EQ(std::cv_status::no_timeout, wait(TIMEOUT_SEC));
- EXPECT_EQ(std::cv_status::no_timeout, wait(TIMEOUT_SEC));
- EXPECT_EQ(std::cv_status::no_timeout, wait(TIMEOUT_SEC));
+ EXPECT_TRUE(gnss_cb_->capabilities_cbq_.retrieve(gnss_cb_->last_capabilities_, TIMEOUT_SEC));
+ EXPECT_TRUE(gnss_cb_->info_cbq_.retrieve(gnss_cb_->last_info_, TIMEOUT_SEC));
+ EXPECT_TRUE(gnss_cb_->name_cbq_.retrieve(gnss_cb_->last_name_, TIMEOUT_SEC));
- EXPECT_EQ(capabilities_called_count_, 1);
- EXPECT_EQ(info_called_count_, 1);
- EXPECT_EQ(name_called_count_, 1);
+ EXPECT_EQ(gnss_cb_->capabilities_cbq_.calledCount(), 1);
+ EXPECT_EQ(gnss_cb_->info_cbq_.calledCount(), 1);
+ EXPECT_EQ(gnss_cb_->name_cbq_.calledCount(), 1);
}
void GnssHalTest::StopAndClearLocations() {
@@ -89,9 +81,9 @@
* the last reply for final startup messages to arrive (esp. system
* info.)
*/
- while (wait(TIMEOUT_SEC) == std::cv_status::no_timeout) {
+ while (gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_, TIMEOUT_SEC)) {
}
- location_called_count_ = 0;
+ gnss_cb_->location_cbq_.reset();
}
void GnssHalTest::SetPositionMode(const int min_interval_msec, const bool low_power_mode) {
@@ -118,19 +110,22 @@
*/
const int kFirstGnssLocationTimeoutSeconds = 75;
- wait(kFirstGnssLocationTimeoutSeconds);
- EXPECT_EQ(location_called_count_, 1);
+ EXPECT_TRUE(gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_,
+ kFirstGnssLocationTimeoutSeconds));
+ int locationCalledCount = gnss_cb_->location_cbq_.calledCount();
+ EXPECT_EQ(locationCalledCount, 1);
- if (location_called_count_ > 0) {
+ if (locationCalledCount > 0) {
// don't require speed on first fix
- CheckLocation(last_location_, false);
+ CheckLocation(gnss_cb_->last_location_, false);
return true;
}
return false;
}
void GnssHalTest::CheckLocation(GnssLocation& location, bool check_speed) {
- bool check_more_accuracies = (info_called_count_ > 0 && last_info_.yearOfHw >= 2017);
+ const bool check_more_accuracies =
+ (gnss_cb_->info_cbq_.calledCount() > 0 && gnss_cb_->last_info_.yearOfHw >= 2017);
Utils::checkLocation(location, check_speed, check_more_accuracies);
}
@@ -145,12 +140,14 @@
EXPECT_TRUE(StartAndCheckFirstLocation());
for (int i = 1; i < count; i++) {
- EXPECT_EQ(std::cv_status::no_timeout, wait(kLocationTimeoutSubsequentSec));
- EXPECT_EQ(location_called_count_, i + 1);
+ EXPECT_TRUE(gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_,
+ kLocationTimeoutSubsequentSec));
+ int locationCalledCount = gnss_cb_->location_cbq_.calledCount();
+ EXPECT_EQ(locationCalledCount, i + 1);
// Don't cause confusion by checking details if no location yet
- if (location_called_count_ > 0) {
+ if (locationCalledCount > 0) {
// Should be more than 1 location by now, but if not, still don't check first fix speed
- CheckLocation(last_location_, location_called_count_ > 1);
+ CheckLocation(gnss_cb_->last_location_, locationCalledCount > 1);
}
}
}
@@ -177,60 +174,41 @@
return hasGnssHalVersion_1_1 && !hasGnssHalVersion_2_0;
}
-void GnssHalTest::notify() {
- std::unique_lock<std::mutex> lock(mtx_);
- notify_count_++;
- cv_.notify_one();
-}
-
-std::cv_status GnssHalTest::wait(int timeout_seconds) {
- std::unique_lock<std::mutex> lock(mtx_);
-
- auto status = std::cv_status::no_timeout;
- while (notify_count_ == 0) {
- status = cv_.wait_for(lock, std::chrono::seconds(timeout_seconds));
- if (status == std::cv_status::timeout) return status;
- }
- notify_count_--;
- return status;
-}
+GnssHalTest::GnssCallback::GnssCallback()
+ : info_cbq_("system_info"),
+ name_cbq_("name"),
+ capabilities_cbq_("capabilities"),
+ location_cbq_("location"),
+ sv_status_cbq_("sv_status") {}
Return<void> GnssHalTest::GnssCallback::gnssSetSystemInfoCb(
const IGnssCallback::GnssSystemInfo& info) {
ALOGI("Info received, year %d", info.yearOfHw);
- parent_.info_called_count_++;
- parent_.last_info_ = info;
- parent_.notify();
+ info_cbq_.store(info);
return Void();
}
Return<void> GnssHalTest::GnssCallback::gnssSetCapabilitesCb(uint32_t capabilities) {
ALOGI("Capabilities received %d", capabilities);
- parent_.capabilities_called_count_++;
- parent_.last_capabilities_ = capabilities;
- parent_.notify();
+ capabilities_cbq_.store(capabilities);
return Void();
}
Return<void> GnssHalTest::GnssCallback::gnssNameCb(const android::hardware::hidl_string& name) {
ALOGI("Name received: %s", name.c_str());
- parent_.name_called_count_++;
- parent_.last_name_ = name;
- parent_.notify();
+ name_cbq_.store(name);
return Void();
}
Return<void> GnssHalTest::GnssCallback::gnssLocationCb(const GnssLocation& location) {
ALOGI("Location received");
- parent_.location_called_count_++;
- parent_.last_location_ = location;
- parent_.notify();
+ location_cbq_.store(location);
return Void();
}
Return<void> GnssHalTest::GnssCallback::gnssSvStatusCb(
const IGnssCallback::GnssSvStatus& svStatus) {
ALOGI("GnssSvStatus received");
- parent_.list_gnss_sv_status_.emplace_back(svStatus);
+ sv_status_cbq_.store(svStatus);
return Void();
}
diff --git a/gnss/1.1/vts/functional/gnss_hal_test.h b/gnss/1.1/vts/functional/gnss_hal_test.h
index 84a9f84..e4325bf 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test.h
+++ b/gnss/1.1/vts/functional/gnss_hal_test.h
@@ -21,19 +21,17 @@
#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>
-
-#include <condition_variable>
-#include <list>
-#include <mutex>
+#include "GnssCallbackEventQueue.h"
using android::hardware::Return;
using android::hardware::Void;
using android::hardware::gnss::V1_0::GnssLocation;
+using android::hardware::gnss::common::GnssCallbackEventQueue;
+using android::hardware::gnss::V1_0::GnssLocationFlags;
using android::hardware::gnss::V1_1::IGnss;
using android::hardware::gnss::V1_1::IGnssCallback;
-using android::hardware::gnss::V1_0::GnssLocationFlags;
using android::sp;
@@ -57,8 +55,6 @@
// The main test class for GNSS HAL.
class GnssHalTest : public ::testing::VtsHalHidlTargetTestBase {
public:
- GnssHalTest();
-
virtual void SetUp() override;
virtual void TearDown() override;
@@ -72,32 +68,40 @@
/* Callback class for data & Event. */
class GnssCallback : public IGnssCallback {
public:
- GnssHalTest& parent_;
+ IGnssCallback::GnssSystemInfo last_info_;
+ android::hardware::hidl_string last_name_;
+ uint32_t last_capabilities_;
+ GnssLocation last_location_;
- GnssCallback(GnssHalTest& parent) : parent_(parent){};
+ GnssCallbackEventQueue<IGnssCallback::GnssSystemInfo> info_cbq_;
+ GnssCallbackEventQueue<android::hardware::hidl_string> name_cbq_;
+ GnssCallbackEventQueue<uint32_t> capabilities_cbq_;
+ GnssCallbackEventQueue<GnssLocation> location_cbq_;
+ GnssCallbackEventQueue<IGnssCallback::GnssSvStatus> sv_status_cbq_;
- virtual ~GnssCallback() = default;
+ GnssCallback();
+ virtual ~GnssCallback() = default;
- // Dummy callback handlers
- Return<void> gnssStatusCb(const IGnssCallback::GnssStatusValue /* status */) override {
- return Void();
- }
- Return<void> gnssNmeaCb(int64_t /* timestamp */,
- const android::hardware::hidl_string& /* nmea */) override {
- return Void();
- }
- Return<void> gnssAcquireWakelockCb() override { return Void(); }
- Return<void> gnssReleaseWakelockCb() override { return Void(); }
- Return<void> gnssRequestLocationCb(bool /* independentFromGnss */) override {
- return Void();
- }
- Return<void> gnssRequestTimeCb() override { return Void(); }
- // Actual (test) callback handlers
- Return<void> gnssNameCb(const android::hardware::hidl_string& name) override;
- Return<void> gnssLocationCb(const GnssLocation& location) override;
- Return<void> gnssSetCapabilitesCb(uint32_t capabilities) override;
- Return<void> gnssSetSystemInfoCb(const IGnssCallback::GnssSystemInfo& info) override;
- Return<void> gnssSvStatusCb(const IGnssCallback::GnssSvStatus& svStatus) override;
+ // Dummy callback handlers
+ Return<void> gnssStatusCb(const IGnssCallback::GnssStatusValue /* status */) override {
+ return Void();
+ }
+ Return<void> gnssNmeaCb(int64_t /* timestamp */,
+ const android::hardware::hidl_string& /* nmea */) override {
+ return Void();
+ }
+ Return<void> gnssAcquireWakelockCb() override { return Void(); }
+ Return<void> gnssReleaseWakelockCb() override { return Void(); }
+ Return<void> gnssRequestLocationCb(bool /* independentFromGnss */) override {
+ return Void();
+ }
+ Return<void> gnssRequestTimeCb() override { return Void(); }
+ // Actual (test) callback handlers
+ Return<void> gnssNameCb(const android::hardware::hidl_string& name) override;
+ Return<void> gnssLocationCb(const GnssLocation& location) override;
+ Return<void> gnssSetCapabilitesCb(uint32_t capabilities) override;
+ Return<void> gnssSetSystemInfoCb(const IGnssCallback::GnssSystemInfo& info) override;
+ Return<void> gnssSvStatusCb(const IGnssCallback::GnssSvStatus& svStatus) override;
};
/*
@@ -152,26 +156,7 @@
bool IsGnssHalVersion_1_1() const;
sp<IGnss> gnss_hal_; // GNSS HAL to call into
- sp<IGnssCallback> gnss_cb_; // Primary callback interface
-
- /* Count of calls to set the following items, and the latest item (used by
- * test.)
- */
- int info_called_count_;
- IGnssCallback::GnssSystemInfo last_info_;
- uint32_t last_capabilities_;
- int capabilities_called_count_;
- int location_called_count_;
- GnssLocation last_location_;
- list<IGnssCallback::GnssSvStatus> list_gnss_sv_status_;
-
- int name_called_count_;
- android::hardware::hidl_string last_name_;
-
- private:
- std::mutex mtx_;
- std::condition_variable cv_;
- int notify_count_;
+ sp<GnssCallback> gnss_cb_; // Primary callback interface
};
#endif // GNSS_HAL_TEST_H_
diff --git a/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp b/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
index ee236ba..3294bcd 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
@@ -50,7 +50,7 @@
ASSERT_TRUE(gnssMeasurement_1_1.isOk());
auto gnssMeasurement_1_0 = gnss_hal_->getExtensionGnssMeasurement();
ASSERT_TRUE(gnssMeasurement_1_0.isOk());
- if (last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENTS) {
+ if (gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENTS) {
sp<IGnssMeasurement_1_1> iGnssMeas_1_1 = gnssMeasurement_1_1;
sp<IGnssMeasurement_1_0> iGnssMeas_1_0 = gnssMeasurement_1_0;
// At least one interface must be non-null.
@@ -78,8 +78,10 @@
const bool kLowPowerMode = true;
// Warmup period - VTS doesn't have AGPS access via GnssLocationProvider
- StartAndCheckLocations(5);
+ gnss_cb_->location_cbq_.reset();
+ StartAndCheckLocations(kLocationsToCheck);
StopAndClearLocations();
+ gnss_cb_->location_cbq_.reset();
// Start of Low Power Mode test
SetPositionMode(kMinIntervalMsec, kLowPowerMode);
@@ -93,24 +95,27 @@
// Verify that kMinIntervalMsec is respected by waiting kNoLocationPeriodSec and
// ensure that no location is received yet
- wait(kNoLocationPeriodSec);
+ gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_, kNoLocationPeriodSec);
+ const int location_called_count = gnss_cb_->location_cbq_.calledCount();
+
// Tolerate (ignore) one extra location right after the first one
// to handle startup edge case scheduling limitations in some implementations
- if ((i == 1) && (location_called_count_ == 2)) {
- CheckLocation(last_location_, true);
+ if ((i == 1) && (location_called_count == 2)) {
+ CheckLocation(gnss_cb_->last_location_, true);
continue; // restart the quiet wait period after this too-fast location
}
- EXPECT_LE(location_called_count_, i);
- if (location_called_count_ != i) {
+ EXPECT_LE(location_called_count, i);
+ if (location_called_count != i) {
ALOGW("GetLocationLowPower test - not enough locations received. %d vs. %d expected ",
- location_called_count_, i);
+ location_called_count, i);
}
- if (std::cv_status::no_timeout !=
- wait(kLocationTimeoutSubsequentSec - kNoLocationPeriodSec)) {
+ if (!gnss_cb_->location_cbq_.retrieve(
+ gnss_cb_->last_location_,
+ kLocationTimeoutSubsequentSec - kNoLocationPeriodSec)) {
ALOGW("GetLocationLowPower test - timeout awaiting location %d", i);
} else {
- CheckLocation(last_location_, true);
+ CheckLocation(gnss_cb_->last_location_, true);
}
}
@@ -222,12 +227,15 @@
const int kLocationsToAwait = 3;
const int kRetriesToUnBlacklist = 10;
+ gnss_cb_->location_cbq_.reset();
StartAndCheckLocations(kLocationsToAwait);
+ int location_called_count = gnss_cb_->location_cbq_.calledCount();
// Tolerate 1 less sv status to handle edge cases in reporting.
- EXPECT_GE((int)list_gnss_sv_status_.size() + 1, kLocationsToAwait);
- ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations (%d received)",
- (int)list_gnss_sv_status_.size(), kLocationsToAwait, location_called_count_);
+ int sv_status_cbq_size = gnss_cb_->sv_status_cbq_.size();
+ EXPECT_GE(sv_status_cbq_size + 1, kLocationsToAwait);
+ ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations (%d received)", sv_status_cbq_size,
+ kLocationsToAwait, location_called_count);
/*
* Identify strongest SV seen at least kLocationsToAwait -1 times
@@ -235,8 +243,14 @@
* observability (one epoch RF null)
*/
+ const int kGnssSvStatusTimeout = 2;
+ list<IGnssCallback::GnssSvStatus> sv_status_list;
+ int count = gnss_cb_->sv_status_cbq_.retrieve(sv_status_list, sv_status_cbq_size,
+ kGnssSvStatusTimeout);
+ ASSERT_EQ(count, sv_status_cbq_size);
+
IGnssConfiguration::BlacklistedSource source_to_blacklist =
- FindStrongFrequentNonGpsSource(list_gnss_sv_status_, kLocationsToAwait - 1);
+ FindStrongFrequentNonGpsSource(sv_status_list, kLocationsToAwait - 1);
if (source_to_blacklist.constellation == GnssConstellationType::UNKNOWN) {
// Cannot find a non-GPS satellite. Let the test pass.
@@ -260,21 +274,26 @@
EXPECT_TRUE(result);
// retry and ensure satellite not used
- list_gnss_sv_status_.clear();
+ gnss_cb_->sv_status_cbq_.reset();
+ gnss_cb_->location_cbq_.reset();
StartAndCheckLocations(kLocationsToAwait);
// early exit if test is being run with insufficient signal
- if (location_called_count_ == 0) {
+ location_called_count = gnss_cb_->location_cbq_.calledCount();
+ if (location_called_count == 0) {
ALOGE("0 Gnss locations received - ensure sufficient signal and retry");
}
- ASSERT_TRUE(location_called_count_ > 0);
+ ASSERT_TRUE(location_called_count > 0);
// Tolerate 1 less sv status to handle edge cases in reporting.
- EXPECT_GE((int)list_gnss_sv_status_.size() + 1, kLocationsToAwait);
- ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations (%d received)",
- (int)list_gnss_sv_status_.size(), kLocationsToAwait, location_called_count_);
- for (const auto& gnss_sv_status : list_gnss_sv_status_) {
+ sv_status_cbq_size = gnss_cb_->sv_status_cbq_.size();
+ EXPECT_GE(sv_status_cbq_size + 1, kLocationsToAwait);
+ ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations (%d received)", sv_status_cbq_size,
+ kLocationsToAwait, location_called_count);
+ for (int i = 0; i < sv_status_cbq_size; ++i) {
+ IGnssCallback::GnssSvStatus gnss_sv_status;
+ gnss_cb_->sv_status_cbq_.retrieve(gnss_sv_status, kGnssSvStatusTimeout);
for (uint32_t iSv = 0; iSv < gnss_sv_status.numSvs; iSv++) {
const auto& gnss_sv = gnss_sv_status.gnssSvList[iSv];
EXPECT_FALSE((gnss_sv.svid == source_to_blacklist.svid) &&
@@ -295,24 +314,28 @@
int unblacklist_loops_remaining = kRetriesToUnBlacklist;
while (!strongest_sv_is_reobserved && (unblacklist_loops_remaining-- > 0)) {
StopAndClearLocations();
- list_gnss_sv_status_.clear();
+ gnss_cb_->sv_status_cbq_.reset();
+ gnss_cb_->location_cbq_.reset();
StartAndCheckLocations(kLocationsToAwait);
// early exit loop if test is being run with insufficient signal
- if (location_called_count_ == 0) {
+ location_called_count = gnss_cb_->location_cbq_.calledCount();
+ if (location_called_count == 0) {
ALOGE("0 Gnss locations received - ensure sufficient signal and retry");
}
- ASSERT_TRUE(location_called_count_ > 0);
+ ASSERT_TRUE(location_called_count > 0);
// Tolerate 1 less sv status to handle edge cases in reporting.
- EXPECT_GE((int)list_gnss_sv_status_.size() + 1, kLocationsToAwait);
- ALOGD(
- "Clear blacklist, observed %d GnssSvStatus, while awaiting %d Locations"
- ", tries remaining %d",
- (int)list_gnss_sv_status_.size(), kLocationsToAwait, unblacklist_loops_remaining);
+ sv_status_cbq_size = gnss_cb_->sv_status_cbq_.size();
+ EXPECT_GE(sv_status_cbq_size + 1, kLocationsToAwait);
+ ALOGD("Clear blacklist, observed %d GnssSvStatus, while awaiting %d Locations"
+ ", tries remaining %d",
+ sv_status_cbq_size, kLocationsToAwait, unblacklist_loops_remaining);
- for (const auto& gnss_sv_status : list_gnss_sv_status_) {
+ for (int i = 0; i < sv_status_cbq_size; ++i) {
+ IGnssCallback::GnssSvStatus gnss_sv_status;
+ gnss_cb_->sv_status_cbq_.retrieve(gnss_sv_status, kGnssSvStatusTimeout);
for (uint32_t iSv = 0; iSv < gnss_sv_status.numSvs; iSv++) {
const auto& gnss_sv = gnss_sv_status.gnssSvList[iSv];
if ((gnss_sv.svid == source_to_blacklist.svid) &&
@@ -347,16 +370,22 @@
const int kLocationsToAwait = 3;
+ gnss_cb_->location_cbq_.reset();
StartAndCheckLocations(kLocationsToAwait);
+ const int location_called_count = gnss_cb_->location_cbq_.calledCount();
// Tolerate 1 less sv status to handle edge cases in reporting.
- EXPECT_GE((int)list_gnss_sv_status_.size() + 1, kLocationsToAwait);
- ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations (%d received)",
- (int)list_gnss_sv_status_.size(), kLocationsToAwait, location_called_count_);
+ int sv_status_cbq_size = gnss_cb_->sv_status_cbq_.size();
+ EXPECT_GE(sv_status_cbq_size + 1, kLocationsToAwait);
+ ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations (%d received)", sv_status_cbq_size,
+ kLocationsToAwait, location_called_count);
// Find first non-GPS constellation to blacklist
+ const int kGnssSvStatusTimeout = 2;
GnssConstellationType constellation_to_blacklist = GnssConstellationType::UNKNOWN;
- for (const auto& gnss_sv_status : list_gnss_sv_status_) {
+ for (int i = 0; i < sv_status_cbq_size; ++i) {
+ IGnssCallback::GnssSvStatus gnss_sv_status;
+ gnss_cb_->sv_status_cbq_.retrieve(gnss_sv_status, kGnssSvStatusTimeout);
for (uint32_t iSv = 0; iSv < gnss_sv_status.numSvs; iSv++) {
const auto& gnss_sv = gnss_sv_status.gnssSvList[iSv];
if ((gnss_sv.svFlag & IGnssCallback::GnssSvFlags::USED_IN_FIX) &&
@@ -395,16 +424,19 @@
EXPECT_TRUE(result);
// retry and ensure constellation not used
- list_gnss_sv_status_.clear();
+ gnss_cb_->sv_status_cbq_.reset();
- location_called_count_ = 0;
+ gnss_cb_->location_cbq_.reset();
StartAndCheckLocations(kLocationsToAwait);
// Tolerate 1 less sv status to handle edge cases in reporting.
- EXPECT_GE((int)list_gnss_sv_status_.size() + 1, kLocationsToAwait);
- ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations", (int)list_gnss_sv_status_.size(),
+ sv_status_cbq_size = gnss_cb_->sv_status_cbq_.size();
+ EXPECT_GE(sv_status_cbq_size + 1, kLocationsToAwait);
+ ALOGD("Observed %d GnssSvStatus, while awaiting %d Locations", sv_status_cbq_size,
kLocationsToAwait);
- for (const auto& gnss_sv_status : list_gnss_sv_status_) {
+ for (int i = 0; i < sv_status_cbq_size; ++i) {
+ IGnssCallback::GnssSvStatus gnss_sv_status;
+ gnss_cb_->sv_status_cbq_.retrieve(gnss_sv_status, kGnssSvStatusTimeout);
for (uint32_t iSv = 0; iSv < gnss_sv_status.numSvs; iSv++) {
const auto& gnss_sv = gnss_sv_status.gnssSvList[iSv];
EXPECT_FALSE((gnss_sv.constellation == source_to_blacklist.constellation) &&
@@ -427,7 +459,7 @@
*/
TEST_F(GnssHalTest, InjectBestLocation) {
StartAndCheckLocations(1);
- GnssLocation gnssLocation = last_location_;
+ GnssLocation gnssLocation = gnss_cb_->last_location_;
CheckLocation(gnssLocation, true);
auto result = gnss_hal_->injectBestLocation(gnssLocation);
@@ -447,7 +479,7 @@
TEST_F(GnssHalTest, GnssDebugValuesSanityTest) {
auto gnssDebug = gnss_hal_->getExtensionGnssDebug();
ASSERT_TRUE(gnssDebug.isOk());
- if (info_called_count_ > 0 && last_info_.yearOfHw >= 2017) {
+ if (gnss_cb_->info_cbq_.calledCount() > 0 && gnss_cb_->last_info_.yearOfHw >= 2017) {
sp<IGnssDebug> iGnssDebug = gnssDebug;
EXPECT_NE(iGnssDebug, nullptr);
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.h b/gnss/2.0/vts/functional/gnss_hal_test.h
index 90a7866..7d07c0f 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.h
+++ b/gnss/2.0/vts/functional/gnss_hal_test.h
@@ -17,19 +17,16 @@
#ifndef GNSS_HAL_TEST_H_
#define GNSS_HAL_TEST_H_
-#include <android/hardware/gnss/2.0/IGnss.h>
#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>
-
-#include <condition_variable>
-#include <deque>
-#include <list>
-#include <mutex>
+#include <android/hardware/gnss/2.0/IGnss.h>
+#include "GnssCallbackEventQueue.h"
using android::hardware::hidl_vec;
using android::hardware::Return;
using android::hardware::Void;
+using android::hardware::gnss::common::GnssCallbackEventQueue;
using android::hardware::gnss::measurement_corrections::V1_0::IMeasurementCorrectionsCallback;
using android::hardware::gnss::V1_0::GnssLocationFlags;
using android::hardware::gnss::V2_0::IGnss;
@@ -70,50 +67,6 @@
virtual void TearDown() override;
- /* Producer/consumer queue for storing/retrieving callback events from GNSS HAL */
- template <class T>
- class CallbackQueue {
- public:
- CallbackQueue(const std::string& name) : name_(name), called_count_(0){};
- ~CallbackQueue() { reset(); }
-
- /* Adds callback event to the end of the queue. */
- void store(const T& event);
-
- /*
- * Removes the callack event at the front of the queue, stores it in event parameter
- * and returns true. Returns false on timeout and event is not populated.
- */
- bool retrieve(T& event, int timeout_seconds);
-
- /*
- * Removes parameter count number of callack events at the front of the queue, stores
- * them in event_list parameter and returns the number of events retrieved. Waits up to
- * timeout_seconds to retrieve each event. If timeout occurs, it returns the number of
- * items retrieved which will be less than count.
- */
- int retrieve(list<T>& event_list, int count, int timeout_seconds);
-
- /* Returns the number of events pending to be retrieved from the callback event queue. */
- int size() const;
-
- /* Returns the number of callback events received since last reset(). */
- int calledCount() const;
-
- /* Clears the callback event queue and resets the calledCount() to 0. */
- void reset();
-
- private:
- CallbackQueue(const CallbackQueue&) = delete;
- CallbackQueue& operator=(const CallbackQueue&) = delete;
-
- std::string name_;
- int called_count_;
- mutable std::recursive_mutex mtx_;
- std::condition_variable_any cv_;
- std::deque<T> events_;
- };
-
/* Callback class for data & Event. */
class GnssCallback : public IGnssCallback_2_0 {
public:
@@ -122,11 +75,11 @@
uint32_t last_capabilities_;
GnssLocation_2_0 last_location_;
- CallbackQueue<IGnssCallback_1_0::GnssSystemInfo> info_cbq_;
- CallbackQueue<android::hardware::hidl_string> name_cbq_;
- CallbackQueue<uint32_t> capabilities_cbq_;
- CallbackQueue<GnssLocation_2_0> location_cbq_;
- CallbackQueue<hidl_vec<IGnssCallback_2_0::GnssSvInfo>> sv_info_list_cbq_;
+ GnssCallbackEventQueue<IGnssCallback_1_0::GnssSystemInfo> info_cbq_;
+ GnssCallbackEventQueue<android::hardware::hidl_string> name_cbq_;
+ GnssCallbackEventQueue<uint32_t> capabilities_cbq_;
+ GnssCallbackEventQueue<GnssLocation_2_0> location_cbq_;
+ GnssCallbackEventQueue<hidl_vec<IGnssCallback_2_0::GnssSvInfo>> sv_info_list_cbq_;
GnssCallback();
virtual ~GnssCallback() = default;
@@ -169,7 +122,7 @@
/* Callback class for GnssMeasurement. */
class GnssMeasurementCallback : public IGnssMeasurementCallback_2_0 {
public:
- CallbackQueue<IGnssMeasurementCallback_2_0::GnssData> measurement_cbq_;
+ GnssCallbackEventQueue<IGnssMeasurementCallback_2_0::GnssData> measurement_cbq_;
GnssMeasurementCallback() : measurement_cbq_("measurement"){};
virtual ~GnssMeasurementCallback() = default;
@@ -192,7 +145,7 @@
class GnssMeasurementCorrectionsCallback : public IMeasurementCorrectionsCallback {
public:
uint32_t last_capabilities_;
- CallbackQueue<uint32_t> capabilities_cbq_;
+ GnssCallbackEventQueue<uint32_t> capabilities_cbq_;
GnssMeasurementCorrectionsCallback() : capabilities_cbq_("capabilities"){};
virtual ~GnssMeasurementCorrectionsCallback() = default;
@@ -252,61 +205,4 @@
sp<GnssCallback> gnss_cb_; // Primary callback interface
};
-template <class T>
-void GnssHalTest::CallbackQueue<T>::store(const T& event) {
- std::unique_lock<std::recursive_mutex> lock(mtx_);
- events_.push_back(event);
- ++called_count_;
- lock.unlock();
- cv_.notify_all();
-}
-
-template <class T>
-bool GnssHalTest::CallbackQueue<T>::retrieve(T& event, int timeout_seconds) {
- std::unique_lock<std::recursive_mutex> lock(mtx_);
- cv_.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] { return !events_.empty(); });
- if (events_.empty()) {
- return false;
- }
- event = events_.front();
- events_.pop_front();
- return true;
-}
-
-template <class T>
-int GnssHalTest::CallbackQueue<T>::retrieve(list<T>& event_list, int count, int timeout_seconds) {
- for (int i = 0; i < count; ++i) {
- T event;
- if (!retrieve(event, timeout_seconds)) {
- return i;
- }
- event_list.push_back(event);
- }
-
- return count;
-}
-
-template <class T>
-int GnssHalTest::CallbackQueue<T>::size() const {
- std::unique_lock<std::recursive_mutex> lock(mtx_);
- return events_.size();
-}
-
-template <class T>
-int GnssHalTest::CallbackQueue<T>::calledCount() const {
- std::unique_lock<std::recursive_mutex> lock(mtx_);
- return called_count_;
-}
-
-template <class T>
-void GnssHalTest::CallbackQueue<T>::reset() {
- std::unique_lock<std::recursive_mutex> lock(mtx_);
- if (!events_.empty()) {
- ALOGW("%u unprocessed events discarded in callback queue %s", (unsigned int)events_.size(),
- name_.c_str());
- }
- events_.clear();
- called_count_ = 0;
-}
-
#endif // GNSS_HAL_TEST_H_
diff --git a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
index 39736cc..ca3edc5 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
@@ -453,18 +453,18 @@
// ensure that no location is received yet
gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_, kNoLocationPeriodSec);
- const int locationCalledCount = gnss_cb_->location_cbq_.calledCount();
+ const int location_called_count = gnss_cb_->location_cbq_.calledCount();
// Tolerate (ignore) one extra location right after the first one
// to handle startup edge case scheduling limitations in some implementations
- if ((i == 1) && (locationCalledCount == 2)) {
+ if ((i == 1) && (location_called_count == 2)) {
CheckLocation(gnss_cb_->last_location_, true);
continue; // restart the quiet wait period after this too-fast location
}
- EXPECT_LE(locationCalledCount, i);
- if (locationCalledCount != i) {
+ EXPECT_LE(location_called_count, i);
+ if (location_called_count != i) {
ALOGW("GetLocationLowPower test - not enough locations received. %d vs. %d expected ",
- locationCalledCount, i);
+ location_called_count, i);
}
if (!gnss_cb_->location_cbq_.retrieve(
diff --git a/gnss/common/utils/vts/include/GnssCallbackEventQueue.h b/gnss/common/utils/vts/include/GnssCallbackEventQueue.h
new file mode 100644
index 0000000..b1d8ed4
--- /dev/null
+++ b/gnss/common/utils/vts/include/GnssCallbackEventQueue.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef android_hardware_gnss_common_vts_GnssCallbackEventQueue_H_
+#define android_hardware_gnss_common_vts_GnssCallbackEventQueue_H_
+
+#include <log/log.h>
+
+#include <condition_variable>
+#include <deque>
+#include <list>
+#include <mutex>
+
+namespace android {
+namespace hardware {
+namespace gnss {
+namespace common {
+
+/*
+ * Producer/consumer queue for storing/retrieving callback events from GNSS HAL.
+ */
+template <class T>
+class GnssCallbackEventQueue {
+ public:
+ GnssCallbackEventQueue(const std::string& name) : name_(name), called_count_(0){};
+ ~GnssCallbackEventQueue() { reset(); }
+
+ /* Adds callback event to the end of the queue. */
+ void store(const T& event);
+
+ /*
+ * Removes the callback event at the front of the queue, stores it in the event parameter
+ * and returns true. Returns false on timeout, in which case event is left unpopulated.
+ */
+ bool retrieve(T& event, int timeout_seconds);
+
+ /*
+ * Removes up to count callback events from the front of the queue, stores them in
+ * event_list and returns the number of events retrieved. Waits up to timeout_seconds
+ * for each event; on timeout it returns the number of items retrieved so far, which
+ * will be less than count.
+ */
+ int retrieve(std::list<T>& event_list, int count, int timeout_seconds);
+
+ /* Returns the number of events pending to be retrieved from the callback event queue. */
+ int size() const;
+
+ /* Returns the number of callback events received since last reset(). */
+ int calledCount() const;
+
+ /* Clears the callback event queue and resets the calledCount() to 0. */
+ void reset();
+
+ private:
+ GnssCallbackEventQueue(const GnssCallbackEventQueue&) = delete;
+ GnssCallbackEventQueue& operator=(const GnssCallbackEventQueue&) = delete;
+
+ std::string name_;
+ int called_count_;
+ mutable std::recursive_mutex mtx_;
+ std::condition_variable_any cv_;
+ std::deque<T> events_;
+};
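+
+// Usage sketch (illustrative only; the VTS tests follow this same producer/consumer pattern,
+// and TIMEOUT_SEC stands in for a test-side timeout constant):
+//   GnssCallbackEventQueue<uint32_t> capabilities_cbq("capabilities");
+//   capabilities_cbq.store(capabilities);                  // HAL callback (producer) side
+//   uint32_t last_capabilities;
+//   bool ok = capabilities_cbq.retrieve(last_capabilities, TIMEOUT_SEC);  // false on timeout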
+
+template <class T>
+void GnssCallbackEventQueue<T>::store(const T& event) {
+ std::unique_lock<std::recursive_mutex> lock(mtx_);
+ events_.push_back(event);
+ ++called_count_;
+ lock.unlock();
+ cv_.notify_all();
+}
+
+template <class T>
+bool GnssCallbackEventQueue<T>::retrieve(T& event, int timeout_seconds) {
+ std::unique_lock<std::recursive_mutex> lock(mtx_);
+ cv_.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] { return !events_.empty(); });
+ if (events_.empty()) {
+ return false;
+ }
+ event = events_.front();
+ events_.pop_front();
+ return true;
+}
+
+template <class T>
+int GnssCallbackEventQueue<T>::retrieve(std::list<T>& event_list, int count, int timeout_seconds) {
+ for (int i = 0; i < count; ++i) {
+ T event;
+ if (!retrieve(event, timeout_seconds)) {
+ return i;
+ }
+ event_list.push_back(event);
+ }
+
+ return count;
+}
+
+template <class T>
+int GnssCallbackEventQueue<T>::size() const {
+ std::unique_lock<std::recursive_mutex> lock(mtx_);
+ return events_.size();
+}
+
+template <class T>
+int GnssCallbackEventQueue<T>::calledCount() const {
+ std::unique_lock<std::recursive_mutex> lock(mtx_);
+ return called_count_;
+}
+
+template <class T>
+void GnssCallbackEventQueue<T>::reset() {
+ std::unique_lock<std::recursive_mutex> lock(mtx_);
+ if (!events_.empty()) {
+ ALOGW("%u unprocessed events discarded in callback queue %s", (unsigned int)events_.size(),
+ name_.c_str());
+ }
+ events_.clear();
+ called_count_ = 0;
+}
+
+} // namespace common
+} // namespace gnss
+} // namespace hardware
+} // namespace android
+
+#endif // android_hardware_gnss_common_vts_GnssCallbackEventQueue_H_
diff --git a/graphics/composer/2.2/vts/functional/Android.bp b/graphics/composer/2.2/vts/functional/Android.bp
index cc24774..1319035 100644
--- a/graphics/composer/2.2/vts/functional/Android.bp
+++ b/graphics/composer/2.2/vts/functional/Android.bp
@@ -29,6 +29,7 @@
"libhidlbase",
"libhidltransport",
"libhwbinder",
+ "libprocessgroup",
"libsync",
"libui",
"libgui",
diff --git a/sensors/1.0/default/OWNERS b/sensors/1.0/default/OWNERS
index 2031d84..90c2330 100644
--- a/sensors/1.0/default/OWNERS
+++ b/sensors/1.0/default/OWNERS
@@ -1,2 +1,3 @@
+arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
diff --git a/sensors/1.0/vts/functional/Android.bp b/sensors/1.0/vts/functional/Android.bp
index 444797d..7bb992b 100644
--- a/sensors/1.0/vts/functional/Android.bp
+++ b/sensors/1.0/vts/functional/Android.bp
@@ -24,7 +24,10 @@
],
static_libs: [
"android.hardware.graphics.allocator@2.0",
+ "android.hardware.graphics.allocator@3.0",
"android.hardware.graphics.mapper@2.0",
+ "android.hardware.graphics.mapper@2.1",
+ "android.hardware.graphics.mapper@3.0",
"android.hardware.sensors@1.0",
"VtsHalSensorsTargetTestUtils",
],
diff --git a/sensors/1.0/vts/functional/OWNERS b/sensors/1.0/vts/functional/OWNERS
index 759d87b..892da15 100644
--- a/sensors/1.0/vts/functional/OWNERS
+++ b/sensors/1.0/vts/functional/OWNERS
@@ -1,6 +1,7 @@
# Sensors team
+arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
# VTS team
trong@google.com
diff --git a/sensors/2.0/default/OWNERS b/sensors/2.0/default/OWNERS
index 2031d84..90c2330 100644
--- a/sensors/2.0/default/OWNERS
+++ b/sensors/2.0/default/OWNERS
@@ -1,2 +1,3 @@
+arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
diff --git a/sensors/2.0/vts/functional/Android.bp b/sensors/2.0/vts/functional/Android.bp
index 98f0eac..4765fa2 100644
--- a/sensors/2.0/vts/functional/Android.bp
+++ b/sensors/2.0/vts/functional/Android.bp
@@ -24,7 +24,10 @@
],
static_libs: [
"android.hardware.graphics.allocator@2.0",
+ "android.hardware.graphics.allocator@3.0",
"android.hardware.graphics.mapper@2.0",
+ "android.hardware.graphics.mapper@2.1",
+ "android.hardware.graphics.mapper@3.0",
"android.hardware.sensors@1.0",
"android.hardware.sensors@2.0",
"libfmq",
diff --git a/sensors/2.0/vts/functional/OWNERS b/sensors/2.0/vts/functional/OWNERS
index 759d87b..892da15 100644
--- a/sensors/2.0/vts/functional/OWNERS
+++ b/sensors/2.0/vts/functional/OWNERS
@@ -1,6 +1,7 @@
# Sensors team
+arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
# VTS team
trong@google.com
diff --git a/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp b/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp
index 6ff393d..8364ba9 100644
--- a/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp
+++ b/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp
@@ -176,19 +176,21 @@
// Helper functions
void activateAllSensors(bool enable);
std::vector<SensorInfo> getNonOneShotSensors();
+ std::vector<SensorInfo> getNonOneShotAndNonSpecialSensors();
std::vector<SensorInfo> getOneShotSensors();
std::vector<SensorInfo> getInjectEventSensors();
int32_t getInvalidSensorHandle();
bool getDirectChannelSensor(SensorInfo* sensor, SharedMemType* memType, RateLevel* rate);
void verifyDirectChannel(SharedMemType memType);
- void verifyRegisterDirectChannel(const SensorInfo& sensor, SharedMemType memType,
- std::shared_ptr<SensorsTestSharedMemory> mem,
- int32_t* directChannelHandle);
+ void verifyRegisterDirectChannel(std::shared_ptr<SensorsTestSharedMemory> mem,
+ int32_t* directChannelHandle, bool supportsSharedMemType,
+ bool supportsAnyDirectChannel);
void verifyConfigure(const SensorInfo& sensor, SharedMemType memType,
- int32_t directChannelHandle);
- void verifyUnregisterDirectChannel(const SensorInfo& sensor, SharedMemType memType,
- int32_t directChannelHandle);
+ int32_t directChannelHandle, bool directChannelSupported);
+ void verifyUnregisterDirectChannel(int32_t directChannelHandle, bool directChannelSupported);
void checkRateLevel(const SensorInfo& sensor, int32_t directChannelHandle, RateLevel rateLevel);
+ void queryDirectChannelSupport(SharedMemType memType, bool* supportsSharedMemType,
+ bool* supportsAnyDirectChannel);
};
Return<Result> SensorsHidlTest::activate(int32_t sensorHandle, bool enabled) {
@@ -257,6 +259,18 @@
return sensors;
}
+std::vector<SensorInfo> SensorsHidlTest::getNonOneShotAndNonSpecialSensors() {
+ std::vector<SensorInfo> sensors;
+ for (const SensorInfo& info : getSensorsList()) {
+ SensorFlagBits reportMode = extractReportMode(info.flags);
+ if (reportMode != SensorFlagBits::ONE_SHOT_MODE &&
+ reportMode != SensorFlagBits::SPECIAL_REPORTING_MODE) {
+ sensors.push_back(info);
+ }
+ }
+ return sensors;
+}
+
std::vector<SensorInfo> SensorsHidlTest::getOneShotSensors() {
std::vector<SensorInfo> sensors;
for (const SensorInfo& info : getSensorsList()) {
@@ -777,7 +791,12 @@
activateAllSensors(false /* enable */);
for (const SensorInfo& sensor : getSensorsList()) {
// Call batch on inactive sensor
- ASSERT_EQ(batch(sensor.sensorHandle, sensor.minDelay, 0 /* maxReportLatencyNs */),
+ // One-shot sensors have minDelay set to -1, which is an invalid sampling period.
+ // Use 0 instead to avoid errors.
+ int64_t samplingPeriodNs = extractReportMode(sensor.flags) == SensorFlagBits::ONE_SHOT_MODE
+ ? 0
+ : sensor.minDelay;
+ ASSERT_EQ(batch(sensor.sensorHandle, samplingPeriodNs, 0 /* maxReportLatencyNs */),
Result::OK);
// Activate the sensor
@@ -830,9 +849,10 @@
EventCallback callback;
getEnvironment()->registerCallback(&callback);
- const std::vector<SensorInfo> sensors = getSensorsList();
+ // This test is not valid for one-shot or special-report-mode sensors
+ const std::vector<SensorInfo> sensors = getNonOneShotAndNonSpecialSensors();
milliseconds maxMinDelay(0);
- for (const SensorInfo& sensor : getSensorsList()) {
+ for (const SensorInfo& sensor : sensors) {
milliseconds minDelay = duration_cast<milliseconds>(microseconds(sensor.minDelay));
maxMinDelay = milliseconds(std::max(maxMinDelay.count(), minDelay.count()));
}
@@ -849,9 +869,14 @@
// Save the last received event for each sensor
std::map<int32_t, int64_t> lastEventTimestampMap;
for (const SensorInfo& sensor : sensors) {
- ASSERT_GE(callback.getEvents(sensor.sensorHandle).size(), 1);
- lastEventTimestampMap[sensor.sensorHandle] =
- callback.getEvents(sensor.sensorHandle).back().timestamp;
+ // Some on-change sensors may not report an event without stimulus
+ if (extractReportMode(sensor.flags) != SensorFlagBits::ON_CHANGE_MODE) {
+ ASSERT_GE(callback.getEvents(sensor.sensorHandle).size(), 1);
+ }
+ if (callback.getEvents(sensor.sensorHandle).size() >= 1) {
+ lastEventTimestampMap[sensor.sensorHandle] =
+ callback.getEvents(sensor.sensorHandle).back().timestamp;
+ }
}
// Allow some time to pass, reset the callback, then reactivate the sensors
@@ -862,6 +887,14 @@
activateAllSensors(false);
for (const SensorInfo& sensor : sensors) {
+ // Skip sensors that did not previously report an event
+ if (lastEventTimestampMap.find(sensor.sensorHandle) == lastEventTimestampMap.end()) {
+ continue;
+ }
+ // Skip on-change sensors that do not consistently report an initial event
+ if (callback.getEvents(sensor.sensorHandle).size() < 1) {
+ continue;
+ }
// Ensure that the first event received is not stale by ensuring that its timestamp is
// sufficiently different from the previous event
const Event newEvent = callback.getEvents(sensor.sensorHandle).front();
@@ -878,21 +911,43 @@
[&](Result result, int32_t reportToken) {
if (isDirectReportRateSupported(sensor, rateLevel)) {
ASSERT_EQ(result, Result::OK);
- ASSERT_GT(reportToken, 0);
+ if (rateLevel != RateLevel::STOP) {
+ ASSERT_GT(reportToken, 0);
+ }
} else {
ASSERT_EQ(result, Result::BAD_VALUE);
}
});
}
-void SensorsHidlTest::verifyRegisterDirectChannel(const SensorInfo& sensor, SharedMemType memType,
- std::shared_ptr<SensorsTestSharedMemory> mem,
- int32_t* directChannelHandle) {
+void SensorsHidlTest::queryDirectChannelSupport(SharedMemType memType, bool* supportsSharedMemType,
+ bool* supportsAnyDirectChannel) {
+ *supportsSharedMemType = false;
+ *supportsAnyDirectChannel = false;
+ for (const SensorInfo& curSensor : getSensorsList()) {
+ if (isDirectChannelTypeSupported(curSensor, memType)) {
+ *supportsSharedMemType = true;
+ }
+ if (isDirectChannelTypeSupported(curSensor, SharedMemType::ASHMEM) ||
+ isDirectChannelTypeSupported(curSensor, SharedMemType::GRALLOC)) {
+ *supportsAnyDirectChannel = true;
+ }
+
+ if (*supportsSharedMemType && *supportsAnyDirectChannel) {
+ break;
+ }
+ }
+}
+
+void SensorsHidlTest::verifyRegisterDirectChannel(std::shared_ptr<SensorsTestSharedMemory> mem,
+ int32_t* directChannelHandle,
+ bool supportsSharedMemType,
+ bool supportsAnyDirectChannel) {
char* buffer = mem->getBuffer();
memset(buffer, 0xff, mem->getSize());
registerDirectChannel(mem->getSharedMemInfo(), [&](Result result, int32_t channelHandle) {
- if (isDirectChannelTypeSupported(sensor, memType)) {
+ if (supportsSharedMemType) {
ASSERT_EQ(result, Result::OK);
ASSERT_GT(channelHandle, 0);
@@ -901,7 +956,9 @@
ASSERT_EQ(buffer[i], 0x00);
}
} else {
- ASSERT_EQ(result, Result::INVALID_OPERATION);
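+            // The registration is expected to fail here: if the HAL supports direct channels
+            // for some other SharedMemType the request is merely a bad value, while a HAL with
+            // no direct-channel support at all treats the operation itself as invalid.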
+ Result expectedResult =
+ supportsAnyDirectChannel ? Result::BAD_VALUE : Result::INVALID_OPERATION;
+ ASSERT_EQ(result, expectedResult);
ASSERT_EQ(channelHandle, -1);
}
*directChannelHandle = channelHandle;
@@ -909,7 +966,7 @@
}
void SensorsHidlTest::verifyConfigure(const SensorInfo& sensor, SharedMemType memType,
- int32_t directChannelHandle) {
+ int32_t directChannelHandle, bool supportsAnyDirectChannel) {
if (isDirectChannelTypeSupported(sensor, memType)) {
// Verify that each rate level is properly supported
checkRateLevel(sensor, directChannelHandle, RateLevel::NORMAL);
@@ -925,22 +982,22 @@
-1 /* sensorHandle */, directChannelHandle, RateLevel::STOP,
[](Result result, int32_t /* reportToken */) { ASSERT_EQ(result, Result::OK); });
} else {
- // Direct channel is not supported for this SharedMemType
+ // directChannelHandle will be -1 here. The HAL should reject it as a bad value if it
+ // supports direct channel reporting at some level, and otherwise return INVALID_OPERATION
+ // because direct channel is not supported at all.
+ Result expectedResult =
+ supportsAnyDirectChannel ? Result::BAD_VALUE : Result::INVALID_OPERATION;
configDirectReport(sensor.sensorHandle, directChannelHandle, RateLevel::NORMAL,
- [](Result result, int32_t /* reportToken */) {
- ASSERT_EQ(result, Result::INVALID_OPERATION);
+ [expectedResult](Result result, int32_t /* reportToken */) {
+ ASSERT_EQ(result, expectedResult);
});
}
}
-void SensorsHidlTest::verifyUnregisterDirectChannel(const SensorInfo& sensor, SharedMemType memType,
- int32_t directChannelHandle) {
- Result result = unregisterDirectChannel(directChannelHandle);
- if (isDirectChannelTypeSupported(sensor, memType)) {
- ASSERT_EQ(result, Result::OK);
- } else {
- ASSERT_EQ(result, Result::INVALID_OPERATION);
- }
+void SensorsHidlTest::verifyUnregisterDirectChannel(int32_t directChannelHandle,
+ bool supportsAnyDirectChannel) {
+ Result expectedResult = supportsAnyDirectChannel ? Result::OK : Result::INVALID_OPERATION;
+ ASSERT_EQ(unregisterDirectChannel(directChannelHandle), expectedResult);
}
void SensorsHidlTest::verifyDirectChannel(SharedMemType memType) {
@@ -951,11 +1008,16 @@
SensorsTestSharedMemory::create(memType, kMemSize));
ASSERT_NE(mem, nullptr);
+ bool supportsSharedMemType;
+ bool supportsAnyDirectChannel;
+ queryDirectChannelSupport(memType, &supportsSharedMemType, &supportsAnyDirectChannel);
+
for (const SensorInfo& sensor : getSensorsList()) {
int32_t directChannelHandle = 0;
- verifyRegisterDirectChannel(sensor, memType, mem, &directChannelHandle);
- verifyConfigure(sensor, memType, directChannelHandle);
- verifyUnregisterDirectChannel(sensor, memType, directChannelHandle);
+ verifyRegisterDirectChannel(mem, &directChannelHandle, supportsSharedMemType,
+ supportsAnyDirectChannel);
+ verifyConfigure(sensor, memType, directChannelHandle, supportsAnyDirectChannel);
+ verifyUnregisterDirectChannel(directChannelHandle, supportsAnyDirectChannel);
}
}
diff --git a/sensors/common/vts/OWNERS b/sensors/common/vts/OWNERS
index 759d87b..892da15 100644
--- a/sensors/common/vts/OWNERS
+++ b/sensors/common/vts/OWNERS
@@ -1,6 +1,7 @@
# Sensors team
+arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
# VTS team
trong@google.com
diff --git a/sensors/common/vts/utils/Android.bp b/sensors/common/vts/utils/Android.bp
index 8da554a..02dc608 100644
--- a/sensors/common/vts/utils/Android.bp
+++ b/sensors/common/vts/utils/Android.bp
@@ -31,7 +31,10 @@
],
static_libs: [
"android.hardware.graphics.allocator@2.0",
+ "android.hardware.graphics.allocator@3.0",
"android.hardware.graphics.mapper@2.0",
+ "android.hardware.graphics.mapper@2.1",
+ "android.hardware.graphics.mapper@3.0",
"android.hardware.sensors@1.0",
"VtsHalHidlTargetTestBase",
],
diff --git a/sensors/common/vts/utils/GrallocWrapper.cpp b/sensors/common/vts/utils/GrallocWrapper.cpp
index 222ef96..1cad913 100644
--- a/sensors/common/vts/utils/GrallocWrapper.cpp
+++ b/sensors/common/vts/utils/GrallocWrapper.cpp
@@ -16,206 +16,262 @@
#include "GrallocWrapper.h"
+#include <android/hardware/graphics/allocator/2.0/IAllocator.h>
+#include <android/hardware/graphics/allocator/3.0/IAllocator.h>
+#include <android/hardware/graphics/mapper/2.0/IMapper.h>
+#include <android/hardware/graphics/mapper/2.1/IMapper.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
#include <utils/Log.h>
+#include <cinttypes>
+#include <type_traits>
+
+using IAllocator2 = ::android::hardware::graphics::allocator::V2_0::IAllocator;
+using IAllocator3 = ::android::hardware::graphics::allocator::V3_0::IAllocator;
+using IMapper2 = ::android::hardware::graphics::mapper::V2_0::IMapper;
+using IMapper2_1 = ::android::hardware::graphics::mapper::V2_1::IMapper;
+using IMapper3 = ::android::hardware::graphics::mapper::V3_0::IMapper;
+
+using Error2 = ::android::hardware::graphics::mapper::V2_0::Error;
+using Error3 = ::android::hardware::graphics::mapper::V3_0::Error;
+
+using ::android::hardware::graphics::common::V1_0::BufferUsage;
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+
+// This is a typedef to the same underlying type across v2.0 and v3.0
+using ::android::hardware::graphics::mapper::V2_0::BufferDescriptor;
+
+using ::android::hardware::hidl_handle;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
namespace android {
-GrallocWrapper::GrallocWrapper() {
- init();
+// Since we use the same APIs across allocator/mapper HALs but they have major
+// version differences (meaning they are not related through inheritance), we
+// create a common interface abstraction for the IAllocator + IMapper combination
+// (major versions need to match in the current HALs, e.g. IAllocator 3.0 needs to
+// be paired with IMapper 3.0, so these are tied together)
+class IGrallocHalWrapper {
+ public:
+ virtual ~IGrallocHalWrapper() = default;
+
+ // IAllocator
+ virtual std::string dumpDebugInfo() = 0;
+ virtual native_handle_t* allocate(uint32_t size) = 0;
+ virtual void freeBuffer(native_handle_t* bufferHandle) = 0;
+
+ // IMapper
+ virtual void* lock(native_handle_t* bufferHandle) = 0;
+ virtual void unlock(native_handle_t* bufferHandle) = 0;
+};
+
+namespace {
+
+bool failed(Error2 error) {
+ return (error != Error2::NONE);
+}
+bool failed(Error3 error) {
+ return (error != Error3::NONE);
}
-void GrallocWrapper::init() {
- mAllocator = allocator2::IAllocator::getService();
- if (mAllocator == nullptr) {
- ALOGE("Failed to get allocator service");
- }
-
- mMapper = mapper2::IMapper::getService();
- if (mMapper == nullptr) {
- ALOGE("Failed to get mapper service");
- } else if (mMapper->isRemote()) {
- ALOGE("Mapper is not in passthrough mode");
- }
-}
-
-GrallocWrapper::~GrallocWrapper() {
- for (auto bufferHandle : mClonedBuffers) {
- auto buffer = const_cast<native_handle_t*>(bufferHandle);
- native_handle_close(buffer);
- native_handle_delete(buffer);
- }
- mClonedBuffers.clear();
-
- for (auto bufferHandle : mImportedBuffers) {
- auto buffer = const_cast<native_handle_t*>(bufferHandle);
- if (mMapper->freeBuffer(buffer) != mapper2::Error::NONE) {
- ALOGE("Failed to free buffer %p", buffer);
+// Since all the type and function names are the same for the things we use across the major HAL
+// versions, we use template magic to avoid repeating ourselves.
+template <typename AllocatorT, typename MapperT>
+class GrallocHalWrapper : public IGrallocHalWrapper {
+ public:
+ GrallocHalWrapper(const sp<AllocatorT>& allocator, const sp<MapperT>& mapper)
+ : mAllocator(allocator), mMapper(mapper) {
+ if (mapper->isRemote()) {
+ ALOGE("Mapper is in passthrough mode");
}
}
- mImportedBuffers.clear();
-}
-sp<allocator2::IAllocator> GrallocWrapper::getAllocator() const {
- return mAllocator;
-}
+ virtual std::string dumpDebugInfo() override;
+ virtual native_handle_t* allocate(uint32_t size) override;
+ virtual void freeBuffer(native_handle_t* bufferHandle) override;
-std::string GrallocWrapper::dumpDebugInfo() {
+ virtual void* lock(native_handle_t* bufferHandle) override;
+ virtual void unlock(native_handle_t* bufferHandle) override;
+
+ private:
+ static constexpr uint64_t kBufferUsage =
+ static_cast<uint64_t>(BufferUsage::SENSOR_DIRECT_DATA | BufferUsage::CPU_READ_OFTEN);
+ sp<AllocatorT> mAllocator;
+ sp<MapperT> mMapper;
+
+ BufferDescriptor getDescriptor(uint32_t size);
+ native_handle_t* importBuffer(const hidl_handle& rawHandle);
+};
+
+template <typename AllocatorT, typename MapperT>
+std::string GrallocHalWrapper<AllocatorT, MapperT>::dumpDebugInfo() {
std::string debugInfo;
- mAllocator->dumpDebugInfo([&](const auto& tmpDebugInfo) { debugInfo = tmpDebugInfo.c_str(); });
-
+ mAllocator->dumpDebugInfo([&](const hidl_string& tmpDebugInfo) { debugInfo = tmpDebugInfo; });
return debugInfo;
}
-const native_handle_t* GrallocWrapper::cloneBuffer(const hardware::hidl_handle& rawHandle) {
- const native_handle_t* bufferHandle = native_handle_clone(rawHandle.getNativeHandle());
+template <typename AllocatorT, typename MapperT>
+native_handle_t* GrallocHalWrapper<AllocatorT, MapperT>::allocate(uint32_t size) {
+ constexpr uint32_t kBufferCount = 1;
+ BufferDescriptor descriptor = getDescriptor(size);
+ native_handle_t* bufferHandle = nullptr;
- if (bufferHandle) {
- mClonedBuffers.insert(bufferHandle);
- }
+ auto callback = [&](auto error, uint32_t /*stride*/, const hidl_vec<hidl_handle>& buffers) {
+ if (failed(error)) {
+ ALOGE("Failed to allocate buffer: %" PRId32, static_cast<int32_t>(error));
+ } else if (buffers.size() != kBufferCount) {
+ ALOGE("Invalid buffer array size (got %zu, expected %" PRIu32 ")", buffers.size(),
+ kBufferCount);
+ } else {
+ bufferHandle = importBuffer(buffers[0]);
+ }
+ };
+
+ mAllocator->allocate(descriptor, kBufferCount, callback);
return bufferHandle;
}
-std::vector<const native_handle_t*> GrallocWrapper::allocate(
- const mapper2::BufferDescriptor& descriptor, uint32_t count, bool import, uint32_t* outStride) {
- std::vector<const native_handle_t*> bufferHandles;
- bufferHandles.reserve(count);
- mAllocator->allocate(descriptor, count,
- [&](const auto& tmpError, const auto& tmpStride, const auto& tmpBuffers) {
- if (mapper2::Error::NONE != tmpError) {
- ALOGE("Failed to allocate buffers");
- }
- if (count != tmpBuffers.size()) {
- ALOGE("Invalid buffer array");
- }
-
- for (uint32_t i = 0; i < count; i++) {
- if (import) {
- bufferHandles.push_back(importBuffer(tmpBuffers[i]));
- } else {
- bufferHandles.push_back(cloneBuffer(tmpBuffers[i]));
- }
- }
-
- if (outStride) {
- *outStride = tmpStride;
- }
- });
-
- return bufferHandles;
+template <typename AllocatorT, typename MapperT>
+void GrallocHalWrapper<AllocatorT, MapperT>::freeBuffer(native_handle_t* bufferHandle) {
+ auto error = mMapper->freeBuffer(bufferHandle);
+ if (!error.isOk() || failed(error)) {
+ ALOGE("Failed to free buffer %p", bufferHandle);
+ }
}
-const native_handle_t* GrallocWrapper::allocate(
- const mapper2::IMapper::BufferDescriptorInfo& descriptorInfo, bool import,
- uint32_t* outStride) {
- mapper2::BufferDescriptor descriptor = createDescriptor(descriptorInfo);
- auto buffers = allocate(descriptor, 1, import, outStride);
- return buffers[0];
-}
+template <typename AllocatorT, typename MapperT>
+BufferDescriptor GrallocHalWrapper<AllocatorT, MapperT>::getDescriptor(uint32_t size) {
+ typename MapperT::BufferDescriptorInfo descriptorInfo = {
+ .width = size,
+ .height = 1,
+ .layerCount = 1,
+            .format = static_cast<decltype(descriptorInfo.format)>(PixelFormat::BLOB),
+            .usage = kBufferUsage,
+ };
-sp<mapper2::IMapper> GrallocWrapper::getMapper() const {
- return mMapper;
-}
-
-mapper2::BufferDescriptor GrallocWrapper::createDescriptor(
- const mapper2::IMapper::BufferDescriptorInfo& descriptorInfo) {
- mapper2::BufferDescriptor descriptor;
- mMapper->createDescriptor(descriptorInfo, [&](const auto& tmpError, const auto& tmpDescriptor) {
- if (tmpError != mapper2::Error::NONE) {
- ALOGE("Failed to create descriptor");
+ BufferDescriptor descriptor;
+ auto callback = [&](auto error, const BufferDescriptor& tmpDescriptor) {
+ if (failed(error)) {
+ ALOGE("Failed to create descriptor: %" PRId32, static_cast<int32_t>(error));
+ } else {
+ descriptor = tmpDescriptor;
}
- descriptor = tmpDescriptor;
- });
+ };
+ mMapper->createDescriptor(descriptorInfo, callback);
return descriptor;
}
-const native_handle_t* GrallocWrapper::importBuffer(const hardware::hidl_handle& rawHandle) {
- const native_handle_t* bufferHandle = nullptr;
- mMapper->importBuffer(rawHandle, [&](const auto& tmpError, const auto& tmpBuffer) {
- if (tmpError != mapper2::Error::NONE) {
- ALOGE("Failed to import buffer %p", rawHandle.getNativeHandle());
- }
- bufferHandle = static_cast<const native_handle_t*>(tmpBuffer);
- });
+template <typename AllocatorT, typename MapperT>
+native_handle_t* GrallocHalWrapper<AllocatorT, MapperT>::importBuffer(
+ const hidl_handle& rawHandle) {
+ native_handle_t* bufferHandle = nullptr;
- if (bufferHandle) {
- mImportedBuffers.insert(bufferHandle);
- }
+ mMapper->importBuffer(rawHandle, [&](auto error, void* tmpBuffer) {
+ if (failed(error)) {
+ ALOGE("Failed to import buffer %p: %" PRId32, rawHandle.getNativeHandle(),
+ static_cast<int32_t>(error));
+ } else {
+ bufferHandle = static_cast<native_handle_t*>(tmpBuffer);
+ }
+ });
return bufferHandle;
}
-void GrallocWrapper::freeBuffer(const native_handle_t* bufferHandle) {
- auto buffer = const_cast<native_handle_t*>(bufferHandle);
-
- if (mImportedBuffers.erase(bufferHandle)) {
- mapper2::Error error = mMapper->freeBuffer(buffer);
- if (error != mapper2::Error::NONE) {
- ALOGE("Failed to free %p", buffer);
- }
- } else {
- mClonedBuffers.erase(bufferHandle);
- native_handle_close(buffer);
- native_handle_delete(buffer);
- }
-}
-
-void* GrallocWrapper::lock(const native_handle_t* bufferHandle, uint64_t cpuUsage,
- const mapper2::IMapper::Rect& accessRegion, int acquireFence) {
- auto buffer = const_cast<native_handle_t*>(bufferHandle);
-
- NATIVE_HANDLE_DECLARE_STORAGE(acquireFenceStorage, 1, 0);
- hardware::hidl_handle acquireFenceHandle;
- if (acquireFence >= 0) {
- auto h = native_handle_init(acquireFenceStorage, 1, 0);
- h->data[0] = acquireFence;
- acquireFenceHandle = h;
- }
+template <typename AllocatorT, typename MapperT>
+void* GrallocHalWrapper<AllocatorT, MapperT>::lock(native_handle_t* bufferHandle) {
+ // Per the HAL, all-zeros Rect means the entire buffer
+ typename MapperT::Rect accessRegion = {};
+ hidl_handle acquireFenceHandle; // No fence needed, already safe to lock
void* data = nullptr;
- mMapper->lock(buffer, cpuUsage, accessRegion, acquireFenceHandle,
- [&](const auto& tmpError, const auto& tmpData) {
- if (tmpError != mapper2::Error::NONE) {
- ALOGE("Failed to lock buffer %p", buffer);
+ mMapper->lock(bufferHandle, kBufferUsage, accessRegion, acquireFenceHandle,
+ [&](auto error, void* tmpData, ...) { // V3_0 passes extra args we don't use
+ if (failed(error)) {
+ ALOGE("Failed to lock buffer %p: %" PRId32, bufferHandle,
+ static_cast<int32_t>(error));
+ } else {
+ data = tmpData;
}
- data = tmpData;
});
- if (acquireFence >= 0) {
- close(acquireFence);
- }
-
return data;
}
-int GrallocWrapper::unlock(const native_handle_t* bufferHandle) {
- auto buffer = const_cast<native_handle_t*>(bufferHandle);
-
- int releaseFence = -1;
- mMapper->unlock(buffer, [&](const auto& tmpError, const auto& tmpReleaseFence) {
- if (tmpError != mapper2::Error::NONE) {
- ALOGE("Failed to unlock buffer %p", buffer);
- }
-
- auto fenceHandle = tmpReleaseFence.getNativeHandle();
- if (fenceHandle) {
- if (fenceHandle->numInts != 0) {
- ALOGE("Invalid fence handle %p", fenceHandle);
- }
- if (fenceHandle->numFds == 1) {
- releaseFence = dup(fenceHandle->data[0]);
- if (releaseFence < 0) {
- ALOGE("Failed to dup fence fd");
- }
- } else {
- if (fenceHandle->numFds != 0) {
- ALOGE("Invalid fence handle %p", fenceHandle);
- }
- }
+template <typename AllocatorT, typename MapperT>
+void GrallocHalWrapper<AllocatorT, MapperT>::unlock(native_handle_t* bufferHandle) {
+ mMapper->unlock(bufferHandle, [&](auto error, const hidl_handle& /*releaseFence*/) {
+ if (failed(error)) {
+ ALOGE("Failed to unlock buffer %p: %" PRId32, bufferHandle,
+ static_cast<int32_t>(error));
}
});
+}
- return releaseFence;
+} // anonymous namespace
+
+GrallocWrapper::GrallocWrapper() {
+ sp<IAllocator3> allocator3 = IAllocator3::getService();
+ sp<IMapper3> mapper3 = IMapper3::getService();
+
+ if (allocator3 != nullptr && mapper3 != nullptr) {
+ mGrallocHal = std::unique_ptr<IGrallocHalWrapper>(
+ new GrallocHalWrapper<IAllocator3, IMapper3>(allocator3, mapper3));
+ } else {
+ ALOGD("Graphics HALs 3.0 not found (allocator %d mapper %d), falling back to 2.x",
+ (allocator3 != nullptr), (mapper3 != nullptr));
+
+ sp<IAllocator2> allocator2 = IAllocator2::getService();
+ sp<IMapper2> mapper2 = IMapper2_1::getService();
+ if (mapper2 == nullptr) {
+ mapper2 = IMapper2::getService();
+ }
+
+ if (allocator2 != nullptr && mapper2 != nullptr) {
+ mGrallocHal = std::unique_ptr<IGrallocHalWrapper>(
+ new GrallocHalWrapper<IAllocator2, IMapper2>(allocator2, mapper2));
+ } else {
+ ALOGE("Couldn't open 2.x/3.0 graphics HALs (2.x allocator %d mapper %d)",
+ (allocator2 != nullptr), (mapper2 != nullptr));
+ }
+ }
+}
+
+GrallocWrapper::~GrallocWrapper() {
+ for (auto bufferHandle : mAllocatedBuffers) {
+ mGrallocHal->unlock(bufferHandle);
+ mGrallocHal->freeBuffer(bufferHandle);
+ }
+ mAllocatedBuffers.clear();
+}
+
+std::string GrallocWrapper::dumpDebugInfo() {
+ return mGrallocHal->dumpDebugInfo();
+}
+
+std::pair<native_handle_t*, void*> GrallocWrapper::allocate(uint32_t size) {
+ native_handle_t* bufferHandle = mGrallocHal->allocate(size);
+ void* buffer = nullptr;
+ if (bufferHandle) {
+ buffer = mGrallocHal->lock(bufferHandle);
+ if (buffer) {
+ mAllocatedBuffers.insert(bufferHandle);
+ } else {
+ mGrallocHal->freeBuffer(bufferHandle);
+ bufferHandle = nullptr;
+ }
+ }
+ return std::make_pair<>(bufferHandle, buffer);
+}
+
+void GrallocWrapper::freeBuffer(native_handle_t* bufferHandle) {
+ if (mAllocatedBuffers.erase(bufferHandle)) {
+ mGrallocHal->unlock(bufferHandle);
+ mGrallocHal->freeBuffer(bufferHandle);
+ }
}
} // namespace android
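
The pattern above (one template implementation plus overloaded failed() helpers spanning two unrelated HAL major versions) generalizes beyond gralloc. The following standalone sketch uses stand-in types only, nothing from the real graphics HALs, to show how the pieces fit together:

    #include <iostream>

    // Stand-ins for two HAL major versions whose error enums are unrelated types.
    enum class ErrorV2 { NONE, BAD_VALUE };
    enum class ErrorV3 { NONE, UNSUPPORTED };

    bool failed(ErrorV2 e) { return e != ErrorV2::NONE; }
    bool failed(ErrorV3 e) { return e != ErrorV3::NONE; }

    struct ServiceV2 { ErrorV2 doThing() { return ErrorV2::NONE; } };
    struct ServiceV3 { ErrorV3 doThing() { return ErrorV3::UNSUPPORTED; } };

    // One implementation body serves both services: overload resolution picks the matching
    // failed() for whichever error type doThing() returns.
    template <typename ServiceT>
    void callAndLog(ServiceT& service) {
        if (failed(service.doThing())) {
            std::cout << "call failed\n";
        } else {
            std::cout << "call succeeded\n";
        }
    }

    int main() {
        ServiceV2 v2;
        ServiceV3 v3;
        callAndLog(v2);  // prints "call succeeded"
        callAndLog(v3);  // prints "call failed"
        return 0;
    }
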
diff --git a/sensors/common/vts/utils/OWNERS b/sensors/common/vts/utils/OWNERS
index 759d87b..892da15 100644
--- a/sensors/common/vts/utils/OWNERS
+++ b/sensors/common/vts/utils/OWNERS
@@ -1,6 +1,7 @@
# Sensors team
+arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
# VTS team
trong@google.com
diff --git a/sensors/common/vts/utils/SensorsTestSharedMemory.cpp b/sensors/common/vts/utils/SensorsTestSharedMemory.cpp
index 819e297..3b068bd 100644
--- a/sensors/common/vts/utils/SensorsTestSharedMemory.cpp
+++ b/sensors/common/vts/utils/SensorsTestSharedMemory.cpp
@@ -119,32 +119,13 @@
}
case SharedMemType::GRALLOC: {
mGrallocWrapper = std::make_unique<::android::GrallocWrapper>();
- if (mGrallocWrapper->getAllocator() == nullptr ||
- mGrallocWrapper->getMapper() == nullptr) {
+ if (!mGrallocWrapper->isInitialized()) {
break;
}
- using android::hardware::graphics::common::V1_0::BufferUsage;
- using android::hardware::graphics::common::V1_0::PixelFormat;
- mapper2::IMapper::BufferDescriptorInfo buf_desc_info = {
- .width = static_cast<uint32_t>(size),
- .height = 1,
- .layerCount = 1,
- .usage = static_cast<uint64_t>(BufferUsage::SENSOR_DIRECT_DATA |
- BufferUsage::CPU_READ_OFTEN),
- .format = PixelFormat::BLOB};
- handle = const_cast<native_handle_t*>(mGrallocWrapper->allocate(buf_desc_info));
- if (handle != nullptr) {
- mapper2::IMapper::Rect region{0, 0, static_cast<int32_t>(buf_desc_info.width),
- static_cast<int32_t>(buf_desc_info.height)};
- buffer = static_cast<char*>(
- mGrallocWrapper->lock(handle, buf_desc_info.usage, region, /*fence=*/-1));
- if (buffer != nullptr) {
- break;
- }
- mGrallocWrapper->freeBuffer(handle);
- handle = nullptr;
- }
+ std::pair<native_handle_t*, void*> buf = mGrallocWrapper->allocate(size);
+ handle = buf.first;
+ buffer = static_cast<char*>(buf.second);
break;
}
default:
@@ -175,9 +156,7 @@
}
case SharedMemType::GRALLOC: {
if (mSize != 0) {
- mGrallocWrapper->unlock(mNativeHandle);
mGrallocWrapper->freeBuffer(mNativeHandle);
-
mNativeHandle = nullptr;
mSize = 0;
}
diff --git a/sensors/common/vts/utils/include/sensors-vts-utils/GrallocWrapper.h b/sensors/common/vts/utils/include/sensors-vts-utils/GrallocWrapper.h
index 3bd73c3..41e6334 100644
--- a/sensors/common/vts/utils/include/sensors-vts-utils/GrallocWrapper.h
+++ b/sensors/common/vts/utils/include/sensors-vts-utils/GrallocWrapper.h
@@ -14,66 +14,47 @@
* limitations under the License.
*/
-#ifndef GRALLO_WRAPPER_H_
-#define GRALLO_WRAPPER_H_
+#pragma once
+#include <utils/NativeHandle.h>
+
+#include <memory>
+#include <string>
#include <unordered_set>
-
-#include <android/hardware/graphics/allocator/2.0/IAllocator.h>
-#include <android/hardware/graphics/mapper/2.0/IMapper.h>
-
-namespace allocator2 = ::android::hardware::graphics::allocator::V2_0;
-namespace mapper2 = ::android::hardware::graphics::mapper::V2_0;
+#include <utility>
namespace android {
-// Modified from hardware/interfaces/graphics/mapper/2.0/vts/functional/
+class IGrallocHalWrapper;
+
+// Reference: hardware/interfaces/graphics/mapper/2.0/vts/functional/
class GrallocWrapper {
public:
GrallocWrapper();
~GrallocWrapper();
- sp<allocator2::IAllocator> getAllocator() const;
- sp<mapper2::IMapper> getMapper() const;
+ // After constructing this object, this function must be called to check the result. If it
+ // returns false, other methods are not safe to call.
+ bool isInitialized() const { return (mGrallocHal != nullptr); };
std::string dumpDebugInfo();
- // When import is false, this simply calls IAllocator::allocate. When import
- // is true, the returned buffers are also imported into the mapper.
- //
- // Either case, the returned buffers must be freed with freeBuffer.
- std::vector<const native_handle_t*> allocate(const mapper2::BufferDescriptor& descriptor,
- uint32_t count, bool import = true,
- uint32_t* outStride = nullptr);
- const native_handle_t* allocate(const mapper2::IMapper::BufferDescriptorInfo& descriptorInfo,
- bool import = true, uint32_t* outStride = nullptr);
+ // Allocates a gralloc buffer suitable for direct channel sensors usage with the given size.
+ // The buffer should be freed using freeBuffer when it's not needed anymore; otherwise it'll
+ // be freed when this object is destroyed.
+ // Returns a handle to the buffer, and a CPU-accessible pointer for reading. On failure, both
+ // will be set to nullptr.
+ std::pair<native_handle_t*, void*> allocate(uint32_t size);
- mapper2::BufferDescriptor createDescriptor(
- const mapper2::IMapper::BufferDescriptorInfo& descriptorInfo);
+ // Releases a gralloc buffer previously returned by allocate()
+ void freeBuffer(native_handle_t* bufferHandle);
- const native_handle_t* importBuffer(const hardware::hidl_handle& rawHandle);
- void freeBuffer(const native_handle_t* bufferHandle);
-
- // We use fd instead of hardware::hidl_handle in these functions to pass fences
- // in and out of the mapper. The ownership of the fd is always transferred
- // with each of these functions.
- void* lock(const native_handle_t* bufferHandle, uint64_t cpuUsage,
- const mapper2::IMapper::Rect& accessRegion, int acquireFence);
-
- int unlock(const native_handle_t* bufferHandle);
-
- private:
- void init();
- const native_handle_t* cloneBuffer(const hardware::hidl_handle& rawHandle);
-
- sp<allocator2::IAllocator> mAllocator;
- sp<mapper2::IMapper> mMapper;
+ private:
+ std::unique_ptr<IGrallocHalWrapper> mGrallocHal;
-    // Keep track of all cloned and imported handles. When a test fails with
-    // ASSERT_*, the destructor will free the handles for the test.
+    // Keep track of all allocated buffers. When a test fails with ASSERT_*,
+    // the destructor will free them for the test.
- std::unordered_set<const native_handle_t*> mClonedBuffers;
- std::unordered_set<const native_handle_t*> mImportedBuffers;
+ std::unordered_set<native_handle_t*> mAllocatedBuffers;
};
} // namespace android
-#endif // GRALLO_WRAPPER_H_
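
For completeness, a minimal caller-side sketch of the refactored API documented above. This is illustrative only, not part of the change; it assumes the sensors-vts-utils include path, a 1 KiB buffer, and abbreviated error handling.

    #include <sensors-vts-utils/GrallocWrapper.h>

    #include <utility>

    void exampleDirectChannelBuffer() {
        android::GrallocWrapper wrapper;
        if (!wrapper.isInitialized()) {
            return;  // Neither the 3.0 nor a 2.x allocator/mapper pair could be opened.
        }

        // allocate() returns the buffer handle plus a CPU pointer that is already locked for
        // reading; on failure both members are nullptr.
        std::pair<native_handle_t*, void*> buf = wrapper.allocate(1024);
        if (buf.first == nullptr) {
            return;
        }

        // ... register buf.first as a direct channel and read events through buf.second ...

        // freeBuffer() unlocks and releases the buffer; anything not freed explicitly is
        // released by the wrapper's destructor.
        wrapper.freeBuffer(buf.first);
    }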