Merge changes from topics "breakdown-usage-by-uid", "notify-device-interaction" into udc-qpr-dev
* changes:
Break down input device usage by uid
Notify MetricsCollector of device interaction from Dispatcher
diff --git a/data/etc/Android.bp b/data/etc/Android.bp
index 754e7b2..226cae1 100644
--- a/data/etc/Android.bp
+++ b/data/etc/Android.bp
@@ -329,6 +329,12 @@
}
prebuilt_etc {
+ name: "android.software.opengles.deqp.level-2023-03-01.prebuilt.xml",
+ src: "android.software.opengles.deqp.level-2023-03-01.xml",
+ defaults: ["frameworks_native_data_etc_defaults"],
+}
+
+prebuilt_etc {
name: "android.software.sip.voip.prebuilt.xml",
src: "android.software.sip.voip.xml",
defaults: ["frameworks_native_data_etc_defaults"],
@@ -353,6 +359,12 @@
}
prebuilt_etc {
+ name: "android.software.vulkan.deqp.level-2023-03-01.prebuilt.xml",
+ src: "android.software.vulkan.deqp.level-2023-03-01.xml",
+ defaults: ["frameworks_native_data_etc_defaults"],
+}
+
+prebuilt_etc {
name: "aosp_excluded_hardware.prebuilt.xml",
src: "aosp_excluded_hardware.xml",
defaults: ["frameworks_native_data_etc_defaults"],
diff --git a/libs/nativewindow/include/android/hardware_buffer_aidl.h b/libs/nativewindow/include/android/hardware_buffer_aidl.h
index 1659d54..e269f0d 100644
--- a/libs/nativewindow/include/android/hardware_buffer_aidl.h
+++ b/libs/nativewindow/include/android/hardware_buffer_aidl.h
@@ -34,6 +34,10 @@
#include <android/hardware_buffer.h>
#include <sys/cdefs.h>
+#ifdef __cplusplus
+#include <string>
+#endif
+
__BEGIN_DECLS
/**
@@ -142,6 +146,15 @@
return ret;
}
+ inline std::string toString() const {
+ if (!mBuffer) {
+ return "<HardwareBuffer: Invalid>";
+ }
+ uint64_t id = 0;
+ AHardwareBuffer_getId(mBuffer, &id);
+ return "<HardwareBuffer " + std::to_string(id) + ">";
+ }
+
private:
HardwareBuffer(const HardwareBuffer& other) = delete;
HardwareBuffer& operator=(const HardwareBuffer& other) = delete;
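The new toString() can be mirrored for a raw AHardwareBuffer* outside the wrapper class; a minimal sketch, assuming only the NDK calls the method itself uses (the helper name is illustrative, not part of this change):

    #include <android/hardware_buffer.h>
    #include <cstdint>
    #include <string>

    // Formats a buffer description the same way HardwareBuffer::toString()
    // above does, using the stable ID from AHardwareBuffer_getId().
    std::string describeBuffer(AHardwareBuffer* buffer) {
        if (!buffer) {
            return "<HardwareBuffer: Invalid>";
        }
        uint64_t id = 0;
        AHardwareBuffer_getId(buffer, &id);
        return "<HardwareBuffer " + std::to_string(id) + ">";
    }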
diff --git a/libs/ultrahdr/fuzzer/Android.bp b/libs/ultrahdr/fuzzer/Android.bp
index 27b38c3..6c0a2f5 100644
--- a/libs/ultrahdr/fuzzer/Android.bp
+++ b/libs/ultrahdr/fuzzer/Android.bp
@@ -24,7 +24,17 @@
cc_defaults {
name: "ultrahdr_fuzzer_defaults",
host_supported: true,
- static_libs: ["liblog"],
+ shared_libs: [
+ "libimage_io",
+ "libjpeg",
+ ],
+ static_libs: [
+ "libjpegdecoder",
+ "libjpegencoder",
+ "libultrahdr",
+ "libutils",
+ "liblog",
+ ],
target: {
darwin: {
enabled: false,
@@ -37,6 +47,8 @@
description: "The fuzzers target the APIs of jpeg hdr",
service_privilege: "constrained",
users: "multi_user",
+ fuzzed_code_usage: "future_version",
+ vector: "local_no_privileges_required",
},
}
@@ -46,20 +58,12 @@
srcs: [
"ultrahdr_enc_fuzzer.cpp",
],
- shared_libs: [
- "libimage_io",
- "libjpeg",
- "liblog",
- ],
- static_libs: [
- "libjpegdecoder",
- "libjpegencoder",
- "libultrahdr",
- "libutils",
- ],
- fuzz_config: {
- fuzzed_code_usage: "future_version",
- vector: "local_no_privileges_required",
- },
}
+cc_fuzz {
+ name: "ultrahdr_dec_fuzzer",
+ defaults: ["ultrahdr_fuzzer_defaults"],
+ srcs: [
+ "ultrahdr_dec_fuzzer.cpp",
+ ],
+}
diff --git a/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp b/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp
new file mode 100644
index 0000000..ad1d57a
--- /dev/null
+++ b/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// System include files
+#include <cfloat>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <iostream>
+#include <vector>
+
+// User include files
+#include "ultrahdr/jpegr.h"
+
+using namespace android::ultrahdr;
+
+// Output formats for decoded image data; keep in sync with ultrahdr.h
+const int kOfMin = ULTRAHDR_OUTPUT_UNSPECIFIED + 1;
+const int kOfMax = ULTRAHDR_OUTPUT_MAX;
+
+class UltraHdrDecFuzzer {
+public:
+ UltraHdrDecFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+ void process();
+
+private:
+ FuzzedDataProvider mFdp;
+};
+
+void UltraHdrDecFuzzer::process() {
+ // hdr_of
+ auto of = static_cast<ultrahdr_output_format>(mFdp.ConsumeIntegralInRange<int>(kOfMin, kOfMax));
+ auto buffer = mFdp.ConsumeRemainingBytes<uint8_t>();
+ jpegr_compressed_struct jpegImgR{buffer.data(), (int)buffer.size(), (int)buffer.size(),
+ ULTRAHDR_COLORGAMUT_UNSPECIFIED};
+
+ std::vector<uint8_t> iccData(0);
+ std::vector<uint8_t> exifData(0);
+ jpegr_info_struct info{0, 0, &iccData, &exifData};
+ JpegR jpegHdr;
+ (void)jpegHdr.getJPEGRInfo(&jpegImgR, &info);
+//#define DUMP_PARAM
+#ifdef DUMP_PARAM
+ std::cout << "input buffer size " << jpegImgR.length << std::endl;
+    std::cout << "image dimensions " << info.width << " x " << info.height << std::endl;
+#endif
+ size_t outSize = info.width * info.height * ((of == ULTRAHDR_OUTPUT_SDR) ? 4 : 8);
+ jpegr_uncompressed_struct decodedJpegR;
+ auto decodedRaw = std::make_unique<uint8_t[]>(outSize);
+ decodedJpegR.data = decodedRaw.get();
+ ultrahdr_metadata_struct metadata;
+ jpegr_uncompressed_struct decodedGainMap{};
+ (void)jpegHdr.decodeJPEGR(&jpegImgR, &decodedJpegR,
+ mFdp.ConsumeFloatingPointInRange<float>(1.0, FLT_MAX), nullptr, of,
+ &decodedGainMap, &metadata);
+ if (decodedGainMap.data) free(decodedGainMap.data);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ UltraHdrDecFuzzer fuzzHandle(data, size);
+ fuzzHandle.process();
+ return 0;
+}
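A finding from this fuzzer can be replayed without the libFuzzer runtime by linking the file against a small driver; a sketch, assuming a crash input is passed as argv[1] (the driver is not part of this change):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);

    int main(int argc, char** argv) {
        if (argc < 2) return 1;
        FILE* f = fopen(argv[1], "rb");
        if (!f) return 1;
        fseek(f, 0, SEEK_END);
        long len = ftell(f);
        fseek(f, 0, SEEK_SET);
        std::vector<uint8_t> buf(len > 0 ? static_cast<size_t>(len) : 0);
        size_t read = fread(buf.data(), 1, buf.size(), f);
        fclose(f);
        return LLVMFuzzerTestOneInput(buf.data(), read);
    }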
diff --git a/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp b/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
index 7faa157..acb9b79 100644
--- a/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
+++ b/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
@@ -55,12 +55,9 @@
const int kQfMin = 0;
const int kQfMax = 100;
-// seed
-const unsigned kSeed = 0x7ab7;
-
-class JpegHDRFuzzer {
+class UltraHdrEncFuzzer {
public:
- JpegHDRFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+ UltraHdrEncFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
void process();
void fillP010Buffer(uint16_t* data, int width, int height, int stride);
void fill420Buffer(uint8_t* data, int size);
@@ -69,7 +66,7 @@
FuzzedDataProvider mFdp;
};
-void JpegHDRFuzzer::fillP010Buffer(uint16_t* data, int width, int height, int stride) {
+void UltraHdrEncFuzzer::fillP010Buffer(uint16_t* data, int width, int height, int stride) {
uint16_t* tmp = data;
std::vector<uint16_t> buffer(16);
for (int i = 0; i < buffer.size(); i++) {
@@ -78,22 +75,24 @@
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i += buffer.size()) {
memcpy(data + i, buffer.data(), std::min((int)buffer.size(), (width - i)));
- std::shuffle(buffer.begin(), buffer.end(), std::default_random_engine(kSeed));
+ std::shuffle(buffer.begin(), buffer.end(),
+ std::default_random_engine(std::random_device{}()));
}
tmp += stride;
}
}
-void JpegHDRFuzzer::fill420Buffer(uint8_t* data, int size) {
+void UltraHdrEncFuzzer::fill420Buffer(uint8_t* data, int size) {
std::vector<uint8_t> buffer(16);
mFdp.ConsumeData(buffer.data(), buffer.size());
for (int i = 0; i < size; i += buffer.size()) {
memcpy(data + i, buffer.data(), std::min((int)buffer.size(), (size - i)));
- std::shuffle(buffer.begin(), buffer.end(), std::default_random_engine(kSeed));
+ std::shuffle(buffer.begin(), buffer.end(),
+ std::default_random_engine(std::random_device{}()));
}
}
-void JpegHDRFuzzer::process() {
+void UltraHdrEncFuzzer::process() {
while (mFdp.remaining_bytes()) {
struct jpegr_uncompressed_struct p010Img {};
struct jpegr_uncompressed_struct yuv420Img {};
@@ -256,7 +255,7 @@
} else if (tf == ULTRAHDR_TF_PQ) {
metadata.maxContentBoost = kPqMaxNits / kSdrWhiteNits;
} else {
- metadata.maxContentBoost = 0;
+ metadata.maxContentBoost = 1.0f;
}
metadata.minContentBoost = 1.0f;
status = jpegHdr.encodeJPEGR(&jpegImg, &jpegGainMap, &metadata, &jpegImgR);
@@ -293,7 +292,7 @@
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- JpegHDRFuzzer fuzzHandle(data, size);
+ UltraHdrEncFuzzer fuzzHandle(data, size);
fuzzHandle.process();
return 0;
}
diff --git a/libs/ultrahdr/gainmapmath.cpp b/libs/ultrahdr/gainmapmath.cpp
index 37c3cf3..ee15363 100644
--- a/libs/ultrahdr/gainmapmath.cpp
+++ b/libs/ultrahdr/gainmapmath.cpp
@@ -119,34 +119,39 @@
return (value < 0.0f) ? 0.0f : (value > kMaxPixelFloat) ? kMaxPixelFloat : value;
}
-// See IEC 61966-2-1, Equation F.7.
+// See IEC 61966-2-1/Amd 1:2003, Equation F.7.
static const float kSrgbR = 0.2126f, kSrgbG = 0.7152f, kSrgbB = 0.0722f;
float srgbLuminance(Color e) {
return kSrgbR * e.r + kSrgbG * e.g + kSrgbB * e.b;
}
-// See ECMA TR/98, Section 7.
-static const float kSrgbRCr = 1.402f, kSrgbGCb = 0.34414f, kSrgbGCr = 0.71414f, kSrgbBCb = 1.772f;
-
-Color srgbYuvToRgb(Color e_gamma) {
- return {{{ clampPixelFloat(e_gamma.y + kSrgbRCr * e_gamma.v),
- clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v),
- clampPixelFloat(e_gamma.y + kSrgbBCb * e_gamma.u) }}};
-}
-
-// See ECMA TR/98, Section 7.
-static const float kSrgbYR = 0.299f, kSrgbYG = 0.587f, kSrgbYB = 0.114f;
-static const float kSrgbUR = -0.1687f, kSrgbUG = -0.3313f, kSrgbUB = 0.5f;
-static const float kSrgbVR = 0.5f, kSrgbVG = -0.4187f, kSrgbVB = -0.0813f;
+// See ITU-R BT.709-6, Section 3.
+// Uses the same coefficients for deriving luma signal as
+// IEC 61966-2-1/Amd 1:2003 states for luminance, so we reuse the luminance
+// function above.
+static const float kSrgbCb = 1.8556f, kSrgbCr = 1.5748f;
Color srgbRgbToYuv(Color e_gamma) {
- return {{{ kSrgbYR * e_gamma.r + kSrgbYG * e_gamma.g + kSrgbYB * e_gamma.b,
- kSrgbUR * e_gamma.r + kSrgbUG * e_gamma.g + kSrgbUB * e_gamma.b,
- kSrgbVR * e_gamma.r + kSrgbVG * e_gamma.g + kSrgbVB * e_gamma.b }}};
+ float y_gamma = srgbLuminance(e_gamma);
+ return {{{ y_gamma,
+ (e_gamma.b - y_gamma) / kSrgbCb,
+ (e_gamma.r - y_gamma) / kSrgbCr }}};
}
-// See IEC 61966-2-1, Equations F.5 and F.6.
+// See ITU-R BT.709-6, Section 3.
+// Same derivation as for BT.2100's YUV->RGB below. As in srgbRgbToYuv, we
+// can reuse the luminance coefficients since they are the same.
+static const float kSrgbGCb = kSrgbB * kSrgbCb / kSrgbG;
+static const float kSrgbGCr = kSrgbR * kSrgbCr / kSrgbG;
+
+Color srgbYuvToRgb(Color e_gamma) {
+ return {{{ clampPixelFloat(e_gamma.y + kSrgbCr * e_gamma.v),
+ clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v),
+ clampPixelFloat(e_gamma.y + kSrgbCb * e_gamma.u) }}};
+}
+
+// See IEC 61966-2-1/Amd 1:2003, Equations F.5 and F.6.
float srgbInvOetf(float e_gamma) {
if (e_gamma <= 0.04045f) {
return e_gamma / 12.92f;
@@ -178,13 +183,38 @@
////////////////////////////////////////////////////////////////////////////////
// Display-P3 transformations
-// See SMPTE EG 432-1, Table 7-2.
+// See SMPTE EG 432-1, Equation 7-8.
static const float kP3R = 0.20949f, kP3G = 0.72160f, kP3B = 0.06891f;
float p3Luminance(Color e) {
return kP3R * e.r + kP3G * e.g + kP3B * e.b;
}
+// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
+// Unfortunately, calculation of luma signal differs from calculation of
+// luminance for Display-P3, so we can't reuse p3Luminance here.
+static const float kP3YR = 0.299f, kP3YG = 0.587f, kP3YB = 0.114f;
+static const float kP3Cb = 1.772f, kP3Cr = 1.402f;
+
+Color p3RgbToYuv(Color e_gamma) {
+ float y_gamma = kP3YR * e_gamma.r + kP3YG * e_gamma.g + kP3YB * e_gamma.b;
+ return {{{ y_gamma,
+ (e_gamma.b - y_gamma) / kP3Cb,
+ (e_gamma.r - y_gamma) / kP3Cr }}};
+}
+
+// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
+// Same derivation as for BT.2100's YUV->RGB below. As in p3RgbToYuv, we must
+// use the luma signal coefficients rather than the luminance coefficients.
+static const float kP3GCb = kP3YB * kP3Cb / kP3YG;
+static const float kP3GCr = kP3YR * kP3Cr / kP3YG;
+
+Color p3YuvToRgb(Color e_gamma) {
+ return {{{ clampPixelFloat(e_gamma.y + kP3Cr * e_gamma.v),
+ clampPixelFloat(e_gamma.y - kP3GCb * e_gamma.u - kP3GCr * e_gamma.v),
+ clampPixelFloat(e_gamma.y + kP3Cb * e_gamma.u) }}};
+}
+
////////////////////////////////////////////////////////////////////////////////
// BT.2100 transformations - according to ITU-R BT.2100-2
@@ -197,6 +227,8 @@
}
// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
+// BT.2100 uses the same coefficients for calculating luma signal and luminance,
+// so we reuse the luminance function here.
static const float kBt2100Cb = 1.8814f, kBt2100Cr = 1.4746f;
Color bt2100RgbToYuv(Color e_gamma) {
@@ -206,6 +238,10 @@
(e_gamma.r - y_gamma) / kBt2100Cr }}};
}
+// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
+//
+// Similar to bt2100RgbToYuv above, we can reuse the luminance coefficients.
+//
// Derived by inverting bt2100RgbToYuv. The derivations for R and B are pretty
// straightforward; we just invert the formulas for U and V above. But deriving
// the formula for G is a bit more complicated:
@@ -440,6 +476,85 @@
}
}
+// All of these conversions are derived from the respective input YUV->RGB conversion followed by
+// the RGB->YUV for the receiving encoding. They are consistent with the RGB<->YUV functions in this
+// file, given that we use BT.709 encoding for sRGB and BT.601 encoding for Display-P3, to match
+// DataSpace.
+
+Color yuv709To601(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + 0.101579f * e_gamma.u + 0.196076f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.989854f * e_gamma.u + -0.110653f * e_gamma.v,
+ 0.0f * e_gamma.y + -0.072453f * e_gamma.u + 0.983398f * e_gamma.v }}};
+}
+
+Color yuv709To2100(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + -0.016969f * e_gamma.u + 0.096312f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.995306f * e_gamma.u + -0.051192f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.011507f * e_gamma.u + 1.002637f * e_gamma.v }}};
+}
+
+Color yuv601To709(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + -0.118188f * e_gamma.u + -0.212685f * e_gamma.v,
+ 0.0f * e_gamma.y + 1.018640f * e_gamma.u + 0.114618f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.075049f * e_gamma.u + 1.025327f * e_gamma.v }}};
+}
+
+Color yuv601To2100(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + -0.128245f * e_gamma.u + -0.115879f * e_gamma.v,
+ 0.0f * e_gamma.y + 1.010016f * e_gamma.u + 0.061592f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.086969f * e_gamma.u + 1.029350f * e_gamma.v }}};
+}
+
+Color yuv2100To709(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + 0.018149f * e_gamma.u + -0.095132f * e_gamma.v,
+ 0.0f * e_gamma.y + 1.004123f * e_gamma.u + 0.051267f * e_gamma.v,
+ 0.0f * e_gamma.y + -0.011524f * e_gamma.u + 0.996782f * e_gamma.v }}};
+}
+
+Color yuv2100To601(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + 0.117887f * e_gamma.u + 0.105521f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.995211f * e_gamma.u + -0.059549f * e_gamma.v,
+ 0.0f * e_gamma.y + -0.084085f * e_gamma.u + 0.976518f * e_gamma.v }}};
+}
+
+void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
+ ColorTransformFn fn) {
+ Color yuv1 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2 );
+ Color yuv2 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 );
+ Color yuv3 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2 + 1);
+ Color yuv4 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 + 1);
+
+ yuv1 = fn(yuv1);
+ yuv2 = fn(yuv2);
+ yuv3 = fn(yuv3);
+ yuv4 = fn(yuv4);
+
+ Color new_uv = (yuv1 + yuv2 + yuv3 + yuv4) / 4.0f;
+
+ size_t pixel_y1_idx = x_chroma * 2 + y_chroma * 2 * image->width;
+ size_t pixel_y2_idx = (x_chroma * 2 + 1) + y_chroma * 2 * image->width;
+ size_t pixel_y3_idx = x_chroma * 2 + (y_chroma * 2 + 1) * image->width;
+ size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->width;
+
+ uint8_t& y1_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y1_idx];
+ uint8_t& y2_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y2_idx];
+ uint8_t& y3_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y3_idx];
+ uint8_t& y4_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y4_idx];
+
+ size_t pixel_count = image->width * image->height;
+ size_t pixel_uv_idx = x_chroma + y_chroma * (image->width / 2);
+
+ uint8_t& u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
+ uint8_t& v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];
+
+ y1_uint = static_cast<uint8_t>(floor(yuv1.y * 255.0f + 0.5f));
+ y2_uint = static_cast<uint8_t>(floor(yuv2.y * 255.0f + 0.5f));
+ y3_uint = static_cast<uint8_t>(floor(yuv3.y * 255.0f + 0.5f));
+ y4_uint = static_cast<uint8_t>(floor(yuv4.y * 255.0f + 0.5f));
+
+ u_uint = static_cast<uint8_t>(floor(new_uv.u * 255.0f + 128.0f + 0.5f));
+ v_uint = static_cast<uint8_t>(floor(new_uv.v * 255.0f + 128.0f + 0.5f));
+}
////////////////////////////////////////////////////////////////////////////////
// Gain map calculations
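The precomputed cross-encoding matrices above can be sanity-checked by composing the per-gamut conversions this file already defines (the same composition the G-channel coefficient derivations rely on). A test sketch, assuming an in-gamut pixel so that clampPixelFloat() never fires:

    #include <cassert>
    #include <cmath>

    #include <ultrahdr/gainmapmath.h>

    using namespace android::ultrahdr;

    void checkYuv709To601() {
        // Mid-gray with a small chroma offset keeps the intermediate RGB
        // within [0, 1], so the clamps in srgbYuvToRgb() are no-ops.
        Color in = {{{ 0.5f, 0.05f, -0.05f }}};
        Color direct = yuv709To601(in);                 // precomputed matrix
        Color composed = p3RgbToYuv(srgbYuvToRgb(in));  // 709 decode, 601 encode
        assert(std::abs(direct.y - composed.y) < 1e-4f);
        assert(std::abs(direct.u - composed.u) < 1e-4f);
        assert(std::abs(direct.v - composed.v) < 1e-4f);
    }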
diff --git a/libs/ultrahdr/icc.cpp b/libs/ultrahdr/icc.cpp
index c807705..1ab3c7c 100644
--- a/libs/ultrahdr/icc.cpp
+++ b/libs/ultrahdr/icc.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#ifndef USE_BIG_ENDIAN
+#define USE_BIG_ENDIAN true
+#endif
+
#include <ultrahdr/icc.h>
#include <ultrahdr/gainmapmath.h>
#include <vector>
@@ -180,7 +184,7 @@
uint32_t total_length = text_length * 2 + sizeof(header);
total_length = (((total_length + 2) >> 2) << 2); // 4 aligned
- sp<DataStruct> dataStruct = new DataStruct(total_length);
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
if (!dataStruct->write(header, sizeof(header))) {
ALOGE("write_text_tag(): error in writing data");
@@ -204,7 +208,7 @@
static_cast<uint32_t>(Endian_SwapBE32(float_round_to_fixed(y))),
static_cast<uint32_t>(Endian_SwapBE32(float_round_to_fixed(z))),
};
- sp<DataStruct> dataStruct = new DataStruct(sizeof(data));
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(sizeof(data));
dataStruct->write(&data, sizeof(data));
return dataStruct;
}
@@ -212,7 +216,7 @@
sp<DataStruct> IccHelper::write_trc_tag(const int table_entries, const void* table_16) {
int total_length = 4 + 4 + 4 + table_entries * 2;
total_length = (((total_length + 2) >> 2) << 2); // 4 aligned
- sp<DataStruct> dataStruct = new DataStruct(total_length);
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
dataStruct->write32(Endian_SwapBE32(kTAG_CurveType)); // Type
dataStruct->write32(0); // Reserved
dataStruct->write32(Endian_SwapBE32(table_entries)); // Value count
@@ -225,7 +229,7 @@
sp<DataStruct> IccHelper::write_trc_tag_for_linear() {
int total_length = 16;
- sp<DataStruct> dataStruct = new DataStruct(total_length);
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
dataStruct->write32(Endian_SwapBE32(kTAG_ParaCurveType)); // Type
dataStruct->write32(0); // Reserved
dataStruct->write32(Endian_SwapBE16(kExponential_ParaCurveType));
@@ -263,7 +267,7 @@
sp<DataStruct> IccHelper::write_cicp_tag(uint32_t color_primaries,
uint32_t transfer_characteristics) {
int total_length = 12; // 4 + 4 + 1 + 1 + 1 + 1
- sp<DataStruct> dataStruct = new DataStruct(total_length);
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
dataStruct->write32(Endian_SwapBE32(kTAG_cicp)); // Type signature
dataStruct->write32(0); // Reserved
dataStruct->write8(color_primaries); // Color primaries
@@ -314,7 +318,7 @@
int total_length = 20 + 2 * value_count;
total_length = (((total_length + 2) >> 2) << 2); // 4 aligned
- sp<DataStruct> dataStruct = new DataStruct(total_length);
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
for (size_t i = 0; i < 16; ++i) {
dataStruct->write8(i < kNumChannels ? grid_points[i] : 0); // Grid size
@@ -372,7 +376,7 @@
total_length += a_curves_data[i]->getLength();
}
}
- sp<DataStruct> dataStruct = new DataStruct(total_length);
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
dataStruct->write32(Endian_SwapBE32(type)); // Type signature
dataStruct->write32(0); // Reserved
dataStruct->write8(kNumChannels); // Input channels
@@ -421,7 +425,7 @@
break;
default:
// Should not fall here.
- return new DataStruct(0);
+ return nullptr;
}
// Compute primaries.
@@ -540,13 +544,21 @@
size_t tag_table_size = kICCTagTableEntrySize * tags.size();
size_t profile_size = kICCHeaderSize + tag_table_size + tag_data_size;
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(profile_size + kICCIdentifierSize);
+
+ // Write identifier, chunk count, and chunk ID
+ if (!dataStruct->write(kICCIdentifier, sizeof(kICCIdentifier)) ||
+ !dataStruct->write8(1) || !dataStruct->write8(1)) {
+ ALOGE("writeIccProfile(): error in identifier");
+ return dataStruct;
+ }
+
// Write the header.
header.data_color_space = Endian_SwapBE32(Signature_RGB);
header.pcs = Endian_SwapBE32(tf == ULTRAHDR_TF_PQ ? Signature_Lab : Signature_XYZ);
header.size = Endian_SwapBE32(profile_size);
header.tag_count = Endian_SwapBE32(tags.size());
- sp<DataStruct> dataStruct = new DataStruct(profile_size);
if (!dataStruct->write(&header, sizeof(header))) {
ALOGE("writeIccProfile(): error in header");
return dataStruct;
@@ -582,4 +594,84 @@
return dataStruct;
}
-} // namespace android::ultrahdr
\ No newline at end of file
+bool IccHelper::tagsEqualToMatrix(const Matrix3x3& matrix,
+ const uint8_t* red_tag,
+ const uint8_t* green_tag,
+ const uint8_t* blue_tag) {
+ sp<DataStruct> red_tag_test = write_xyz_tag(matrix.vals[0][0], matrix.vals[1][0],
+ matrix.vals[2][0]);
+ sp<DataStruct> green_tag_test = write_xyz_tag(matrix.vals[0][1], matrix.vals[1][1],
+ matrix.vals[2][1]);
+ sp<DataStruct> blue_tag_test = write_xyz_tag(matrix.vals[0][2], matrix.vals[1][2],
+ matrix.vals[2][2]);
+ return memcmp(red_tag, red_tag_test->getData(), kColorantTagSize) == 0 &&
+ memcmp(green_tag, green_tag_test->getData(), kColorantTagSize) == 0 &&
+ memcmp(blue_tag, blue_tag_test->getData(), kColorantTagSize) == 0;
+}
+
+ultrahdr_color_gamut IccHelper::readIccColorGamut(void* icc_data, size_t icc_size) {
+ // Each tag table entry consists of 3 fields of 4 bytes each.
+ static const size_t kTagTableEntrySize = 12;
+
+ if (icc_data == nullptr || icc_size < sizeof(ICCHeader) + kICCIdentifierSize) {
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+ }
+
+ if (memcmp(icc_data, kICCIdentifier, sizeof(kICCIdentifier)) != 0) {
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+ }
+
+ uint8_t* icc_bytes = reinterpret_cast<uint8_t*>(icc_data) + kICCIdentifierSize;
+
+ ICCHeader* header = reinterpret_cast<ICCHeader*>(icc_bytes);
+
+ // Use 0 to indicate not found, since offsets are always relative to start
+ // of ICC data and therefore a tag offset of zero would never be valid.
+ size_t red_primary_offset = 0, green_primary_offset = 0, blue_primary_offset = 0;
+ size_t red_primary_size = 0, green_primary_size = 0, blue_primary_size = 0;
+ for (size_t tag_idx = 0; tag_idx < Endian_SwapBE32(header->tag_count); ++tag_idx) {
+ uint32_t* tag_entry_start = reinterpret_cast<uint32_t*>(
+ icc_bytes + sizeof(ICCHeader) + tag_idx * kTagTableEntrySize);
+ // first 4 bytes are the tag signature, next 4 bytes are the tag offset,
+ // last 4 bytes are the tag length in bytes.
+ if (red_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_rXYZ)) {
+ red_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+ red_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+ } else if (green_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_gXYZ)) {
+ green_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+ green_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+ } else if (blue_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_bXYZ)) {
+ blue_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+ blue_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+ }
+ }
+
+ if (red_primary_offset == 0 || red_primary_size != kColorantTagSize ||
+ kICCIdentifierSize + red_primary_offset + red_primary_size > icc_size ||
+ green_primary_offset == 0 || green_primary_size != kColorantTagSize ||
+ kICCIdentifierSize + green_primary_offset + green_primary_size > icc_size ||
+ blue_primary_offset == 0 || blue_primary_size != kColorantTagSize ||
+ kICCIdentifierSize + blue_primary_offset + blue_primary_size > icc_size) {
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+ }
+
+ uint8_t* red_tag = icc_bytes + red_primary_offset;
+ uint8_t* green_tag = icc_bytes + green_primary_offset;
+ uint8_t* blue_tag = icc_bytes + blue_primary_offset;
+
+ // Serialize tags as we do on encode and compare what we find to that to
+ // determine the gamut (since we don't have a need yet for full deserialize).
+ if (tagsEqualToMatrix(kSRGB, red_tag, green_tag, blue_tag)) {
+ return ULTRAHDR_COLORGAMUT_BT709;
+ } else if (tagsEqualToMatrix(kDisplayP3, red_tag, green_tag, blue_tag)) {
+ return ULTRAHDR_COLORGAMUT_P3;
+ } else if (tagsEqualToMatrix(kRec2020, red_tag, green_tag, blue_tag)) {
+ return ULTRAHDR_COLORGAMUT_BT2100;
+ }
+
+ // Didn't find a match to one of the profiles we write; indicate the gamut
+ // is unspecified since we don't understand it.
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+}
+
+} // namespace android::ultrahdr
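The identifier block written by writeIccProfile() follows the standard ICC-in-JPEG APP2 embedding; a layout sketch of the bytes it now emits ahead of the profile header (the struct is illustrative only):

    #include <cstdint>

    // APP2 payload layout for a single-chunk embedded ICC profile.
    struct IccApp2Payload {
        char identifier[12];   // "ICC_PROFILE" plus NUL terminator
        uint8_t chunk_index;   // 1-based index of this chunk; 1 here
        uint8_t chunk_count;   // total number of chunks; also 1 here
        // ...followed by the ICC profile: header, tag table, tag data.
    };
    static_assert(sizeof(IccApp2Payload) == 14,
                  "identifier block matches kICCIdentifierSize");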
diff --git a/libs/ultrahdr/include/ultrahdr/gainmapmath.h b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
index abc9356..13832db 100644
--- a/libs/ultrahdr/include/ultrahdr/gainmapmath.h
+++ b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
@@ -218,24 +218,30 @@
// except for those concerning transfer functions.
/*
- * Calculate the luminance of a linear RGB sRGB pixel, according to IEC 61966-2-1.
+ * Calculate the luminance of a linear RGB sRGB pixel, according to
+ * IEC 61966-2-1/Amd 1:2003.
*
* [0.0, 1.0] range in and out.
*/
float srgbLuminance(Color e);
/*
- * Convert from OETF'd srgb YUV to RGB, according to ECMA TR/98.
+ * Convert from OETF'd srgb RGB to YUV, according to ITU-R BT.709-6.
+ *
+ * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color srgbRgbToYuv(Color e_gamma);
+
+
+/*
+ * Convert from OETF'd srgb YUV to RGB, according to ITU-R BT.709-6.
+ *
+ * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
*/
Color srgbYuvToRgb(Color e_gamma);
/*
- * Convert from OETF'd srgb RGB to YUV, according to ECMA TR/98.
- */
-Color srgbRgbToYuv(Color e_gamma);
-
-/*
- * Convert from srgb to linear, according to IEC 61966-2-1.
+ * Convert from srgb to linear, according to IEC 61966-2-1/Amd 1:2003.
*
* [0.0, 1.0] range in and out.
*/
@@ -257,6 +263,20 @@
*/
float p3Luminance(Color e);
+/*
+ * Convert from OETF'd P3 RGB to YUV, according to ITU-R BT.601-7.
+ *
+ * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color p3RgbToYuv(Color e_gamma);
+
+/*
+ * Convert from OETF'd P3 YUV to RGB, according to ITU-R BT.601-7.
+ *
+ * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color p3YuvToRgb(Color e_gamma);
+
////////////////////////////////////////////////////////////////////////////////
// BT.2100 transformations - according to ITU-R BT.2100-2
@@ -269,12 +289,16 @@
float bt2100Luminance(Color e);
/*
- * Convert from OETF'd BT.2100 RGB to YUV.
+ * Convert from OETF'd BT.2100 RGB to YUV, according to ITU-R BT.2100-2.
+ *
+ * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
*/
Color bt2100RgbToYuv(Color e_gamma);
/*
- * Convert from OETF'd BT.2100 YUV to RGB.
+ * Convert from OETF'd BT.2100 YUV to RGB, according to ITU-R BT.2100-2.
+ *
+ * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
*/
Color bt2100YuvToRgb(Color e_gamma);
@@ -358,6 +382,31 @@
*/
ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, ultrahdr_color_gamut hdr_gamut);
+/*
+ * Convert between YUV encodings, according to ITU-R BT.709-6, ITU-R BT.601-7, and ITU-R BT.2100-2.
+ *
+ * Bt.709 and Bt.2100 have well-defined YUV encodings; Display-P3's is less well defined, but is
+ * treated as Bt.601 by DataSpace, hence we do the same.
+ */
+Color yuv709To601(Color e_gamma);
+Color yuv709To2100(Color e_gamma);
+Color yuv601To709(Color e_gamma);
+Color yuv601To2100(Color e_gamma);
+Color yuv2100To709(Color e_gamma);
+Color yuv2100To601(Color e_gamma);
+
+/*
+ * Performs a transformation at the chroma x and y coordinates provided on a YUV420 image.
+ *
+ * Applies the transformation by computing the transformed YUV for each of the 4 Y values that
+ * share a single UV pair; each Y keeps its own result, and the UV pair gets the averaged result.
+ *
+ * x_chroma and y_chroma must be less than half the image's width and height respectively, since
+ * the input is 4:2:0 subsampled.
+ */
+void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
+ ColorTransformFn fn);
+
////////////////////////////////////////////////////////////////////////////////
// Gain map calculations
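A usage sketch for transformYuv420(), assuming a caller that visits every chroma site of a 4:2:0 image (this mirrors how JpegR::convertYuv() is expected to drive it):

    // x_chroma/y_chroma index UV samples, so they run to half the luma
    // dimensions; each call rewrites 4 Y values and 1 UV pair in-place.
    for (size_t y = 0; y < image->height / 2; ++y) {
        for (size_t x = 0; x < image->width / 2; ++x) {
            transformYuv420(image, x, y, yuv709To601);
        }
    }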
diff --git a/libs/ultrahdr/include/ultrahdr/icc.h b/libs/ultrahdr/include/ultrahdr/icc.h
index 7f6ab88..7f047f8 100644
--- a/libs/ultrahdr/include/ultrahdr/icc.h
+++ b/libs/ultrahdr/include/ultrahdr/icc.h
@@ -56,12 +56,16 @@
Signature_XYZ = 0x58595A20,
};
-
typedef uint32_t FourByteTag;
static inline constexpr FourByteTag SetFourByteTag(char a, char b, char c, char d) {
return (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d);
}
+static constexpr char kICCIdentifier[] = "ICC_PROFILE";
+// 12 for the actual identifier, +2 for the chunk count and chunk index which
+// will always follow.
+static constexpr size_t kICCIdentifierSize = 14;
+
// This is equal to the header size according to the ICC specification (128)
// plus the size of the tag count (4). We include the tag count since we
// always require it to be present anyway.
@@ -70,6 +74,10 @@
// Contains a signature (4), offset (4), and size (4).
static constexpr size_t kICCTagTableEntrySize = 12;
+// size should be 20; 4 bytes for type descriptor, 4 bytes reserved, 12
+// bytes for a single XYZ number type (4 bytes per coordinate).
+static constexpr size_t kColorantTagSize = 20;
+
static constexpr uint32_t kDisplay_Profile = SetFourByteTag('m', 'n', 't', 'r');
static constexpr uint32_t kRGB_ColorSpace = SetFourByteTag('R', 'G', 'B', ' ');
static constexpr uint32_t kXYZ_PCSSpace = SetFourByteTag('X', 'Y', 'Z', ' ');
@@ -225,10 +233,23 @@
static void compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]);
static sp<DataStruct> write_clut(const uint8_t* grid_points, const uint8_t* grid_16);
+ // Checks if a set of xyz tags is equivalent to a 3x3 Matrix. Each input
+    // tag buffer is assumed to be at least kColorantTagSize in size.
+ static bool tagsEqualToMatrix(const Matrix3x3& matrix,
+ const uint8_t* red_tag,
+ const uint8_t* green_tag,
+ const uint8_t* blue_tag);
+
public:
+ // Output includes JPEG embedding identifier and chunk information, but not
+ // APPx information.
static sp<DataStruct> writeIccProfile(const ultrahdr_transfer_function tf,
const ultrahdr_color_gamut gamut);
+    // NOTE: this function is not robust; it can only infer the gamuts that
+    // IccHelper writes out, and should not be considered a reference
+    // implementation for robust parsing of ICC profiles or their gamuts.
+ static ultrahdr_color_gamut readIccColorGamut(void* icc_data, size_t icc_size);
};
} // namespace android::ultrahdr
-#endif //ANDROID_ULTRAHDR_ICC_H
\ No newline at end of file
+#endif //ANDROID_ULTRAHDR_ICC_H
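An encode-side usage sketch, mirroring the calls added in jpegr.cpp (jpeg_encoder, yuv_data, width, height, and quality are assumed to be in scope):

    // Build an sRGB/BT.709 profile; the returned blob already carries the
    // "ICC_PROFILE" chunk identifier, so it can be handed to the JPEG
    // encoder as a complete APP2 payload.
    sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
                                                    ULTRAHDR_COLORGAMUT_BT709);
    jpeg_encoder.compressImage(yuv_data, width, height, quality,
                               icc->getData(), icc->getLength());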
diff --git a/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h b/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
index f642bad..8b5499a 100644
--- a/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
+++ b/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
@@ -25,6 +25,10 @@
}
#include <utils/Errors.h>
#include <vector>
+
+static const int kMaxWidth = 8192;
+static const int kMaxHeight = 8192;
+
namespace android::ultrahdr {
/*
* Encapsulates a converter from JPEG to raw image (YUV420planer or grey-scale) format.
@@ -79,11 +83,14 @@
*/
size_t getEXIFSize();
/*
- * Returns the position offset of EXIF package
- * (4 bypes offset to FF sign, the byte after FF E1 XX XX <this byte>),
- * or -1 if no EXIF exists.
+ * Returns the ICC data from the image.
*/
- int getEXIFPos() { return mExifPos; }
+ void* getICCPtr();
+ /*
+ * Returns the decompressed ICC buffer size. This method must be called only after
+ * calling decompressImage() or getCompressedImageParameters().
+ */
+ size_t getICCSize();
/*
* Decompresses metadata of the image. All vectors are owned by the caller.
*/
@@ -108,12 +115,12 @@
std::vector<JOCTET> mXMPBuffer;
// The buffer that holds EXIF Data.
std::vector<JOCTET> mEXIFBuffer;
+ // The buffer that holds ICC Data.
+ std::vector<JOCTET> mICCBuffer;
// Resolution of the decompressed image.
size_t mWidth;
size_t mHeight;
- // Position of EXIF package, default value is -1 which means no EXIF package appears.
- size_t mExifPos;
};
} /* namespace android::ultrahdr */
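The new ICC accessors pair with IccHelper::readIccColorGamut(); a decode-side sketch mirroring the use added in jpegr.cpp (jpeg_data and jpeg_size are assumed inputs):

    JpegDecoderHelper jpeg_decoder;
    if (jpeg_decoder.decompressImage(jpeg_data, jpeg_size)) {
        // Infer the SDR gamut from the embedded ICC profile; returns
        // ULTRAHDR_COLORGAMUT_UNSPECIFIED when no known profile is found.
        ultrahdr_color_gamut gamut = IccHelper::readIccColorGamut(
                jpeg_decoder.getICCPtr(), jpeg_decoder.getICCSize());
    }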
diff --git a/libs/ultrahdr/include/ultrahdr/jpegr.h b/libs/ultrahdr/include/ultrahdr/jpegr.h
index 1f9bd0f..9546ca4 100644
--- a/libs/ultrahdr/include/ultrahdr/jpegr.h
+++ b/libs/ultrahdr/include/ultrahdr/jpegr.h
@@ -125,7 +125,7 @@
*
* Generate gain map from the HDR and SDR inputs, compress SDR YUV to 8-bit JPEG and append
* the gain map to the end of the compressed JPEG. HDR and SDR inputs must be the same
- * resolution.
+ * resolution. SDR input is assumed to use the sRGB transfer function.
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* @param hdr_tf transfer function of the HDR image
@@ -152,7 +152,9 @@
* This method requires HAL Hardware JPEG encoder.
*
* Generate gain map from the HDR and SDR inputs, append the gain map to the end of the
- * compressed JPEG. HDR and SDR inputs must be the same resolution and color space.
+ * compressed JPEG. Adds an ICC profile if one isn't present in the input JPEG image. HDR and
+ * SDR inputs must be the same resolution and color space. SDR image is assumed to use the sRGB
+ * transfer function.
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* Note: the SDR image must be the decoded version of the JPEG
@@ -178,8 +180,9 @@
* This method requires HAL Hardware JPEG encoder.
*
* Decode the compressed 8-bit JPEG image to YUV SDR, generate gain map from the HDR input
- * and the decoded SDR result, append the gain map to the end of the compressed JPEG. HDR
- * and SDR inputs must be the same resolution.
+ * and the decoded SDR result, append the gain map to the end of the compressed JPEG. Adds an
+ * ICC profile if one isn't present in the input JPEG image. HDR and SDR inputs must be the same
+ * resolution. JPEG image is assumed to use the sRGB transfer function.
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param compressed_jpeg_image compressed 8-bit JPEG image
* @param hdr_tf transfer function of the HDR image
@@ -198,7 +201,8 @@
* Encode API-4
* Assemble JPEGR image from SDR JPEG and gainmap JPEG.
*
- * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format.
+ * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format. Adds an ICC
+ * profile if one isn't present in the input JPEG image.
* @param compressed_jpeg_image compressed 8-bit JPEG image
* @param compressed_gainmap compressed 8-bit JPEG single channel image
* @param metadata metadata to be written in XMP of the primary jpeg
@@ -217,6 +221,9 @@
* Decode API
* Decompress JPEGR image.
*
+ * This method assumes that the JPEGR image contains an ICC profile with primaries that match
+     * those of a color gamut that this library is aware of: Bt.709, Display-P3, or Bt.2100.
+ *
* @param compressed_jpegr_image compressed JPEGR image.
* @param dest destination of the uncompressed JPEGR image.
* @param max_display_boost (optional) the maximum available boost supported by a display,
@@ -270,26 +277,30 @@
/*
* This method is called in the encoding pipeline. It will take the uncompressed 8-bit and
* 10-bit yuv images as input, and calculate the uncompressed gain map. The input images
- * must be the same resolution.
+ * must be the same resolution. The SDR input is assumed to use the sRGB transfer function.
*
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param hdr_tf transfer function of the HDR image
* @param dest gain map; caller responsible for memory of data
* @param metadata max_content_boost is filled in
+ * @param sdr_is_601 if true, then use BT.601 decoding of YUV regardless of SDR image gamut
* @return NO_ERROR if calculation succeeds, error code if error occurs.
*/
status_t generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
jr_uncompressed_ptr uncompressed_p010_image,
ultrahdr_transfer_function hdr_tf,
ultrahdr_metadata_ptr metadata,
- jr_uncompressed_ptr dest);
+ jr_uncompressed_ptr dest,
+ bool sdr_is_601 = false);
/*
* This method is called in the decoding pipeline. It will take the uncompressed (decoded)
* 8-bit yuv image, the uncompressed (decoded) gain map, and extracted JPEG/R metadata as
* input, and calculate the 10-bit recovered image. The recovered output image is the same
* color gamut as the SDR image, with HLG transfer function, and is in RGBA1010102 data format.
+ * The SDR image is assumed to use the sRGB transfer function. The SDR image is also assumed to
+     * be a decoded JPEG for the purpose of YUV interpretation.
*
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* @param uncompressed_gain_map uncompressed gain map
@@ -353,6 +364,8 @@
* @param compressed_jpeg_image compressed 8-bit JPEG image
* @param compress_gain_map compressed recover map
* @param (nullable) exif EXIF package
+ * @param (nullable) icc ICC package
+ * @param icc_size length in bytes of ICC package
* @param metadata JPEG/R metadata to encode in XMP of the jpeg
* @param dest compressed JPEGR image
* @return NO_ERROR if calculation succeeds, error code if error occurs.
@@ -360,6 +373,7 @@
status_t appendGainMap(jr_compressed_ptr compressed_jpeg_image,
jr_compressed_ptr compressed_gain_map,
jr_exif_ptr exif,
+ void* icc, size_t icc_size,
ultrahdr_metadata_ptr metadata,
jr_compressed_ptr dest);
@@ -374,6 +388,22 @@
jr_uncompressed_ptr dest);
/*
+     * This method will convert a YUV420 image from one YUV encoding to another in-place (e.g.
+     * Bt.709 to Bt.601 YUV encoding).
+ *
+ * src_encoding and dest_encoding indicate the encoding via the YUV conversion defined for that
+ * gamut. P3 indicates Rec.601, since this is how DataSpace encodes Display-P3 YUV data.
+ *
+ * @param image the YUV420 image to convert
+ * @param src_encoding input YUV encoding
+ * @param dest_encoding output YUV encoding
+ * @return NO_ERROR if calculation succeeds, error code if error occurs.
+ */
+ status_t convertYuv(jr_uncompressed_ptr image,
+ ultrahdr_color_gamut src_encoding,
+ ultrahdr_color_gamut dest_encoding);
+
+ /*
* This method will check the validity of the input arguments.
*
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
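A sketch of how the new convertYuv() is driven by the encode paths (illustrative, inside a JpegR method; yuv_data, width, and height are placeholders):

    // Re-encode a BT.709-encoded SDR input as BT.601 before JPEG compression.
    // ULTRAHDR_COLORGAMUT_P3 selects BT.601 coefficients here, matching how
    // DataSpace encodes Display-P3 YUV data.
    jpegr_uncompressed_struct sdr = { yuv_data, width, height,
                                      ULTRAHDR_COLORGAMUT_BT709 };
    JPEGR_CHECK(convertYuv(&sdr, ULTRAHDR_COLORGAMUT_BT709,
                           ULTRAHDR_COLORGAMUT_P3));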
diff --git a/libs/ultrahdr/jpegdecoderhelper.cpp b/libs/ultrahdr/jpegdecoderhelper.cpp
index fac90c5..fef5444 100644
--- a/libs/ultrahdr/jpegdecoderhelper.cpp
+++ b/libs/ultrahdr/jpegdecoderhelper.cpp
@@ -93,7 +93,6 @@
}
JpegDecoderHelper::JpegDecoderHelper() {
- mExifPos = 0;
}
JpegDecoderHelper::~JpegDecoderHelper() {
@@ -138,6 +137,14 @@
return mEXIFBuffer.size();
}
+void* JpegDecoderHelper::getICCPtr() {
+ return mICCBuffer.data();
+}
+
+size_t JpegDecoderHelper::getICCSize() {
+ return mICCBuffer.size();
+}
+
size_t JpegDecoderHelper::getDecompressedImageWidth() {
return mWidth;
}
@@ -150,6 +157,7 @@
jpeg_decompress_struct cinfo;
jpegr_source_mgr mgr(static_cast<const uint8_t*>(image), length);
jpegrerror_mgr myerr;
+ bool status = true;
cinfo.err = jpeg_std_error(&myerr.pub);
myerr.pub.error_exit = jpegrerror_exit;
@@ -167,31 +175,21 @@
cinfo.src = &mgr;
jpeg_read_header(&cinfo, TRUE);
- // Save XMP data and EXIF data.
- // Here we only handle the first XMP / EXIF package.
- // The parameter pos is used for capturing start offset of EXIF, which is hacky, but working...
+ // Save XMP data, EXIF data, and ICC data.
+ // Here we only handle the first XMP / EXIF / ICC package.
// We assume that all packages are starting with two bytes marker (eg FF E1 for EXIF package),
// two bytes of package length which is stored in marker->original_length, and the real data
- // which is stored in marker->data. The pos is adding up all previous package lengths (
- // 4 bytes marker and length, marker->original_length) before EXIF appears. Note that here we
- // we are using marker->original_length instead of marker->data_length because in case the real
- // package length is larger than the limitation, jpeg-turbo will only copy the data within the
- // limitation (represented by data_length) and this may vary from original_length / real offset.
- // A better solution is making jpeg_marker_struct holding the offset, but currently it doesn't.
+ // which is stored in marker->data.
bool exifAppears = false;
bool xmpAppears = false;
- size_t pos = 2; // position after SOI
+ bool iccAppears = false;
for (jpeg_marker_struct* marker = cinfo.marker_list;
- marker && !(exifAppears && xmpAppears);
+ marker && !(exifAppears && xmpAppears && iccAppears);
marker = marker->next) {
- pos += 4;
- pos += marker->original_length;
-
- if (marker->marker != kAPP1Marker) {
+ if (marker->marker != kAPP1Marker && marker->marker != kAPP2Marker) {
continue;
}
-
const unsigned int len = marker->data_length;
if (!xmpAppears &&
len > kXmpNameSpace.size() &&
@@ -209,17 +207,31 @@
mEXIFBuffer.resize(len, 0);
memcpy(static_cast<void*>(mEXIFBuffer.data()), marker->data, len);
exifAppears = true;
- mExifPos = pos - marker->original_length;
+ } else if (!iccAppears &&
+ len > sizeof(kICCSig) &&
+ !memcmp(marker->data, kICCSig, sizeof(kICCSig))) {
+ mICCBuffer.resize(len, 0);
+ memcpy(static_cast<void*>(mICCBuffer.data()), marker->data, len);
+ iccAppears = true;
}
}
+ if (cinfo.image_width > kMaxWidth || cinfo.image_height > kMaxHeight) {
+        // The max width and height constraints exist only due to allocation limits;
+        // tune these values based on the target device.
+ status = false;
+ goto CleanUp;
+ }
+
mWidth = cinfo.image_width;
mHeight = cinfo.image_height;
if (decodeToRGBA) {
if (cinfo.jpeg_color_space == JCS_GRAYSCALE) {
// We don't intend to support decoding grayscale to RGBA
- return false;
+ status = false;
+ ALOGE("%s: decoding grayscale to RGBA is unsupported", __func__);
+ goto CleanUp;
}
// 4 bytes per pixel
mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 4);
@@ -232,7 +244,9 @@
cinfo.comp_info[0].v_samp_factor != 2 ||
cinfo.comp_info[1].v_samp_factor != 1 ||
cinfo.comp_info[2].v_samp_factor != 1) {
- return false;
+ status = false;
+ ALOGE("%s: decoding to YUV only supports 4:2:0 subsampling", __func__);
+ goto CleanUp;
}
mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 3 / 2, 0);
} else if (cinfo.jpeg_color_space == JCS_GRAYSCALE) {
@@ -248,13 +262,15 @@
if (!decompress(&cinfo, static_cast<const uint8_t*>(mResultBuffer.data()),
cinfo.jpeg_color_space == JCS_GRAYSCALE)) {
- return false;
+ status = false;
+ goto CleanUp;
}
+CleanUp:
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
- return true;
+ return status;
}
bool JpegDecoderHelper::decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest,
@@ -292,8 +308,12 @@
return false;
}
- *pWidth = cinfo.image_width;
- *pHeight = cinfo.image_height;
+ if (pWidth != nullptr) {
+ *pWidth = cinfo.image_width;
+ }
+ if (pHeight != nullptr) {
+ *pHeight = cinfo.image_height;
+ }
if (iccData != nullptr) {
for (jpeg_marker_struct* marker = cinfo.marker_list; marker;
@@ -306,9 +326,7 @@
continue;
}
- const unsigned int len = marker->data_length - kICCMarkerHeaderSize;
- const uint8_t *src = marker->data + kICCMarkerHeaderSize;
- iccData->insert(iccData->end(), src, src+len);
+ iccData->insert(iccData->end(), marker->data, marker->data + marker->data_length);
}
}
@@ -361,7 +379,7 @@
uint8_t* y_plane = const_cast<uint8_t*>(dest);
uint8_t* u_plane = const_cast<uint8_t*>(dest + y_plane_size);
uint8_t* v_plane = const_cast<uint8_t*>(dest + y_plane_size + uv_plane_size);
- std::unique_ptr<uint8_t[]> empty(new uint8_t[cinfo->image_width]);
+ std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
memset(empty.get(), 0, cinfo->image_width);
const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
@@ -435,7 +453,7 @@
JSAMPARRAY planes[1] {y};
uint8_t* y_plane = const_cast<uint8_t*>(dest);
- std::unique_ptr<uint8_t[]> empty(new uint8_t[cinfo->image_width]);
+ std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
memset(empty.get(), 0, cinfo->image_width);
int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
diff --git a/libs/ultrahdr/jpegencoderhelper.cpp b/libs/ultrahdr/jpegencoderhelper.cpp
index ab2f8c7..a03547b 100644
--- a/libs/ultrahdr/jpegencoderhelper.cpp
+++ b/libs/ultrahdr/jpegencoderhelper.cpp
@@ -107,12 +107,11 @@
jpeg_write_marker(&cinfo, JPEG_APP0 + 2, static_cast<const JOCTET*>(iccBuffer), iccSize);
}
- if (!compress(&cinfo, static_cast<const uint8_t*>(image), isSingleChannel)) {
- return false;
- }
+ bool status = compress(&cinfo, static_cast<const uint8_t*>(image), isSingleChannel);
jpeg_finish_compress(&cinfo);
jpeg_destroy_compress(&cinfo);
- return true;
+
+ return status;
}
void JpegEncoderHelper::setJpegDestination(jpeg_compress_struct* cinfo) {
@@ -174,7 +173,7 @@
uint8_t* y_plane = const_cast<uint8_t*>(yuv);
uint8_t* u_plane = const_cast<uint8_t*>(yuv + y_plane_size);
uint8_t* v_plane = const_cast<uint8_t*>(yuv + y_plane_size + uv_plane_size);
- std::unique_ptr<uint8_t[]> empty(new uint8_t[cinfo->image_width]);
+ std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
memset(empty.get(), 0, cinfo->image_width);
const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
@@ -250,7 +249,7 @@
JSAMPARRAY planes[1] {y};
uint8_t* y_plane = const_cast<uint8_t*>(image);
- std::unique_ptr<uint8_t[]> empty(new uint8_t[cinfo->image_width]);
+ std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
memset(empty.get(), 0, cinfo->image_width);
const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
diff --git a/libs/ultrahdr/jpegr.cpp b/libs/ultrahdr/jpegr.cpp
index b2bde6c..9af5af7 100644
--- a/libs/ultrahdr/jpegr.cpp
+++ b/libs/ultrahdr/jpegr.cpp
@@ -119,6 +119,13 @@
return ERROR_JPEGR_INVALID_INPUT_TYPE;
}
+ if (uncompressed_p010_image->width > kMaxWidth
+ || uncompressed_p010_image->height > kMaxHeight) {
+ ALOGE("Image dimensions cannot be larger than %dx%d, image dimensions %dx%d",
+ kMaxWidth, kMaxHeight, uncompressed_p010_image->width, uncompressed_p010_image->height);
+ return ERROR_JPEGR_INVALID_INPUT_TYPE;
+ }
+
if (uncompressed_p010_image->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED
|| uncompressed_p010_image->colorGamut > ULTRAHDR_COLORGAMUT_MAX) {
ALOGE("Unrecognized p010 color gamut %d", uncompressed_p010_image->colorGamut);
@@ -251,6 +258,10 @@
sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
uncompressed_yuv_420_image.colorGamut);
+ // Convert to Bt601 YUV encoding for JPEG encode
+ JPEGR_CHECK(convertYuv(&uncompressed_yuv_420_image, uncompressed_yuv_420_image.colorGamut,
+ ULTRAHDR_COLORGAMUT_P3));
+
JpegEncoderHelper jpeg_encoder;
if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image.data,
uncompressed_yuv_420_image.width,
@@ -262,7 +273,9 @@
jpeg.data = jpeg_encoder.getCompressedImagePtr();
jpeg.length = jpeg_encoder.getCompressedImageSize();
- JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, &metadata, dest));
+ // No ICC since JPEG encode already did it
+ JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
+ &metadata, dest));
return NO_ERROR;
}
@@ -310,10 +323,22 @@
sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
uncompressed_yuv_420_image->colorGamut);
+    // Convert to Bt601 YUV encoding for JPEG encode; make a copy so as not to clobber client data
+ unique_ptr<uint8_t[]> yuv_420_bt601_data = make_unique<uint8_t[]>(
+ uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
+ memcpy(yuv_420_bt601_data.get(), uncompressed_yuv_420_image->data,
+ uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
+
+ jpegr_uncompressed_struct yuv_420_bt601_image = {
+ yuv_420_bt601_data.get(), uncompressed_yuv_420_image->width, uncompressed_yuv_420_image->height,
+ uncompressed_yuv_420_image->colorGamut };
+ JPEGR_CHECK(convertYuv(&yuv_420_bt601_image, yuv_420_bt601_image.colorGamut,
+ ULTRAHDR_COLORGAMUT_P3));
+
JpegEncoderHelper jpeg_encoder;
- if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image->data,
- uncompressed_yuv_420_image->width,
- uncompressed_yuv_420_image->height, quality,
+ if (!jpeg_encoder.compressImage(yuv_420_bt601_image.data,
+ yuv_420_bt601_image.width,
+ yuv_420_bt601_image.height, quality,
icc->getData(), icc->getLength())) {
return ERROR_JPEGR_ENCODE_ERROR;
}
@@ -321,7 +346,9 @@
jpeg.data = jpeg_encoder.getCompressedImagePtr();
jpeg.length = jpeg_encoder.getCompressedImageSize();
- JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, &metadata, dest));
+ // No ICC since jpeg encode already did it
+ JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
+ &metadata, dest));
return NO_ERROR;
}
@@ -364,7 +391,24 @@
compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
- JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, nullptr, &metadata, dest));
+    // We just want to check if ICC is present, so don't do a full decode. Note
+    // that this doesn't verify that the ICC is valid.
+ JpegDecoderHelper decoder;
+ std::vector<uint8_t> icc;
+ decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+ /* pWidth */ nullptr, /* pHeight */ nullptr,
+ &icc, /* exifData */ nullptr);
+
+ // Add ICC if not already present.
+ if (icc.size() > 0) {
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
+ } else {
+ sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ uncompressed_yuv_420_image->colorGamut);
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ newIcc->getData(), newIcc->getLength(), &metadata, dest));
+ }
return NO_ERROR;
}
@@ -385,6 +429,7 @@
return ret;
}
+ // Note: output is Bt.601 YUV encoded regardless of gamut, due to jpeg decode.
JpegDecoderHelper jpeg_decoder;
if (!jpeg_decoder.decompressImage(compressed_jpeg_image->data, compressed_jpeg_image->length)) {
return ERROR_JPEGR_DECODE_ERROR;
@@ -404,8 +449,10 @@
metadata.version = kJpegrVersion;
jpegr_uncompressed_struct map;
+ // Indicate that the SDR image is Bt.601 YUV encoded.
JPEGR_CHECK(generateGainMap(
- &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
+ &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map,
+ true /* sdr_is_601 */ ));
std::unique_ptr<uint8_t[]> map_data;
map_data.reset(reinterpret_cast<uint8_t*>(map.data));
@@ -417,7 +464,24 @@
compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
- JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, nullptr, &metadata, dest));
+    // We just want to check if ICC is present, so don't do a full decode. Note
+    // that this doesn't verify that the ICC is valid.
+ JpegDecoderHelper decoder;
+ std::vector<uint8_t> icc;
+ decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+ /* pWidth */ nullptr, /* pHeight */ nullptr,
+ &icc, /* exifData */ nullptr);
+
+ // Add ICC if not already present.
+ if (icc.size() > 0) {
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
+ } else {
+ sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ uncompressed_yuv_420_image.colorGamut);
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ newIcc->getData(), newIcc->getLength(), &metadata, dest));
+ }
return NO_ERROR;
}
@@ -442,8 +506,25 @@
return ERROR_JPEGR_INVALID_NULL_PTR;
}
- JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
- metadata, dest));
+    // We just want to check if ICC is present, so don't do a full decode. Note
+    // that this doesn't verify that the ICC is valid.
+ JpegDecoderHelper decoder;
+ std::vector<uint8_t> icc;
+ decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+ /* pWidth */ nullptr, /* pHeight */ nullptr,
+ &icc, /* exifData */ nullptr);
+
+ // Add ICC if not already present.
+ if (icc.size() > 0) {
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
+ /* icc */ nullptr, /* icc size */ 0, metadata, dest));
+ } else {
+ sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ compressed_jpeg_image->colorGamut);
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
+ newIcc->getData(), newIcc->getLength(), metadata, dest));
+ }
+
return NO_ERROR;
}
@@ -606,6 +687,9 @@
uncompressed_yuv_420_image.data = jpeg_decoder.getDecompressedImagePtr();
uncompressed_yuv_420_image.width = jpeg_decoder.getDecompressedImageWidth();
uncompressed_yuv_420_image.height = jpeg_decoder.getDecompressedImageHeight();
+ uncompressed_yuv_420_image.colorGamut = IccHelper::readIccColorGamut(
+ jpeg_decoder.getICCPtr(), jpeg_decoder.getICCSize());
+
JPEGR_CHECK(applyGainMap(&uncompressed_yuv_420_image, &map, &uhdr_metadata, output_format,
max_display_boost, dest));
return NO_ERROR;
@@ -617,6 +701,7 @@
return ERROR_JPEGR_INVALID_NULL_PTR;
}
+ // Don't need to convert YUV to Bt601 since single channel
if (!jpeg_encoder->compressImage(uncompressed_gain_map->data,
uncompressed_gain_map->width,
uncompressed_gain_map->height,
@@ -692,7 +777,8 @@
jr_uncompressed_ptr uncompressed_p010_image,
ultrahdr_transfer_function hdr_tf,
ultrahdr_metadata_ptr metadata,
- jr_uncompressed_ptr dest) {
+ jr_uncompressed_ptr dest,
+ bool sdr_is_601) {
if (uncompressed_yuv_420_image == nullptr
|| uncompressed_p010_image == nullptr
|| metadata == nullptr
@@ -726,7 +812,7 @@
map_data.reset(reinterpret_cast<uint8_t*>(dest->data));
ColorTransformFn hdrInvOetf = nullptr;
- float hdr_white_nits = 0.0f;
+ float hdr_white_nits = kSdrWhiteNits;
switch (hdr_tf) {
case ULTRAHDR_TF_LINEAR:
hdrInvOetf = identityConversion;
@@ -761,15 +847,38 @@
uncompressed_yuv_420_image->colorGamut, uncompressed_p010_image->colorGamut);
ColorCalculationFn luminanceFn = nullptr;
+ ColorTransformFn sdrYuvToRgbFn = nullptr;
switch (uncompressed_yuv_420_image->colorGamut) {
case ULTRAHDR_COLORGAMUT_BT709:
luminanceFn = srgbLuminance;
+ sdrYuvToRgbFn = srgbYuvToRgb;
break;
case ULTRAHDR_COLORGAMUT_P3:
luminanceFn = p3Luminance;
+ sdrYuvToRgbFn = p3YuvToRgb;
break;
case ULTRAHDR_COLORGAMUT_BT2100:
luminanceFn = bt2100Luminance;
+ sdrYuvToRgbFn = bt2100YuvToRgb;
+ break;
+ case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+ // Should be impossible to hit after input validation.
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ if (sdr_is_601) {
+ sdrYuvToRgbFn = p3YuvToRgb;
+ }
+
+ ColorTransformFn hdrYuvToRgbFn = nullptr;
+ switch (uncompressed_p010_image->colorGamut) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ hdrYuvToRgbFn = srgbYuvToRgb;
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ hdrYuvToRgbFn = p3YuvToRgb;
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ hdrYuvToRgbFn = bt2100YuvToRgb;
break;
case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
// Should be impossible to hit after input validation.
@@ -783,8 +892,8 @@
std::function<void()> generateMap = [uncompressed_yuv_420_image, uncompressed_p010_image,
metadata, dest, hdrInvOetf, hdrGamutConversionFn,
- luminanceFn, hdr_white_nits, log2MinBoost, log2MaxBoost,
- &jobQueue]() -> void {
+ luminanceFn, sdrYuvToRgbFn, hdrYuvToRgbFn, hdr_white_nits,
+ log2MinBoost, log2MaxBoost, &jobQueue]() -> void {
size_t rowStart, rowEnd;
size_t dest_map_width = uncompressed_yuv_420_image->width / kMapDimensionScaleFactor;
size_t dest_map_stride = dest->width;
@@ -793,7 +902,8 @@
for (size_t x = 0; x < dest_map_width; ++x) {
Color sdr_yuv_gamma =
sampleYuv420(uncompressed_yuv_420_image, kMapDimensionScaleFactor, x, y);
- Color sdr_rgb_gamma = srgbYuvToRgb(sdr_yuv_gamma);
+ Color sdr_rgb_gamma = sdrYuvToRgbFn(sdr_yuv_gamma);
+ // We assume the SDR input always uses the sRGB transfer function.
#if USE_SRGB_INVOETF_LUT
Color sdr_rgb = srgbInvOetfLUT(sdr_rgb_gamma);
#else
@@ -802,7 +912,7 @@
float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits;
Color hdr_yuv_gamma = sampleP010(uncompressed_p010_image, kMapDimensionScaleFactor, x, y);
- Color hdr_rgb_gamma = bt2100YuvToRgb(hdr_yuv_gamma);
+ Color hdr_rgb_gamma = hdrYuvToRgbFn(hdr_yuv_gamma);
Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma);
hdr_rgb = hdrGamutConversionFn(hdr_rgb);
float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits;
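The per-pixel loop above produces sdr_y_nits and hdr_y_nits, which feed the gain-map encoder. As a hedged sketch (encodeGainSketch is a hypothetical stand-in for the library's encodeGain, whose rounding details may differ): the encoder maps log2(hdr/sdr), clamped to [log2(minContentBoost), log2(maxContentBoost)], linearly onto [0, 255].

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hedged sketch, not the library implementation; assumes max_boost > min_boost.
uint8_t encodeGainSketch(float sdr_y_nits, float hdr_y_nits,
                         float min_boost, float max_boost) {
    // Guard against a black SDR pixel; treat the gain as neutral (1.0).
    const float gain = sdr_y_nits > 0.0f ? hdr_y_nits / sdr_y_nits : 1.0f;
    const float log_min = std::log2(min_boost);
    const float log_max = std::log2(max_boost);
    const float log_gain = std::clamp(std::log2(gain), log_min, log_max);
    // Map [log_min, log_max] linearly onto [0, 255]. With min = 1/4 and
    // max = 4, a neutral gain of 1.0 lands on 127, matching the EncodeGain
    // expectations in gainmapmath_test.cpp further below.
    return static_cast<uint8_t>((log_gain - log_min) / (log_max - log_min) * 255.0f);
}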
@@ -880,7 +990,9 @@
for (size_t y = rowStart; y < rowEnd; ++y) {
for (size_t x = 0; x < width; ++x) {
Color yuv_gamma_sdr = getYuv420Pixel(uncompressed_yuv_420_image, x, y);
- Color rgb_gamma_sdr = srgbYuvToRgb(yuv_gamma_sdr);
+ // Assuming the SDR image is a decoded JPEG, we should always use Rec.601 YUV coefficients.
+ Color rgb_gamma_sdr = p3YuvToRgb(yuv_gamma_sdr);
+ // We assume the SDR base image always uses the sRGB transfer function.
#if USE_SRGB_INVOETF_LUT
Color rgb_sdr = srgbInvOetfLUT(rgb_gamma_sdr);
#else
@@ -1058,6 +1170,7 @@
status_t JpegR::appendGainMap(jr_compressed_ptr compressed_jpeg_image,
jr_compressed_ptr compressed_gain_map,
jr_exif_ptr exif,
+ void* icc, size_t icc_size,
ultrahdr_metadata_ptr metadata,
jr_compressed_ptr dest) {
if (compressed_jpeg_image == nullptr
@@ -1067,6 +1180,12 @@
return ERROR_JPEGR_INVALID_NULL_PTR;
}
+ if (metadata->minContentBoost < 1.0f || metadata->maxContentBoost < metadata->minContentBoost) {
+ ALOGE("received bad value for content boost min %f, max %f", metadata->minContentBoost,
+ metadata->maxContentBoost);
+ return ERROR_JPEGR_INVALID_INPUT_TYPE;
+ }
+
const string nameSpace = "http://ns.adobe.com/xap/1.0/";
const int nameSpaceLength = nameSpace.size() + 1; // need to count the null terminator
@@ -1115,6 +1234,18 @@
JPEGR_CHECK(Write(dest, (void*)xmp_primary.c_str(), xmp_primary.size(), pos));
}
+ // Write ICC
+ if (icc != nullptr && icc_size > 0) {
+ const int length = icc_size + 2;
+ const uint8_t lengthH = ((length >> 8) & 0xff);
+ const uint8_t lengthL = (length & 0xff);
+ JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+ JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
+ JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+ JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+ JPEGR_CHECK(Write(dest, icc, icc_size, pos));
+ }
+
// Prepare and write MPF
{
const int length = 2 + calculateMpfSize();
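For reference, the ICC write above follows the standard JPEG marker-segment layout: the 16-bit big-endian length counts the two length bytes plus the payload, but not the 0xFF/0xE2 marker bytes. A minimal self-contained sketch of the same layout (writeApp2Segment is a hypothetical helper, not part of this library):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrates the APP2 segment layout used above.
void writeApp2Segment(std::vector<uint8_t>& out, const uint8_t* payload, size_t size) {
    const size_t length = size + 2;                             // payload + the length field itself
    out.push_back(0xFF);                                        // marker start (JpegMarker::kStart)
    out.push_back(0xE2);                                        // APP2 (JpegMarker::kAPP2)
    out.push_back(static_cast<uint8_t>((length >> 8) & 0xff));  // big-endian high byte
    out.push_back(static_cast<uint8_t>(length & 0xff));         // low byte
    out.insert(out.end(), payload, payload + size);             // e.g. 300-byte ICC -> length 302
}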
@@ -1222,4 +1353,82 @@
return NO_ERROR;
}
+status_t JpegR::convertYuv(jr_uncompressed_ptr image,
+ ultrahdr_color_gamut src_encoding,
+ ultrahdr_color_gamut dest_encoding) {
+ if (image == nullptr) {
+ return ERROR_JPEGR_INVALID_NULL_PTR;
+ }
+
+ if (src_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED
+ || dest_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+
+ ColorTransformFn conversionFn = nullptr;
+ switch (src_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ switch (dest_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ return NO_ERROR;
+ case ULTRAHDR_COLORGAMUT_P3:
+ conversionFn = yuv709To601;
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ conversionFn = yuv709To2100;
+ break;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ switch (dest_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ conversionFn = yuv601To709;
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ return NO_ERROR;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ conversionFn = yuv601To2100;
+ break;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ switch (dest_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ conversionFn = yuv2100To709;
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ conversionFn = yuv2100To601;
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ return NO_ERROR;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ break;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+
+ if (conversionFn == nullptr) {
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+
+ for (size_t y = 0; y < image->height / 2; ++y) {
+ for (size_t x = 0; x < image->width / 2; ++x) {
+ transformYuv420(image, x, y, conversionFn);
+ }
+ }
+
+ return NO_ERROR;
+}
+
} // namespace android::ultrahdr
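The convertYuv loop above runs height/2 by width/2 times because YUV 4:2:0 stores one chroma sample per 2x2 block of luma samples, so transformYuv420 is invoked once per chroma coordinate. A minimal sketch of the indexing this assumes (field names loosely mirror jpegr_uncompressed_struct; strides and plane offsets are simplified):

#include <cstddef>
#include <cstdint>

// Sketch of planar YUV 4:2:0 indexing, assuming tightly packed planes.
struct Yuv420View {
    uint8_t* luma;     // width * height samples
    uint8_t* chromaU;  // (width / 2) * (height / 2) samples
    uint8_t* chromaV;  // (width / 2) * (height / 2) samples
    size_t width, height;
};

// x, y are luma coordinates.
inline uint8_t getLuma(const Yuv420View& img, size_t x, size_t y) {
    return img.luma[y * img.width + x];
}

// Each chroma sample covers the 2x2 luma block starting at (2 * (x / 2), 2 * (y / 2)).
inline uint8_t getChromaU(const Yuv420View& img, size_t x, size_t y) {
    return img.chromaU[(y / 2) * (img.width / 2) + (x / 2)];
}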
diff --git a/libs/ultrahdr/multipictureformat.cpp b/libs/ultrahdr/multipictureformat.cpp
index 7a265c6..f1679ef 100644
--- a/libs/ultrahdr/multipictureformat.cpp
+++ b/libs/ultrahdr/multipictureformat.cpp
@@ -30,7 +30,7 @@
sp<DataStruct> generateMpf(int primary_image_size, int primary_image_offset,
int secondary_image_size, int secondary_image_offset) {
size_t mpf_size = calculateMpfSize();
- sp<DataStruct> dataStruct = new DataStruct(mpf_size);
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(mpf_size);
dataStruct->write(static_cast<const void*>(kMpfSig), sizeof(kMpfSig));
#if USE_BIG_ENDIAN
diff --git a/libs/ultrahdr/tests/Android.bp b/libs/ultrahdr/tests/Android.bp
index 7dd9d04..5944130 100644
--- a/libs/ultrahdr/tests/Android.bp
+++ b/libs/ultrahdr/tests/Android.bp
@@ -25,8 +25,9 @@
name: "libultrahdr_test",
test_suites: ["device-tests"],
srcs: [
- "jpegr_test.cpp",
"gainmapmath_test.cpp",
+ "icchelper_test.cpp",
+ "jpegr_test.cpp",
],
shared_libs: [
"libimage_io",
@@ -72,5 +73,7 @@
static_libs: [
"libgtest",
"libjpegdecoder",
+ "libultrahdr",
+ "libutils",
],
}
diff --git a/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg b/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg
new file mode 100644
index 0000000..f61e0e8
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg
Binary files differ
diff --git a/libs/ultrahdr/tests/gainmapmath_test.cpp b/libs/ultrahdr/tests/gainmapmath_test.cpp
index c456653..af90365 100644
--- a/libs/ultrahdr/tests/gainmapmath_test.cpp
+++ b/libs/ultrahdr/tests/gainmapmath_test.cpp
@@ -28,6 +28,7 @@
float ComparisonEpsilon() { return 1e-4f; }
float LuminanceEpsilon() { return 1e-2f; }
+ float YuvConversionEpsilon() { return 1.0f / (255.0f * 2.0f); }
Color Yuv420(uint8_t y, uint8_t u, uint8_t v) {
return {{{ static_cast<float>(y) / 255.0f,
@@ -63,9 +64,13 @@
Color YuvBlack() { return {{{ 0.0f, 0.0f, 0.0f }}}; }
Color YuvWhite() { return {{{ 1.0f, 0.0f, 0.0f }}}; }
- Color SrgbYuvRed() { return {{{ 0.299f, -0.1687f, 0.5f }}}; }
- Color SrgbYuvGreen() { return {{{ 0.587f, -0.3313f, -0.4187f }}}; }
- Color SrgbYuvBlue() { return {{{ 0.114f, 0.5f, -0.0813f }}}; }
+ Color SrgbYuvRed() { return {{{ 0.2126f, -0.11457f, 0.5f }}}; }
+ Color SrgbYuvGreen() { return {{{ 0.7152f, -0.38543f, -0.45415f }}}; }
+ Color SrgbYuvBlue() { return {{{ 0.0722f, 0.5f, -0.04585f }}}; }
+
+ Color P3YuvRed() { return {{{ 0.299f, -0.16874f, 0.5f }}}; }
+ Color P3YuvGreen() { return {{{ 0.587f, -0.33126f, -0.41869f }}}; }
+ Color P3YuvBlue() { return {{{ 0.114f, 0.5f, -0.08131f }}}; }
Color Bt2100YuvRed() { return {{{ 0.2627f, -0.13963f, 0.5f }}}; }
Color Bt2100YuvGreen() { return {{{ 0.6780f, -0.36037f, -0.45979f }}}; }
@@ -78,6 +83,13 @@
return luminance_scaled * kSdrWhiteNits;
}
+ float P3YuvToLuminance(Color yuv_gamma, ColorCalculationFn luminanceFn) {
+ Color rgb_gamma = p3YuvToRgb(yuv_gamma);
+ Color rgb = srgbInvOetf(rgb_gamma);
+ float luminance_scaled = luminanceFn(rgb);
+ return luminance_scaled * kSdrWhiteNits;
+ }
+
float Bt2100YuvToLuminance(Color yuv_gamma, ColorTransformFn hdrInvOetf,
ColorTransformFn gamutConversionFn, ColorCalculationFn luminanceFn,
float scale_factor) {
@@ -402,6 +414,56 @@
EXPECT_FLOAT_EQ(p3Luminance(RgbBlue()), 0.06891f);
}
+TEST_F(GainMapMathTest, P3YuvToRgb) {
+ Color rgb_black = p3YuvToRgb(YuvBlack());
+ EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+ Color rgb_white = p3YuvToRgb(YuvWhite());
+ EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+ Color rgb_r = p3YuvToRgb(P3YuvRed());
+ EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+ Color rgb_g = p3YuvToRgb(P3YuvGreen());
+ EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+ Color rgb_b = p3YuvToRgb(P3YuvBlue());
+ EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, P3RgbToYuv) {
+ Color yuv_black = p3RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv_black, YuvBlack());
+
+ Color yuv_white = p3RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv_white, YuvWhite());
+
+ Color yuv_r = p3RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv_r, P3YuvRed());
+
+ Color yuv_g = p3RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv_g, P3YuvGreen());
+
+ Color yuv_b = p3RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv_b, P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, P3RgbYuvRoundtrip) {
+ Color rgb_black = p3YuvToRgb(p3RgbToYuv(RgbBlack()));
+ EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+ Color rgb_white = p3YuvToRgb(p3RgbToYuv(RgbWhite()));
+ EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+ Color rgb_r = p3YuvToRgb(p3RgbToYuv(RgbRed()));
+ EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+ Color rgb_g = p3YuvToRgb(p3RgbToYuv(RgbGreen()));
+ EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+ Color rgb_b = p3YuvToRgb(p3RgbToYuv(RgbBlue()));
+ EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
TEST_F(GainMapMathTest, Bt2100Luminance) {
EXPECT_FLOAT_EQ(bt2100Luminance(RgbBlack()), 0.0f);
EXPECT_FLOAT_EQ(bt2100Luminance(RgbWhite()), 1.0f);
@@ -461,6 +523,163 @@
EXPECT_RGB_NEAR(rgb_b, RgbBlue());
}
+TEST_F(GainMapMathTest, Bt709ToBt601YuvConversion) {
+ Color yuv_black = srgbRgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_black), YuvBlack());
+
+ Color yuv_white = srgbRgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_white), YuvWhite());
+
+ Color yuv_r = srgbRgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_r), P3YuvRed());
+
+ Color yuv_g = srgbRgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_g), P3YuvGreen());
+
+ Color yuv_b = srgbRgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_b), P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt709ToBt2100YuvConversion) {
+ Color yuv_black = srgbRgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_black), YuvBlack());
+
+ Color yuv_white = srgbRgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_white), YuvWhite());
+
+ Color yuv_r = srgbRgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_r), Bt2100YuvRed());
+
+ Color yuv_g = srgbRgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_g), Bt2100YuvGreen());
+
+ Color yuv_b = srgbRgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_b), Bt2100YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt601ToBt709YuvConversion) {
+ Color yuv_black = p3RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_black), YuvBlack());
+
+ Color yuv_white = p3RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_white), YuvWhite());
+
+ Color yuv_r = p3RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_r), SrgbYuvRed());
+
+ Color yuv_g = p3RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_g), SrgbYuvGreen());
+
+ Color yuv_b = p3RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_b), SrgbYuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt601ToBt2100YuvConversion) {
+ Color yuv_black = p3RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_black), YuvBlack());
+
+ Color yuv_white = p3RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_white), YuvWhite());
+
+ Color yuv_r = p3RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_r), Bt2100YuvRed());
+
+ Color yuv_g = p3RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_g), Bt2100YuvGreen());
+
+ Color yuv_b = p3RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_b), Bt2100YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100ToBt709YuvConversion) {
+ Color yuv_black = bt2100RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_black), YuvBlack());
+
+ Color yuv_white = bt2100RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_white), YuvWhite());
+
+ Color yuv_r = bt2100RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_r), SrgbYuvRed());
+
+ Color yuv_g = bt2100RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_g), SrgbYuvGreen());
+
+ Color yuv_b = bt2100RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_b), SrgbYuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100ToBt601YuvConversion) {
+ Color yuv_black = bt2100RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_black), YuvBlack());
+
+ Color yuv_white = bt2100RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_white), YuvWhite());
+
+ Color yuv_r = bt2100RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_r), P3YuvRed());
+
+ Color yuv_g = bt2100RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_g), P3YuvGreen());
+
+ Color yuv_b = bt2100RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_b), P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, TransformYuv420) {
+ ColorTransformFn transforms[] = { yuv709To601, yuv709To2100, yuv601To709, yuv601To2100,
+ yuv2100To709, yuv2100To601 };
+ for (const ColorTransformFn& transform : transforms) {
+ jpegr_uncompressed_struct input = Yuv420Image();
+
+ size_t out_buf_size = input.width * input.height * 3 / 2;
+ std::unique_ptr<uint8_t[]> out_buf = std::make_unique<uint8_t[]>(out_buf_size);
+ memcpy(out_buf.get(), input.data, out_buf_size);
+ jpegr_uncompressed_struct output = Yuv420Image();
+ output.data = out_buf.get();
+
+ transformYuv420(&output, 1, 1, transform);
+
+ for (size_t y = 0; y < 4; ++y) {
+ for (size_t x = 0; x < 4; ++x) {
+ // Skip the 2x2 luma block sharing the last chroma sample, which we modified above
+ if (x >= 2 && y >= 2) {
+ continue;
+ }
+
+ // All other pixels should remain unchanged
+ EXPECT_YUV_EQ(getYuv420Pixel(&input, x, y), getYuv420Pixel(&output, x, y));
+ }
+ }
+
+ // Modified pixels should be updated as intended by the transformYuv420 algorithm.
+ Color in1 = getYuv420Pixel(&input, 2, 2);
+ Color in2 = getYuv420Pixel(&input, 3, 2);
+ Color in3 = getYuv420Pixel(&input, 2, 3);
+ Color in4 = getYuv420Pixel(&input, 3, 3);
+ Color out1 = getYuv420Pixel(&output, 2, 2);
+ Color out2 = getYuv420Pixel(&output, 3, 2);
+ Color out3 = getYuv420Pixel(&output, 2, 3);
+ Color out4 = getYuv420Pixel(&output, 3, 3);
+
+ EXPECT_NEAR(transform(in1).y, out1.y, YuvConversionEpsilon());
+ EXPECT_NEAR(transform(in2).y, out2.y, YuvConversionEpsilon());
+ EXPECT_NEAR(transform(in3).y, out3.y, YuvConversionEpsilon());
+ EXPECT_NEAR(transform(in4).y, out4.y, YuvConversionEpsilon());
+
+ Color expect_uv = (transform(in1) + transform(in2) + transform(in3) + transform(in4)) / 4.0f;
+
+ EXPECT_NEAR(expect_uv.u, out1.u, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.u, out2.u, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.u, out3.u, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.u, out4.u, YuvConversionEpsilon());
+
+ EXPECT_NEAR(expect_uv.v, out1.v, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.v, out2.v, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.v, out3.v, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.v, out4.v, YuvConversionEpsilon());
+ }
+}
+
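The assertions above pin down the contract of transformYuv420: each of the four luma pixels sharing a chroma sample is converted through the transform, its luma is kept per pixel, and the shared chroma becomes the average of the four transformed results. A hedged sketch of that contract (the write-back into the planes is elided; types come from ultrahdr/gainmapmath.h):

#include <ultrahdr/gainmapmath.h>  // Color, ColorTransformFn, getYuv420Pixel

// Sketch of the behavior the test checks; not the library implementation.
// chroma_x, chroma_y index the chroma plane; the four covered luma pixels are
// (2 * chroma_x + dx, 2 * chroma_y + dy) for dx, dy in {0, 1}.
void transformYuv420Sketch(jr_uncompressed_ptr image, size_t chroma_x, size_t chroma_y,
                           ColorTransformFn fn) {
    Color transformed[2][2];
    for (size_t dy = 0; dy < 2; ++dy) {
        for (size_t dx = 0; dx < 2; ++dx) {
            Color pixel = getYuv420Pixel(image, 2 * chroma_x + dx, 2 * chroma_y + dy);
            transformed[dy][dx] = fn(pixel);
            // ...store transformed[dy][dx].y back into the luma plane here...
        }
    }
    Color avg = (transformed[0][0] + transformed[0][1] +
                 transformed[1][0] + transformed[1][1]) / 4.0f;
    // ...store avg.u and avg.v back into the chroma planes at (chroma_x, chroma_y)...
    (void) avg;
}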
TEST_F(GainMapMathTest, HlgOetf) {
EXPECT_FLOAT_EQ(hlgOetf(0.0f), 0.0f);
EXPECT_NEAR(hlgOetf(0.04167f), 0.35357f, ComparisonEpsilon());
@@ -693,7 +912,7 @@
TEST_F(GainMapMathTest, EncodeGain) {
ultrahdr_metadata_struct metadata = { .maxContentBoost = 4.0f,
- .minContentBoost = 1.0f / 4.0f };
+ .minContentBoost = 1.0f / 4.0f };
EXPECT_EQ(encodeGain(0.0f, 0.0f, &metadata), 127);
EXPECT_EQ(encodeGain(0.0f, 1.0f, &metadata), 127);
@@ -751,7 +970,7 @@
TEST_F(GainMapMathTest, ApplyGain) {
ultrahdr_metadata_struct metadata = { .maxContentBoost = 4.0f,
- .minContentBoost = 1.0f / 4.0f };
+ .minContentBoost = 1.0f / 4.0f };
float displayBoost = metadata.maxContentBoost;
EXPECT_RGB_NEAR(applyGain(RgbBlack(), 0.0f, &metadata), RgbBlack());
diff --git a/libs/ultrahdr/tests/icchelper_test.cpp b/libs/ultrahdr/tests/icchelper_test.cpp
new file mode 100644
index 0000000..ff61c08
--- /dev/null
+++ b/libs/ultrahdr/tests/icchelper_test.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <ultrahdr/icc.h>
+#include <ultrahdr/ultrahdr.h>
+#include <utils/Log.h>
+
+namespace android::ultrahdr {
+
+class IccHelperTest : public testing::Test {
+public:
+ IccHelperTest();
+ ~IccHelperTest();
+protected:
+ virtual void SetUp();
+ virtual void TearDown();
+};
+
+IccHelperTest::IccHelperTest() {}
+
+IccHelperTest::~IccHelperTest() {}
+
+void IccHelperTest::SetUp() {}
+
+void IccHelperTest::TearDown() {}
+
+TEST_F(IccHelperTest, iccWriteThenRead) {
+ sp<DataStruct> iccBt709 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ ULTRAHDR_COLORGAMUT_BT709);
+ ASSERT_NE(iccBt709->getLength(), 0);
+ ASSERT_NE(iccBt709->getData(), nullptr);
+ EXPECT_EQ(IccHelper::readIccColorGamut(iccBt709->getData(), iccBt709->getLength()),
+ ULTRAHDR_COLORGAMUT_BT709);
+
+ sp<DataStruct> iccP3 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, ULTRAHDR_COLORGAMUT_P3);
+ ASSERT_NE(iccP3->getLength(), 0);
+ ASSERT_NE(iccP3->getData(), nullptr);
+ EXPECT_EQ(IccHelper::readIccColorGamut(iccP3->getData(), iccP3->getLength()),
+ ULTRAHDR_COLORGAMUT_P3);
+
+ sp<DataStruct> iccBt2100 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ ULTRAHDR_COLORGAMUT_BT2100);
+ ASSERT_NE(iccBt2100->getLength(), 0);
+ ASSERT_NE(iccBt2100->getData(), nullptr);
+ EXPECT_EQ(IccHelper::readIccColorGamut(iccBt2100->getData(), iccBt2100->getLength()),
+ ULTRAHDR_COLORGAMUT_BT2100);
+}
+
+TEST_F(IccHelperTest, iccEndianness) {
+ sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, ULTRAHDR_COLORGAMUT_BT709);
+ size_t profile_size = icc->getLength() - kICCIdentifierSize;
+
+ uint8_t* icc_bytes = reinterpret_cast<uint8_t*>(icc->getData()) + kICCIdentifierSize;
+ uint32_t encoded_size = static_cast<uint32_t>(icc_bytes[0]) << 24 |
+ static_cast<uint32_t>(icc_bytes[1]) << 16 |
+ static_cast<uint32_t>(icc_bytes[2]) << 8 |
+ static_cast<uint32_t>(icc_bytes[3]);
+
+ EXPECT_EQ(static_cast<size_t>(encoded_size), profile_size);
+}
+
+} // namespace android::ultrahdr
+
diff --git a/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp b/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
index c79dbe3..e2da01c 100644
--- a/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
+++ b/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
@@ -15,6 +15,7 @@
*/
#include <ultrahdr/jpegdecoderhelper.h>
+#include <ultrahdr/icc.h>
#include <gtest/gtest.h>
#include <utils/Log.h>
@@ -22,11 +23,19 @@
namespace android::ultrahdr {
+// No ICC or EXIF
#define YUV_IMAGE "/sdcard/Documents/minnie-320x240-yuv.jpg"
#define YUV_IMAGE_SIZE 20193
+// Has ICC and EXIF
+#define YUV_ICC_IMAGE "/sdcard/Documents/minnie-320x240-yuv-icc.jpg"
+#define YUV_ICC_IMAGE_SIZE 34266
+// No ICC or EXIF
#define GREY_IMAGE "/sdcard/Documents/minnie-320x240-y.jpg"
#define GREY_IMAGE_SIZE 20193
+#define IMAGE_WIDTH 320
+#define IMAGE_HEIGHT 240
+
class JpegDecoderHelperTest : public testing::Test {
public:
struct Image {
@@ -39,7 +48,7 @@
virtual void SetUp();
virtual void TearDown();
- Image mYuvImage, mGreyImage;
+ Image mYuvImage, mYuvIccImage, mGreyImage;
};
JpegDecoderHelperTest::JpegDecoderHelperTest() {}
@@ -79,6 +88,10 @@
FAIL() << "Load file " << YUV_IMAGE << " failed";
}
mYuvImage.size = YUV_IMAGE_SIZE;
+ if (!loadFile(YUV_ICC_IMAGE, &mYuvIccImage)) {
+ FAIL() << "Load file " << YUV_ICC_IMAGE << " failed";
+ }
+ mYuvIccImage.size = YUV_ICC_IMAGE_SIZE;
if (!loadFile(GREY_IMAGE, &mGreyImage)) {
FAIL() << "Load file " << GREY_IMAGE << " failed";
}
@@ -91,6 +104,16 @@
JpegDecoderHelper decoder;
EXPECT_TRUE(decoder.decompressImage(mYuvImage.buffer.get(), mYuvImage.size));
ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
+ EXPECT_EQ(IccHelper::readIccColorGamut(decoder.getICCPtr(), decoder.getICCSize()),
+ ULTRAHDR_COLORGAMUT_UNSPECIFIED);
+}
+
+TEST_F(JpegDecoderHelperTest, decodeYuvIccImage) {
+ JpegDecoderHelper decoder;
+ EXPECT_TRUE(decoder.decompressImage(mYuvIccImage.buffer.get(), mYuvIccImage.size));
+ ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
+ EXPECT_EQ(IccHelper::readIccColorGamut(decoder.getICCPtr(), decoder.getICCSize()),
+ ULTRAHDR_COLORGAMUT_BT709);
}
TEST_F(JpegDecoderHelperTest, decodeGreyImage) {
@@ -99,4 +122,35 @@
ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
}
-} // namespace android::ultrahdr
\ No newline at end of file
+TEST_F(JpegDecoderHelperTest, getCompressedImageParameters) {
+ size_t width = 0, height = 0;
+ std::vector<uint8_t> icc, exif;
+
+ JpegDecoderHelper decoder;
+ EXPECT_TRUE(decoder.getCompressedImageParameters(mYuvImage.buffer.get(), mYuvImage.size,
+ &width, &height, &icc, &exif));
+
+ EXPECT_EQ(width, IMAGE_WIDTH);
+ EXPECT_EQ(height, IMAGE_HEIGHT);
+ EXPECT_EQ(icc.size(), 0);
+ EXPECT_EQ(exif.size(), 0);
+}
+
+TEST_F(JpegDecoderHelperTest, getCompressedImageParametersIcc) {
+ size_t width = 0, height = 0;
+ std::vector<uint8_t> icc, exif;
+
+ JpegDecoderHelper decoder;
+ EXPECT_TRUE(decoder.getCompressedImageParameters(mYuvIccImage.buffer.get(), mYuvIccImage.size,
+ &width, &height, &icc, &exif));
+
+ EXPECT_EQ(width, IMAGE_WIDTH);
+ EXPECT_EQ(height, IMAGE_HEIGHT);
+ EXPECT_GT(icc.size(), 0);
+ EXPECT_GT(exif.size(), 0);
+
+ EXPECT_EQ(IccHelper::readIccColorGamut(icc.data(), icc.size()),
+ ULTRAHDR_COLORGAMUT_BT709);
+}
+
+} // namespace android::ultrahdr
diff --git a/services/inputflinger/dispatcher/Entry.cpp b/services/inputflinger/dispatcher/Entry.cpp
index b625a1b..a670ebe 100644
--- a/services/inputflinger/dispatcher/Entry.cpp
+++ b/services/inputflinger/dispatcher/Entry.cpp
@@ -352,7 +352,7 @@
entry.transform.dump(transform, "transform");
out << ", resolvedFlags=" << entry.resolvedFlags
<< ", targetFlags=" << entry.targetFlags.string() << ", transform=" << transform
- << "} original =" << entry.eventEntry->getDescription();
+ << "} original: " << entry.eventEntry->getDescription();
return out;
}
diff --git a/services/inputflinger/dispatcher/InputDispatcher.cpp b/services/inputflinger/dispatcher/InputDispatcher.cpp
index 2054329..f18265f 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.cpp
+++ b/services/inputflinger/dispatcher/InputDispatcher.cpp
@@ -3293,11 +3293,8 @@
if (!connection->inputState.trackKey(keyEntry, dispatchEntry->resolvedAction,
dispatchEntry->resolvedFlags)) {
- if (DEBUG_DISPATCH_CYCLE) {
- ALOGD("channel '%s' ~ enqueueDispatchEntryLocked: skipping inconsistent key "
- "event",
- connection->getInputChannelName().c_str());
- }
+ LOG(WARNING) << "channel " << connection->getInputChannelName()
+ << "~ dropping inconsistent event: " << *dispatchEntry;
return; // skip the inconsistent event
}
break;
@@ -3350,11 +3347,8 @@
if (!connection->inputState.trackMotion(motionEntry, dispatchEntry->resolvedAction,
dispatchEntry->resolvedFlags)) {
- if (DEBUG_DISPATCH_CYCLE) {
- ALOGD("channel '%s' ~ enqueueDispatchEntryLocked: skipping inconsistent motion "
- "event",
- connection->getInputChannelName().c_str());
- }
+ LOG(WARNING) << "channel " << connection->getInputChannelName()
+ << "~ dropping inconsistent event: " << *dispatchEntry;
return; // skip the inconsistent event
}
@@ -4135,11 +4129,11 @@
}
}
- if (action == AMOTION_EVENT_ACTION_DOWN) {
- LOG_ALWAYS_FATAL_IF(splitDownTime != originalMotionEntry.eventTime,
- "Split motion event has mismatching downTime and eventTime for "
- "ACTION_DOWN, motionEntry=%s, splitDownTime=%" PRId64,
- originalMotionEntry.getDescription().c_str(), splitDownTime);
+ if (action == AMOTION_EVENT_ACTION_DOWN && splitDownTime != originalMotionEntry.eventTime) {
+ logDispatchStateLocked();
+ LOG_ALWAYS_FATAL("Split motion event has mismatching downTime and eventTime for "
+ "ACTION_DOWN, motionEntry=%s, splitDownTime=%" PRId64,
+ originalMotionEntry.getDescription().c_str(), splitDownTime);
}
int32_t newId = mIdGenerator.nextId();
diff --git a/services/inputflinger/dispatcher/InputDispatcher.h b/services/inputflinger/dispatcher/InputDispatcher.h
index ae365cd..20fe0ca 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.h
+++ b/services/inputflinger/dispatcher/InputDispatcher.h
@@ -202,7 +202,7 @@
DropReason mLastDropReason GUARDED_BY(mLock);
- const IdGenerator mIdGenerator;
+ const IdGenerator mIdGenerator GUARDED_BY(mLock);
int64_t mWindowInfosVsyncId GUARDED_BY(mLock);
@@ -649,7 +649,7 @@
// splitDownTime refers to the time of first 'down' event on that particular target
std::unique_ptr<MotionEntry> splitMotionEvent(const MotionEntry& originalMotionEntry,
std::bitset<MAX_POINTER_ID + 1> pointerIds,
- nsecs_t splitDownTime);
+ nsecs_t splitDownTime) REQUIRES(mLock);
// Reset and drop everything the dispatcher is doing.
void resetAndDropEverythingLocked(const char* reason) REQUIRES(mLock);
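The GUARDED_BY and REQUIRES additions above are clang thread-safety annotations (android-base/thread_annotations.h): with -Wthread-safety, the compiler rejects access to an annotated member, or a call to an annotated function, unless the named lock is provably held. A minimal sketch of the pattern, assuming that header:

#include <mutex>
#include <android-base/thread_annotations.h>

class Counter {
public:
    void increment() {
        std::lock_guard<std::mutex> lock(mLock);
        incrementLocked();  // OK: mLock is held here
    }

private:
    // Callers must hold mLock; calling this without it is a compile-time error.
    void incrementLocked() REQUIRES(mLock) { ++mValue; }

    std::mutex mLock;
    int mValue GUARDED_BY(mLock) = 0;  // direct access outside mLock is flagged
};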
diff --git a/services/inputflinger/dispatcher/InputState.cpp b/services/inputflinger/dispatcher/InputState.cpp
index 4652c2d..2fcb89a 100644
--- a/services/inputflinger/dispatcher/InputState.cpp
+++ b/services/inputflinger/dispatcher/InputState.cpp
@@ -93,11 +93,7 @@
mMotionMementos.erase(mMotionMementos.begin() + index);
return true;
}
- if (DEBUG_OUTBOUND_EVENT_DETAILS) {
- ALOGD("Dropping inconsistent motion up or cancel event: deviceId=%d, source=%08x, "
- "displayId=%" PRId32 ", actionMasked=%d",
- entry.deviceId, entry.source, entry.displayId, actionMasked);
- }
+
return false;
}
@@ -150,11 +146,7 @@
return true;
}
}
- if (DEBUG_OUTBOUND_EVENT_DETAILS) {
- ALOGD("Dropping inconsistent motion pointer up/down or move event: "
- "deviceId=%d, source=%08x, displayId=%" PRId32 ", actionMasked=%d",
- entry.deviceId, entry.source, entry.displayId, actionMasked);
- }
+
return false;
}
@@ -164,11 +156,7 @@
mMotionMementos.erase(mMotionMementos.begin() + index);
return true;
}
- if (DEBUG_OUTBOUND_EVENT_DETAILS) {
- ALOGD("Dropping inconsistent motion hover exit event: deviceId=%d, source=%08x, "
- "displayId=%" PRId32,
- entry.deviceId, entry.source, entry.displayId);
- }
+
return false;
}
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index 4d75138..92e7f43 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -1164,12 +1164,29 @@
mInfo.inputConfig = WindowInfo::InputConfig::DEFAULT;
}
- sp<FakeWindowHandle> clone(
- const std::shared_ptr<InputApplicationHandle>& inputApplicationHandle,
- const std::unique_ptr<InputDispatcher>& dispatcher, int32_t displayId) {
- sp<FakeWindowHandle> handle =
- sp<FakeWindowHandle>::make(inputApplicationHandle, dispatcher,
- mInfo.name + "(Mirror)", displayId, mInfo.token);
+ sp<FakeWindowHandle> clone(int32_t displayId) {
+ sp<FakeWindowHandle> handle = sp<FakeWindowHandle>::make(mInfo.name + "(Mirror)");
+ handle->mInfo = mInfo;
+ handle->mInfo.displayId = displayId;
+ handle->mInfo.id = sId++;
+ handle->mInputReceiver = mInputReceiver;
+ return handle;
+ }
+
+    /**
+     * This is different from clone: clone creates a "mirror" window, which has the same token
+     * but a different ID. The original window and its clone may be sent to the dispatcher at
+     * the same time, and they can coexist inside the dispatcher.
+     * This function creates a distinct WindowInfoHandle object with the same properties as the
+     * original object, including the ID.
+     * Either the old or the new object can be used to consume the events.
+     * IMPORTANT: The duplicated object is supposed to replace the original object, and must not
+     * appear inside the dispatcher at the same time as the original.
+     */
+ sp<FakeWindowHandle> duplicate() {
+ sp<FakeWindowHandle> handle = sp<FakeWindowHandle>::make(mName);
+ handle->mInfo = mInfo;
+ handle->mInputReceiver = mInputReceiver;
return handle;
}
@@ -1438,9 +1455,11 @@
int getChannelFd() { return mInputReceiver->getChannelFd(); }
private:
+    FakeWindowHandle(std::string name) : mName(std::move(name)) {}
const std::string mName;
- std::unique_ptr<FakeInputReceiver> mInputReceiver;
+ std::shared_ptr<FakeInputReceiver> mInputReceiver;
static std::atomic<int32_t> sId; // each window gets a unique id, like in surfaceflinger
+ friend class sp<FakeWindowHandle>;
};
std::atomic<int32_t> FakeWindowHandle::sId{1};
@@ -3912,6 +3931,72 @@
}
/**
+ * Send a two-pointer gesture to a single window. The window's orientation changes in response to
+ * the first pointer.
+ * Ensure that the second pointer is not sent to the window.
+ *
+ * The subsequent gesture should be correctly delivered to the window.
+ */
+TEST_F(InputDispatcherTest, MultiplePointersWithRotatingWindow) {
+ std::shared_ptr<FakeApplicationHandle> application = std::make_shared<FakeApplicationHandle>();
+ sp<FakeWindowHandle> window =
+ sp<FakeWindowHandle>::make(application, mDispatcher, "Window", ADISPLAY_ID_DEFAULT);
+ window->setFrame(Rect(0, 0, 400, 400));
+ mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {window}}});
+
+ const nsecs_t baseTime = systemTime(SYSTEM_TIME_MONOTONIC);
+ mDispatcher->notifyMotion(MotionArgsBuilder(ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .downTime(baseTime + 10)
+ .eventTime(baseTime + 10)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(100).y(100))
+ .build());
+
+ window->consumeMotionEvent(WithMotionAction(ACTION_DOWN));
+
+    // We need a new window object for the same window, because the dispatcher stores objects by
+    // reference. That means the testing code and the dispatcher refer to the same shared object.
+    // Calling window->setWindowTransform here would defeat the dispatcher's comparison of the old
+    // window to the new window, since both windows would be updated to the same value.
+ sp<FakeWindowHandle> windowDup = window->duplicate();
+
+ // Change the transform so that the orientation is now different from original.
+ windowDup->setWindowTransform(0, -1, 1, 0);
+
+ mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {windowDup}}});
+
+ window->consumeMotionEvent(WithMotionAction(ACTION_CANCEL));
+
+ mDispatcher->notifyMotion(MotionArgsBuilder(POINTER_1_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .downTime(baseTime + 10)
+ .eventTime(baseTime + 30)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(100).y(100))
+ .pointer(PointerBuilder(1, ToolType::FINGER).x(200).y(200))
+ .build());
+
+ // Finish the gesture and start a new one. Ensure the new gesture is sent to the window
+ mDispatcher->notifyMotion(MotionArgsBuilder(POINTER_1_UP, AINPUT_SOURCE_TOUCHSCREEN)
+ .downTime(baseTime + 10)
+ .eventTime(baseTime + 40)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(100).y(100))
+ .pointer(PointerBuilder(1, ToolType::FINGER).x(200).y(200))
+ .build());
+ mDispatcher->notifyMotion(MotionArgsBuilder(ACTION_UP, AINPUT_SOURCE_TOUCHSCREEN)
+ .downTime(baseTime + 10)
+ .eventTime(baseTime + 50)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(100).y(100))
+ .build());
+
+ mDispatcher->notifyMotion(MotionArgsBuilder(ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .downTime(baseTime + 60)
+ .eventTime(baseTime + 60)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(40).y(40))
+ .build());
+
+ windowDup->consumeMotionEvent(WithMotionAction(ACTION_DOWN));
+}
+
+/**
* Ensure the correct coordinate spaces are used by InputDispatcher.
*
* InputDispatcher works in the display space, so its coordinate system is relative to the display
@@ -4492,16 +4577,13 @@
sp<FakeWindowHandle>::make(application, mDispatcher, "D_1_W2", ADISPLAY_ID_DEFAULT);
secondWindowInPrimary->setFrame(Rect(100, 0, 200, 100));
- sp<FakeWindowHandle> mirrorWindowInPrimary =
- firstWindowInPrimary->clone(application, mDispatcher, ADISPLAY_ID_DEFAULT);
+ sp<FakeWindowHandle> mirrorWindowInPrimary = firstWindowInPrimary->clone(ADISPLAY_ID_DEFAULT);
mirrorWindowInPrimary->setFrame(Rect(0, 100, 100, 200));
- sp<FakeWindowHandle> firstWindowInSecondary =
- firstWindowInPrimary->clone(application, mDispatcher, SECOND_DISPLAY_ID);
+ sp<FakeWindowHandle> firstWindowInSecondary = firstWindowInPrimary->clone(SECOND_DISPLAY_ID);
firstWindowInSecondary->setFrame(Rect(0, 0, 100, 100));
- sp<FakeWindowHandle> secondWindowInSecondary =
- secondWindowInPrimary->clone(application, mDispatcher, SECOND_DISPLAY_ID);
+ sp<FakeWindowHandle> secondWindowInSecondary = secondWindowInPrimary->clone(SECOND_DISPLAY_ID);
secondWindowInPrimary->setFrame(Rect(100, 0, 200, 100));
// Update window info, let it find window handle of second display first.
@@ -4551,16 +4633,13 @@
sp<FakeWindowHandle>::make(application, mDispatcher, "D_1_W2", ADISPLAY_ID_DEFAULT);
secondWindowInPrimary->setFrame(Rect(100, 0, 200, 100));
- sp<FakeWindowHandle> mirrorWindowInPrimary =
- firstWindowInPrimary->clone(application, mDispatcher, ADISPLAY_ID_DEFAULT);
+ sp<FakeWindowHandle> mirrorWindowInPrimary = firstWindowInPrimary->clone(ADISPLAY_ID_DEFAULT);
mirrorWindowInPrimary->setFrame(Rect(0, 100, 100, 200));
- sp<FakeWindowHandle> firstWindowInSecondary =
- firstWindowInPrimary->clone(application, mDispatcher, SECOND_DISPLAY_ID);
+ sp<FakeWindowHandle> firstWindowInSecondary = firstWindowInPrimary->clone(SECOND_DISPLAY_ID);
firstWindowInSecondary->setFrame(Rect(0, 0, 100, 100));
- sp<FakeWindowHandle> secondWindowInSecondary =
- secondWindowInPrimary->clone(application, mDispatcher, SECOND_DISPLAY_ID);
+ sp<FakeWindowHandle> secondWindowInSecondary = secondWindowInPrimary->clone(SECOND_DISPLAY_ID);
secondWindowInPrimary->setFrame(Rect(100, 0, 200, 100));
// Update window info, let it find window handle of second display first.
@@ -6999,6 +7078,55 @@
mWindow->assertNoEvents();
}
+/**
+ * Send an event to the app and have the app not respond right away.
+ * When ANR is raised, policy will tell the dispatcher to cancel the events for that window.
+ * So InputDispatcher will enqueue an ACTION_CANCEL event as well.
+ * At some point, the window becomes responsive again.
+ * Ensure that subsequent events get dropped, and the next gesture is delivered.
+ */
+TEST_F(InputDispatcherSingleWindowAnr, TwoGesturesWithAnr) {
+ mDispatcher->notifyMotion(MotionArgsBuilder(ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(10).y(10))
+ .build());
+
+ std::optional<uint32_t> sequenceNum = mWindow->receiveEvent(); // ACTION_DOWN
+ ASSERT_TRUE(sequenceNum);
+ const std::chrono::duration timeout = mWindow->getDispatchingTimeout(DISPATCHING_TIMEOUT);
+ mFakePolicy->assertNotifyWindowUnresponsiveWasCalled(timeout, mWindow);
+
+ mWindow->finishEvent(*sequenceNum);
+ mWindow->consumeMotionEvent(WithMotionAction(ACTION_CANCEL));
+ ASSERT_TRUE(mDispatcher->waitForIdle());
+ mFakePolicy->assertNotifyWindowResponsiveWasCalled(mWindow->getToken(), mWindow->getPid());
+
+ // Now that the window is responsive, let's continue the gesture.
+ mDispatcher->notifyMotion(MotionArgsBuilder(ACTION_MOVE, AINPUT_SOURCE_TOUCHSCREEN)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(11).y(11))
+ .build());
+
+ mDispatcher->notifyMotion(MotionArgsBuilder(POINTER_1_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(11).y(11))
+ .pointer(PointerBuilder(1, ToolType::FINGER).x(3).y(3))
+ .build());
+
+ mDispatcher->notifyMotion(MotionArgsBuilder(POINTER_1_UP, AINPUT_SOURCE_TOUCHSCREEN)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(11).y(11))
+ .pointer(PointerBuilder(1, ToolType::FINGER).x(3).y(3))
+ .build());
+ mDispatcher->notifyMotion(MotionArgsBuilder(ACTION_UP, AINPUT_SOURCE_TOUCHSCREEN)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(11).y(11))
+ .build());
+ // We already canceled this pointer, so the window shouldn't get any new events.
+ mWindow->assertNoEvents();
+
+ // Start another one.
+ mDispatcher->notifyMotion(MotionArgsBuilder(ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .pointer(PointerBuilder(0, ToolType::FINGER).x(15).y(15))
+ .build());
+ mWindow->consumeMotionEvent(WithMotionAction(ACTION_DOWN));
+}
+
class InputDispatcherMultiWindowAnr : public InputDispatcherTest {
virtual void SetUp() override {
InputDispatcherTest::SetUp();
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index f12aab7..cf1b018 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -3599,7 +3599,7 @@
return {inputBounds, inputBoundsValid};
}
-bool Layer::simpleBufferUpdate(const layer_state_t& s) const {
+bool Layer::isSimpleBufferUpdate(const layer_state_t& s) const {
const uint64_t requiredFlags = layer_state_t::eBufferChanged;
const uint64_t deniedFlags = layer_state_t::eProducerDisconnect | layer_state_t::eLayerChanged |
@@ -3608,51 +3608,42 @@
layer_state_t::eLayerStackChanged | layer_state_t::eAutoRefreshChanged |
layer_state_t::eReparent;
- const uint64_t allowedFlags = layer_state_t::eHasListenerCallbacksChanged |
- layer_state_t::eFrameRateSelectionPriority | layer_state_t::eFrameRateChanged |
- layer_state_t::eSurfaceDamageRegionChanged | layer_state_t::eApiChanged |
- layer_state_t::eMetadataChanged | layer_state_t::eDropInputModeChanged |
- layer_state_t::eInputInfoChanged;
-
if ((s.what & requiredFlags) != requiredFlags) {
- ALOGV("%s: false [missing required flags 0x%" PRIx64 "]", __func__,
- (s.what | requiredFlags) & ~s.what);
+ ATRACE_FORMAT_INSTANT("%s: false [missing required flags 0x%" PRIx64 "]", __func__,
+ (s.what | requiredFlags) & ~s.what);
return false;
}
if (s.what & deniedFlags) {
- ALOGV("%s: false [has denied flags 0x%" PRIx64 "]", __func__, s.what & deniedFlags);
+ ATRACE_FORMAT_INSTANT("%s: false [has denied flags 0x%" PRIx64 "]", __func__,
+ s.what & deniedFlags);
return false;
}
- if (s.what & allowedFlags) {
- ALOGV("%s: [has allowed flags 0x%" PRIx64 "]", __func__, s.what & allowedFlags);
- }
-
if (s.what & layer_state_t::ePositionChanged) {
if (mRequestedTransform.tx() != s.x || mRequestedTransform.ty() != s.y) {
- ALOGV("%s: false [ePositionChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [ePositionChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eAlphaChanged) {
if (mDrawingState.color.a != s.color.a) {
- ALOGV("%s: false [eAlphaChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eAlphaChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eColorTransformChanged) {
if (mDrawingState.colorTransform != s.colorTransform) {
- ALOGV("%s: false [eColorTransformChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eColorTransformChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eBackgroundColorChanged) {
if (mDrawingState.bgColorLayer || s.bgColor.a != 0) {
- ALOGV("%s: false [eBackgroundColorChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eBackgroundColorChanged changed]", __func__);
return false;
}
}
@@ -3662,91 +3653,92 @@
mRequestedTransform.dtdy() != s.matrix.dtdy ||
mRequestedTransform.dtdx() != s.matrix.dtdx ||
mRequestedTransform.dsdy() != s.matrix.dsdy) {
- ALOGV("%s: false [eMatrixChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eMatrixChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eCornerRadiusChanged) {
if (mDrawingState.cornerRadius != s.cornerRadius) {
- ALOGV("%s: false [eCornerRadiusChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eCornerRadiusChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eBackgroundBlurRadiusChanged) {
if (mDrawingState.backgroundBlurRadius != static_cast<int>(s.backgroundBlurRadius)) {
- ALOGV("%s: false [eBackgroundBlurRadiusChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eBackgroundBlurRadiusChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eBufferTransformChanged) {
if (mDrawingState.bufferTransform != s.bufferTransform) {
- ALOGV("%s: false [eBufferTransformChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eBufferTransformChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eTransformToDisplayInverseChanged) {
if (mDrawingState.transformToDisplayInverse != s.transformToDisplayInverse) {
- ALOGV("%s: false [eTransformToDisplayInverseChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eTransformToDisplayInverseChanged changed]",
+ __func__);
return false;
}
}
if (s.what & layer_state_t::eCropChanged) {
if (mDrawingState.crop != s.crop) {
- ALOGV("%s: false [eCropChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eCropChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eDataspaceChanged) {
if (mDrawingState.dataspace != s.dataspace) {
- ALOGV("%s: false [eDataspaceChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eDataspaceChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eHdrMetadataChanged) {
if (mDrawingState.hdrMetadata != s.hdrMetadata) {
- ALOGV("%s: false [eHdrMetadataChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eHdrMetadataChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eSidebandStreamChanged) {
if (mDrawingState.sidebandStream != s.sidebandStream) {
- ALOGV("%s: false [eSidebandStreamChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eSidebandStreamChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eColorSpaceAgnosticChanged) {
if (mDrawingState.colorSpaceAgnostic != s.colorSpaceAgnostic) {
- ALOGV("%s: false [eColorSpaceAgnosticChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eColorSpaceAgnosticChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eShadowRadiusChanged) {
if (mDrawingState.shadowRadius != s.shadowRadius) {
- ALOGV("%s: false [eShadowRadiusChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eShadowRadiusChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eFixedTransformHintChanged) {
if (mDrawingState.fixedTransformHint != s.fixedTransformHint) {
- ALOGV("%s: false [eFixedTransformHintChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eFixedTransformHintChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eTrustedOverlayChanged) {
if (mDrawingState.isTrustedOverlay != s.isTrustedOverlay) {
- ALOGV("%s: false [eTrustedOverlayChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eTrustedOverlayChanged changed]", __func__);
return false;
}
}
@@ -3755,28 +3747,28 @@
StretchEffect temp = s.stretchEffect;
temp.sanitize();
if (mDrawingState.stretchEffect != temp) {
- ALOGV("%s: false [eStretchChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eStretchChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eBufferCropChanged) {
if (mDrawingState.bufferCrop != s.bufferCrop) {
- ALOGV("%s: false [eBufferCropChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eBufferCropChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eDestinationFrameChanged) {
if (mDrawingState.destinationFrame != s.destinationFrame) {
- ALOGV("%s: false [eDestinationFrameChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eDestinationFrameChanged changed]", __func__);
return false;
}
}
if (s.what & layer_state_t::eDimmingEnabledChanged) {
if (mDrawingState.dimmingEnabled != s.dimmingEnabled) {
- ALOGV("%s: false [eDimmingEnabledChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eDimmingEnabledChanged changed]", __func__);
return false;
}
}
@@ -3784,12 +3776,11 @@
if (s.what & layer_state_t::eExtendedRangeBrightnessChanged) {
if (mDrawingState.currentHdrSdrRatio != s.currentHdrSdrRatio ||
mDrawingState.desiredHdrSdrRatio != s.desiredHdrSdrRatio) {
- ALOGV("%s: false [eExtendedRangeBrightnessChanged changed]", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false [eExtendedRangeBrightnessChanged changed]", __func__);
return false;
}
}
- ALOGV("%s: true", __func__);
return true;
}
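The ALOGV-to-ATRACE_FORMAT_INSTANT swap above moves these reject reasons out of verbose logging and into the atrace/perfetto timeline, so each reason lands right next to the transaction that triggered it. A hedged sketch of the macro's use (the include path is an assumption; in this tree the macro appears to come from gui/TraceUtils.h):

#include <cinttypes>
#include <gui/TraceUtils.h>  // assumed location of ATRACE_FORMAT_INSTANT

// Emits a printf-formatted instant event on the calling thread's trace track.
bool isAllowed(uint64_t what, uint64_t deniedFlags) {
    if (what & deniedFlags) {
        ATRACE_FORMAT_INSTANT("isAllowed: false [denied flags 0x%" PRIx64 "]",
                              what & deniedFlags);
        return false;
    }
    return true;
}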
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index f7596e2..f34fdd9 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -866,7 +866,7 @@
std::string getPendingBufferCounterName() { return mBlastTransactionName; }
bool updateGeometry();
- bool simpleBufferUpdate(const layer_state_t&) const;
+ bool isSimpleBufferUpdate(const layer_state_t& s) const;
static bool isOpaqueFormat(PixelFormat format);
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index c52c912..e2e89ad 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -4444,26 +4444,27 @@
bool SurfaceFlinger::shouldLatchUnsignaled(const sp<Layer>& layer, const layer_state_t& state,
size_t numStates, bool firstTransaction) const {
if (enableLatchUnsignaledConfig == LatchUnsignaledConfig::Disabled) {
- ALOGV("%s: false (LatchUnsignaledConfig::Disabled)", __func__);
+ ATRACE_FORMAT_INSTANT("%s: false (LatchUnsignaledConfig::Disabled)", __func__);
return false;
}
if (enableLatchUnsignaledConfig == LatchUnsignaledConfig::Always) {
- ALOGV("%s: true (LatchUnsignaledConfig::Always)", __func__);
+ ATRACE_FORMAT_INSTANT("%s: true (LatchUnsignaledConfig::Always)", __func__);
return true;
}
// We only want to latch unsignaled when a single layer is updated in this
// transaction (i.e. not a blast sync transaction).
if (numStates != 1) {
- ALOGV("%s: false (numStates=%zu)", __func__, numStates);
+ ATRACE_FORMAT_INSTANT("%s: false (numStates=%zu)", __func__, numStates);
return false;
}
if (enableLatchUnsignaledConfig == LatchUnsignaledConfig::AutoSingleLayer) {
if (!firstTransaction) {
- ALOGV("%s: false (LatchUnsignaledConfig::AutoSingleLayer; not first transaction)",
- __func__);
+ ATRACE_FORMAT_INSTANT("%s: false (LatchUnsignaledConfig::AutoSingleLayer; not first "
+ "transaction)",
+ __func__);
return false;
}
@@ -4471,19 +4472,14 @@
// as it leads to jank due to RenderEngine waiting for unsignaled buffer
// or window animations being slow.
if (mScheduler->vsyncModulator().isVsyncConfigEarly()) {
- ALOGV("%s: false (LatchUnsignaledConfig::AutoSingleLayer; isVsyncConfigEarly)",
- __func__);
+ ATRACE_FORMAT_INSTANT("%s: false (LatchUnsignaledConfig::AutoSingleLayer; "
+ "isVsyncConfigEarly)",
+ __func__);
return false;
}
}
- if (!layer->simpleBufferUpdate(state)) {
- ALOGV("%s: false (!simpleBufferUpdate)", __func__);
- return false;
- }
-
- ALOGV("%s: true", __func__);
- return true;
+ return layer->isSimpleBufferUpdate(state);
}
status_t SurfaceFlinger::setTransactionState(
diff --git a/vulkan/libvulkan/swapchain.cpp b/vulkan/libvulkan/swapchain.cpp
index 5965953..af87306 100644
--- a/vulkan/libvulkan/swapchain.cpp
+++ b/vulkan/libvulkan/swapchain.cpp
@@ -877,6 +877,7 @@
int width, height;
int transform_hint;
int max_buffer_count;
+ int min_undequeued_buffers;
if (surface == VK_NULL_HANDLE) {
const InstanceData& instance_data = GetData(physicalDevice);
ProcHook::Extension surfaceless = ProcHook::GOOGLE_surfaceless_query;
@@ -929,17 +930,24 @@
return VK_ERROR_SURFACE_LOST_KHR;
}
+ err = window->query(window, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+ &min_undequeued_buffers);
+ if (err != android::OK) {
+ ALOGE("NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)",
+ strerror(-err), err);
+ return VK_ERROR_SURFACE_LOST_KHR;
+ }
+
if (pPresentMode && IsSharedPresentMode(pPresentMode->presentMode)) {
capabilities->minImageCount = 1;
capabilities->maxImageCount = 1;
} else if (pPresentMode && pPresentMode->presentMode == VK_PRESENT_MODE_MAILBOX_KHR) {
- // TODO: use undequeued buffer requirement for more precise bound
- capabilities->minImageCount = std::min(max_buffer_count, 4);
+ capabilities->minImageCount =
+ std::min(max_buffer_count, min_undequeued_buffers + 2);
capabilities->maxImageCount = static_cast<uint32_t>(max_buffer_count);
} else {
- // TODO: if we're able to, provide better bounds on the number of buffers
- // for other modes as well.
- capabilities->minImageCount = std::min(max_buffer_count, 3);
+ capabilities->minImageCount =
+ std::min(max_buffer_count, min_undequeued_buffers + 1);
capabilities->maxImageCount = static_cast<uint32_t>(max_buffer_count);
}
}
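To make the bounds above concrete: min_undequeued_buffers is how many buffers the consumer may hold undequeued, so the app needs one more than that to always have an image to render into, and MAILBOX needs one more again so a queued-but-unpresented image can be continuously replaced. A condensed restatement of the arithmetic (minImageCountFor is hypothetical; shared present modes, which pin both bounds to 1, are handled earlier in the real code and omitted here):

#include <algorithm>
#include <cstdint>
#include <vulkan/vulkan.h>

// Hypothetical restatement of the capability bounds computed above.
uint32_t minImageCountFor(VkPresentModeKHR mode, int max_buffer_count,
                          int min_undequeued_buffers) {
    const int wanted = (mode == VK_PRESENT_MODE_MAILBOX_KHR)
            ? min_undequeued_buffers + 2   // one to render, one queued and replaceable
            : min_undequeued_buffers + 1;  // one to render
    return static_cast<uint32_t>(std::min(max_buffer_count, wanted));
}
// e.g. min_undequeued_buffers == 1, max_buffer_count == 64:
//   MAILBOX -> 3, all other non-shared modes -> 2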