Merge changes I866fe138,I0096a2fc

* changes:
  Remove BLOB AHWB tests from 1.2 VTS.
  Update loopTimeoutDuration documentation in NNAPI
diff --git a/compatibility_matrices/build/vintf_compatibility_matrix.go b/compatibility_matrices/build/vintf_compatibility_matrix.go
index e48f993..2772ba3 100644
--- a/compatibility_matrices/build/vintf_compatibility_matrix.go
+++ b/compatibility_matrices/build/vintf_compatibility_matrix.go
@@ -40,7 +40,15 @@
 		Description: "assemble_vintf -i ${inputs}",
 	}, "inputs")
 
-	kernelConfigTag = dependencyTag{name: "kernel-config"}
+	xmllintXsd = pctx.AndroidStaticRule("xmllint-xsd", blueprint.RuleParams{
+		Command:     `$XmlLintCmd --schema $xsd $in > /dev/null && touch -a $out`,
+		CommandDeps: []string{"$XmlLintCmd"},
+		Restat:      true,
+	}, "xsd")
+
+	kernelConfigTag  = dependencyTag{name: "kernel-config"}
+	schemaTag        = dependencyTag{name: "matrix-schema"}
+	schemaModuleName = "compatibility_matrix_schema"
 )
 
 const (
@@ -62,11 +70,13 @@
 	android.ModuleBase
 	properties vintfCompatibilityMatrixProperties
 
-	genFile android.WritablePath
+	genFile                android.WritablePath
+	additionalDependencies android.WritablePaths
 }
 
 func init() {
 	pctx.HostBinToolVariable("assembleVintfCmd", "assemble_vintf")
+	pctx.HostBinToolVariable("XmlLintCmd", "xmllint")
 	android.RegisterModuleType("vintf_compatibility_matrix", vintfCompatibilityMatrixFactory)
 }
 
@@ -82,6 +92,42 @@
 func (g *vintfCompatibilityMatrixRule) DepsMutator(ctx android.BottomUpMutatorContext) {
 	android.ExtractSourcesDeps(ctx, g.properties.Srcs)
 	ctx.AddDependency(ctx.Module(), kernelConfigTag, g.properties.Kernel_configs...)
+	ctx.AddDependency(ctx.Module(), schemaTag, schemaModuleName)
+}
+
+func (g *vintfCompatibilityMatrixRule) timestampFilePath(ctx android.ModuleContext, path android.Path) android.WritablePath {
+	return android.GenPathWithExt(ctx, "vintf-xmllint", path, "ts")
+}
+
+func (g *vintfCompatibilityMatrixRule) generateValidateBuildAction(ctx android.ModuleContext, path android.Path, schema android.Path) {
+	timestamp := g.timestampFilePath(ctx, path)
+	ctx.Build(pctx, android.BuildParams{
+		Rule:        xmllintXsd,
+		Description: "xmllint-xsd",
+		Input:       path,
+		Output:      timestamp,
+		Implicit:    schema,
+		Args: map[string]string{
+			"xsd": schema.String(),
+		},
+	})
+	g.additionalDependencies = append(g.additionalDependencies, timestamp)
+}
+
+func (g *vintfCompatibilityMatrixRule) getSchema(ctx android.ModuleContext) android.OptionalPath {
+	schemaModule := ctx.GetDirectDepWithTag(schemaModuleName, schemaTag)
+	sfp, ok := schemaModule.(android.SourceFileProducer)
+	if !ok {
+		ctx.ModuleErrorf("Implicit dependency %q has no srcs", ctx.OtherModuleName(schemaModule))
+		return android.OptionalPath{}
+	}
+
+	schemaSrcs := sfp.Srcs()
+	if len(schemaSrcs) != 1 {
+		ctx.PropertyErrorf(`srcs of implicit dependency %q has length %d != 1`, ctx.OtherModuleName(schemaModule), len(schemaSrcs))
+		return android.OptionalPath{}
+	}
+	return android.OptionalPathForPath(schemaSrcs[0])
 }
 
 func (g *vintfCompatibilityMatrixRule) GenerateAndroidBuildActions(ctx android.ModuleContext) {
@@ -91,7 +137,18 @@
 		outputFilename = g.Name()
 	}
 
+	schema := g.getSchema(ctx)
+	if !schema.Valid() {
+		return
+	}
+
 	inputPaths := android.PathsForModuleSrc(ctx, g.properties.Srcs)
+	for _, srcPath := range inputPaths {
+		g.generateValidateBuildAction(ctx, srcPath, schema.Path())
+	}
+
+	// No need to validate matrices from kernel configs because they are generated by
+	// assemble_vintf.
 	ctx.VisitDirectDepsWithTag(kernelConfigTag, func(m android.Module) {
 		if k, ok := m.(*configs.KernelConfigRule); ok {
 			inputPaths = append(inputPaths, k.OutputPath())
@@ -112,6 +169,7 @@
 			"inputs": strings.Join(inputPaths.Strings(), ":"),
 		},
 	})
+	g.generateValidateBuildAction(ctx, g.genFile, schema.Path())
 
 	ctx.InstallFile(android.PathForModuleInstall(ctx, "etc", relpath), outputFilename, g.genFile)
 }
@@ -126,6 +184,9 @@
 				if proptools.String(g.properties.Stem) != "" {
 					fmt.Fprintln(w, "LOCAL_MODULE_STEM :=", proptools.String(g.properties.Stem))
 				}
+				for _, path := range g.additionalDependencies {
+					fmt.Fprintln(w, "LOCAL_ADDITIONAL_DEPENDENCIES +=", path.String())
+				}
 			},
 		},
 	}
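
As an illustration: the new xmllint-xsd rule boils down to running xmllint --schema
against each source matrix and touching a timestamp file on success, which the
Restat/timestamp pattern then feeds back into incremental builds. The standalone Go
sketch below mirrors that behavior under stated assumptions (xmllint on PATH,
hypothetical file names; the real rule is driven by Soong, not by this program).

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    )

    // validateAgainstSchema mirrors the rule's command:
    //   $XmlLintCmd --schema $xsd $in > /dev/null && touch -a $out
    func validateAgainstSchema(xmlPath, xsdPath, stampPath string) error {
    	cmd := exec.Command("xmllint", "--schema", xsdPath, xmlPath)
    	cmd.Stdout = nil       // discard the echoed document; only the exit status matters
    	cmd.Stderr = os.Stderr // keep validation errors visible
    	if err := cmd.Run(); err != nil {
    		return fmt.Errorf("%s failed schema validation: %w", xmlPath, err)
    	}
    	// Record success as an (empty) timestamp file, as the build rule does.
    	f, err := os.OpenFile(stampPath, os.O_CREATE|os.O_WRONLY, 0644)
    	if err != nil {
    		return err
    	}
    	return f.Close()
    }

    func main() {
    	// Hypothetical inputs; the real paths come from the module's srcs and the
    	// compatibility_matrix_schema dependency.
    	err := validateAgainstSchema("compatibility_matrix.xml", "compatibility_matrix.xsd", "matrix.ts")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }
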
diff --git a/current.txt b/current.txt
index cb8ddee..aeadef1 100644
--- a/current.txt
+++ b/current.txt
@@ -584,11 +584,11 @@
 # ABI preserving changes to HALs during Android R
 b69a7615c508acf5c5201efd1bfa3262167874fc3594e2db5a3ff93addd8ac75 android.hardware.keymaster@4.0::IKeymasterDevice
 eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardware.neuralnetworks@1.0::IPreparedModel
-8eac60e1f724d141c71c69f06d4544acb720a55dfbbcd97fa01bb3d25ee4e2f5 android.hardware.neuralnetworks@1.0::types
+92e101b30e47bdf526a01c52cecfbe730def5997b8260ab497eb949eb2a6dcdf android.hardware.neuralnetworks@1.0::types
 5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
 fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
 40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
-00649d29680f2c47edf60000c3ae7ae906ba638f0616947147e3676a83cf36fa android.hardware.neuralnetworks@1.2::types
+ee1a0dee5be00a6fe2d4d3270068c78016dcb194d768fe07ed894ea20904037f android.hardware.neuralnetworks@1.2::types
 a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
 1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
 fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -639,14 +639,14 @@
 6e904be0ddca5ae1de8eba020e6c38ed935ea7d80cd08f47787f137a0ca58555 android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
 ee9dc34b9925b8367b1111c72bd6d9d375432735e451572ca5a665d8516a7744 android.hardware.neuralnetworks@1.3::IPreparedModel
 eee3430cc86c97c7b407495863d8fb61da6f1a64b7721e77b9b4909b11b174e9 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-c9320b04ec302624985180a02d591bea5e435601fc411a6cabb58878e4e1ad68 android.hardware.neuralnetworks@1.3::types
+acf84925f8ee0a651f2ec547ac334034de266479b93af5434f6c1f25e66aba96 android.hardware.neuralnetworks@1.3::types
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
 619fc9839ec6e369cfa9b28e3e9412e6885720ff8f9b5750c1b6ffb905120391 android.hardware.wifi.supplicant@1.3::ISupplicantStaIfaceCallback
 c9273429fcf98d797d3bb07fdba6f1be95bf960f9255cde169fd1ca4db85f856 android.hardware.wifi.supplicant@1.3::ISupplicantStaNetwork
 9b0a3ab6f4f74b971ed094426d8a443e29b512ff03e1ab50c07156396cdb2483 android.hardware.wifi.supplicant@1.3::types
-6b8dcd5e3e33a524cc7ebb14671a76ad3a2d333467397ce82acc4024346386f8 android.hardware.radio@1.5::types
+a5bcd595a5108312fe2eb402e716d0b7dab8eb689a2a5f54fdef3ff71f3babd5 android.hardware.radio@1.5::types
 b454df853441c12f6e425e8a60dd29fda20f5e6e39b93d1103e4b37495db38aa android.hardware.radio@1.5::IRadio
 fcbb0742a88215ee7a6d7ce0825d253eb2b50391fc6c8c48667f9fd7f6d4549e android.hardware.radio@1.5::IRadioIndication
 b809193970a91ca637a4b0184767315601d32e3ef3d5992ffbc7a8d14a14f015 android.hardware.radio@1.5::IRadioResponse
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 1175a30..620eefb 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -261,7 +261,7 @@
      *      filter.
      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
-     *      the bias must be of the same type.
+     *       the bias must be of the same type.
      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
      *      of 0 and bias_scale == input_scale * filter_scale.
@@ -289,7 +289,7 @@
      *      filter.
      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
-     *      the bias must be of the same
+     *       the bias must be of the same
      *      type.
      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
@@ -356,7 +356,7 @@
      *      specifying the filter.
      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
-     *      the bias must be of the same type.
+     *       the bias must be of the same type.
      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
      *      of 0 and bias_scale == input_scale * filter_scale.
@@ -385,7 +385,7 @@
      *      specifying the filter.
      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
-     *      the bias must be of the same type.
+     *       the bias must be of the same type.
      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
      *      of 0 and bias_scale == input_scale * filter_scale.
@@ -628,7 +628,7 @@
     HASHTABLE_LOOKUP = 10,
 
     /**
-     * Applies L2 normalization along the depth dimension.
+     * Applies L2 normalization along the axis dimension.
      *
      * The values in the output tensor are computed as:
      *
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index f0fd769..2c3c599 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -846,7 +846,7 @@
     HASHTABLE_LOOKUP = @1.1::OperationType:HASHTABLE_LOOKUP,
 
     /**
-     * Applies L2 normalization along the depth dimension.
+     * Applies L2 normalization along the axis dimension.
      *
      * The values in the output tensor are computed as:
      *
@@ -854,8 +854,7 @@
      *         input[batch, row, col, channel] /
      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
      *
-     * For input tensor with rank less than 4, independently normalizes each
-     * 1-D slice along dimension dim.
+     * By default the axis dimension is the last dimension of the input tensor.
      *
      * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
@@ -3843,7 +3842,8 @@
      * * 1: A scalar {@link OperandType::INT32}, specifying the number of
      *      independent samples to draw for each row slice.
      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
-     *      specifying seeds used to initialize the random distribution.
+     *      specifying seeds used to initialize the random distribution. If both
+     *      provided seeds are 0, both will be randomly generated.
      * Outputs:
      * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
      *      [batches, samples], containing the drawn samples.
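
To make the reworded seed documentation concrete ("If both provided seeds are 0, both
will be randomly generated"): the rule is purely about seed resolution before sampling.
A hedged Go sketch of that rule follows; it is illustrative only and not how the NNAPI
runtime or any driver actually implements the multinomial operation.

    package main

    import (
    	"crypto/rand"
    	"encoding/binary"
    	"fmt"
    )

    // resolveSeeds applies the documented rule: if both provided seeds are 0,
    // both are generated randomly; otherwise they are used as given.
    func resolveSeeds(seed1, seed2 int32) (int32, int32) {
    	if seed1 != 0 || seed2 != 0 {
    		return seed1, seed2
    	}
    	var buf [8]byte
    	if _, err := rand.Read(buf[:]); err != nil {
    		panic(err) // no entropy source; nothing sensible to fall back to here
    	}
    	return int32(binary.LittleEndian.Uint32(buf[0:4])), int32(binary.LittleEndian.Uint32(buf[4:8]))
    }

    func main() {
    	s1, s2 := resolveSeeds(0, 0)
    	fmt.Println("seeds in use:", s1, s2)
    }
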
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index daaf22e..56930c2 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -833,7 +833,7 @@
     HASHTABLE_LOOKUP = @1.2::OperationType:HASHTABLE_LOOKUP,
 
     /**
-     * Applies L2 normalization along the depth dimension.
+     * Applies L2 normalization along the axis dimension.
      *
      * The values in the output tensor are computed as:
      *
@@ -841,8 +841,7 @@
      *         input[batch, row, col, channel] /
      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
      *
-     * For input tensor with rank less than 4, independently normalizes each
-     * 1-D slice along dimension dim.
+     * By default the axis dimension is the last dimension of the input tensor.
      *
      * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
@@ -867,6 +866,10 @@
      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
+     *
+     *      NOTE: Before HAL version 1.3, if the elements along an axis are all zeros,
+     *      the result is undefined. Since HAL version 1.3, if the elements along an axis
+     *      are all zeros, the result is logical zero.
      */
     L2_NORMALIZATION = @1.2::OperationType:L2_NORMALIZATION,
 
@@ -4063,7 +4066,8 @@
      * * 1: A scalar {@link OperandType::INT32}, specifying the number of
      *      independent samples to draw for each row slice.
      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
-     *      specifying seeds used to initialize the random distribution.
+     *      specifying seeds used to initialize the random distribution. If both
+     *      provided seeds are 0, both will be randomly generated.
      * Outputs:
      * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
      *      [batches, samples], containing the drawn samples.
@@ -5168,6 +5172,8 @@
      * * {@link OperandType::TENSOR_FLOAT16}
      * * {@link OperandType::TENSOR_FLOAT32}
      *
+     * Supported tensor rank: from 1.
+     *
      * Inputs:
      * * 0: A tensor, specifying the input. May be zero-sized.
      * * 1: A scalar, specifying the alpha parameter.
@@ -5197,6 +5203,8 @@
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
      *
+     * Supported tensor rank: from 1.
+     *
      * Inputs:
      * * 0: A tensor, specifying the input. May be zero-sized.
      *
@@ -5215,6 +5223,8 @@
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_INT32}
      *
+     * Supported tensor rank: from 1.
+     *
      * Inputs:
      * * 0: A 1-D tensor, specifying the desired output tensor shape.
      * * 1: A scalar, specifying the value to fill the output tensors with.
@@ -5248,6 +5258,8 @@
      * * {@link OperandType::TENSOR_QUANT8_SYMM}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
      *
+     * Supported tensor rank: from 1.
+     *
      * Inputs:
      * * 0: The input tensor.
      *
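
The L2_NORMALIZATION rewording above can be read as follows: normalization runs along
the axis dimension (by default the last dimension of the input), and since HAL 1.3 an
all-zero slice produces logical zero output instead of undefined results. The Go sketch
below is a minimal illustration under those assumptions, for float32 only; the real
operation also covers the quantized operand types listed above.

    package main

    import (
    	"fmt"
    	"math"
    )

    // l2NormalizeLastAxis normalizes each contiguous slice of length axisSize,
    // i.e. L2 normalization along the last (default) axis of a flattened tensor.
    func l2NormalizeLastAxis(input []float32, axisSize int) []float32 {
    	output := make([]float32, len(input))
    	for start := 0; start < len(input); start += axisSize {
    		var sumSq float64
    		for i := start; i < start+axisSize; i++ {
    			sumSq += float64(input[i]) * float64(input[i])
    		}
    		if sumSq == 0 {
    			// HAL 1.3 behavior: an all-zero slice stays all zeros.
    			continue
    		}
    		norm := math.Sqrt(sumSq)
    		for i := start; i < start+axisSize; i++ {
    			output[i] = float32(float64(input[i]) / norm)
    		}
    	}
    	return output
    }

    func main() {
    	// Two slices of axis size 2: [3 4] normalizes to [0.6 0.8]; [0 0] stays [0 0].
    	fmt.Println(l2NormalizeLastAxis([]float32{3, 4, 0, 0}, 2))
    }
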
diff --git a/radio/1.5/types.hal b/radio/1.5/types.hal
index 248f56e..b061bd5 100644
--- a/radio/1.5/types.hal
+++ b/radio/1.5/types.hal
@@ -203,6 +203,9 @@
     vec<int32_t> channels;
 };
 
+/**
+ * IRadio 1.5 supports NGRAN bands up to V16.2.0
+ */
 enum NgranBands : int32_t {
     /** 3GPP TS 38.101-1, Table 5.2-1: FR1 bands */
     BAND_1 = 1,
@@ -243,7 +246,13 @@
     BAND_83 = 83,
     BAND_84 = 84,
     BAND_86 = 86,
+    BAND_89 = 89,
     BAND_90 = 90,
+    BAND_91 = 91,
+    BAND_92 = 92,
+    BAND_93 = 93,
+    BAND_94 = 94,
+    BAND_95 = 95,
     /** 3GPP TS 38.101-2, Table 5.2-1: FR2 bands */
     BAND_257 = 257,
     BAND_258 = 258,
@@ -251,6 +260,10 @@
     BAND_261 = 261,
 };
 
+/**
+ * Extended from @1.1 UtranBands to add TD-SCDMA bands
+ * IRadio 1.5 supports UTRAN bands up to V15.0.0
+ */
 enum UtranBands : @1.1::UtranBands {
     /** TD-SCDMA bands. 3GPP TS 25.102, Table 5.2: Frequency bands */
     BAND_A = 101,
@@ -262,6 +275,25 @@
 };
 
 /**
+ * Extended from @1.1 EutranBands to add more bands from 3GPP TS 36.101, Table 5.5: Operating bands
+ * IRadio 1.5 supports EUTRAN bands up to V16.4.0
+ */
+enum EutranBands : @1.1::EutranBands {
+    BAND_49 = 49,
+    BAND_50 = 50,
+    BAND_51 = 51,
+    BAND_52 = 52,
+    BAND_53 = 53,
+    BAND_71 = 71,
+    BAND_72 = 72,
+    BAND_73 = 73,
+    BAND_74 = 74,
+    BAND_85 = 85,
+    BAND_87 = 87,
+    BAND_88 = 88,
+};
+
+/**
  * Overwritten from @1.2::NetworkScanRequest to update RadioAccessSpecifier to 1.5 version.
  */
 struct NetworkScanRequest {
diff --git a/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp b/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp
index 540529d..d0ebe4d 100644
--- a/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp
+++ b/sensors/2.0/vts/functional/VtsHalSensorsV2_0TargetTest.cpp
@@ -898,6 +898,8 @@
     callback.waitForEvents(sensors, kFiveHundredMs + (5 * maxMinDelay));
     activateAllSensors(false);
 
+    getEnvironment()->unregisterCallback();
+
     for (const SensorInfo& sensor : sensors) {
         // Skip sensors that did not previously report an event
         if (lastEventTimestampMap.find(sensor.sensorHandle) == lastEventTimestampMap.end()) {