Merge "Abort operation upon arithmetic overflows" into sc-dev
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 9d9ed70..7caa457 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -262,6 +262,8 @@
     kParamIndexTunneledMode, // struct
     kParamIndexTunnelHandle, // int32[]
     kParamIndexTunnelSystemTime, // int64
+    kParamIndexTunnelHoldRender, // bool
+    kParamIndexTunnelStartRender, // bool
 
     // dmabuf allocator
     kParamIndexStoreDmaBufUsage,  // store, struct
@@ -2366,6 +2368,31 @@
         C2PortTunnelSystemTime;
 constexpr char C2_PARAMKEY_OUTPUT_RENDER_TIME[] = "output.render-time";
 
+
+/**
+ * Tunneled mode video peek signaling flag.
+ *
+ * When a video frame is pushed to the decoder with this parameter set to true,
+ * the decoder must decode the frame, signal partial completion, and hold on to
+ * the frame until C2StreamTunnelStartRender is set to true (which resets this
+ * flag). Flush will also result in the frames being returned back to the
+ * client (but not rendered).
+ */
+typedef C2StreamParam<C2Info, C2EasyBoolValue, kParamIndexTunnelHoldRender>
+        C2StreamTunnelHoldRender;
+constexpr char C2_PARAMKEY_TUNNEL_HOLD_RENDER[] = "output.tunnel-hold-render";
+
+/**
+ * Tunneled mode video peek signaling flag.
+ *
+ * Upon receiving this flag, the decoder shall set C2StreamTunnelHoldRender to
+ * false, which shall cause any frames held for rendering to be immediately
+ * displayed, regardless of their timestamps.
+ */
+typedef C2StreamParam<C2Info, C2EasyBoolValue, kParamIndexTunnelStartRender>
+        C2StreamTunnelStartRender;
+constexpr char C2_PARAMKEY_TUNNEL_START_RENDER[] = "output.tunnel-start-render";
+
 C2ENUM(C2PlatformConfig::encoding_quality_level_t, uint32_t,
     NONE,
     S_HANDHELD,
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index ce15a30..16398a4 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -673,6 +673,10 @@
         mCodec->mCallback->onOutputBuffersChanged();
     }
 
+    void onFirstTunnelFrameReady() override {
+        mCodec->mCallback->onFirstTunnelFrameReady();
+    }
+
 private:
     CCodec *mCodec;
 };
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 3c3b41d..f88408e 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -209,6 +209,7 @@
     int32_t flags = 0;
     int32_t tmp = 0;
     bool eos = false;
+    bool tunnelFirstFrame = false;
     if (buffer->meta()->findInt32("eos", &tmp) && tmp) {
         eos = true;
         mInputMetEos = true;
@@ -217,6 +218,9 @@
     if (buffer->meta()->findInt32("csd", &tmp) && tmp) {
         flags |= C2FrameData::FLAG_CODEC_CONFIG;
     }
+    if (buffer->meta()->findInt32("tunnel-first-frame", &tmp) && tmp) {
+        tunnelFirstFrame = true;
+    }
     ALOGV("[%s] queueInputBuffer: buffer->size() = %zu", mName, buffer->size());
     std::list<std::unique_ptr<C2Work>> items;
     std::unique_ptr<C2Work> work(new C2Work);
@@ -288,6 +292,13 @@
         // TODO: fill info's
 
         work->input.configUpdate = std::move(mParamsToBeSet);
+        if (tunnelFirstFrame) {
+            C2StreamTunnelHoldRender::input tunnelHoldRender{
+                0u /* stream */,
+                C2_TRUE /* value */
+            };
+            work->input.configUpdate.push_back(C2Param::Copy(tunnelHoldRender));
+        }
         work->worklets.clear();
         work->worklets.emplace_back(new C2Worklet);
 
@@ -1724,6 +1735,15 @@
                 }
                 break;
             }
+            case C2StreamTunnelHoldRender::CORE_INDEX: {
+                C2StreamTunnelHoldRender::output firstTunnelFrameHoldRender;
+                if (!(worklet->output.flags & C2FrameData::FLAG_INCOMPLETE)) break;
+                if (!firstTunnelFrameHoldRender.updateFrom(*param)) break;
+                if (firstTunnelFrameHoldRender.value != C2_TRUE) break;
+                ALOGV("[%s] onWorkDone: first tunnel frame ready", mName);
+                mCCodecCallback->onFirstTunnelFrameReady();
+                break;
+            }
             default:
                 ALOGV("[%s] onWorkDone: unrecognized config update (%08X)",
                       mName, param->index());
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 45da003..5a2aca2 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -45,6 +45,7 @@
     virtual void onError(status_t err, enum ActionCode actionCode) = 0;
     virtual void onOutputFramesRendered(int64_t mediaTimeUs, nsecs_t renderTimeNs) = 0;
     virtual void onOutputBuffersChanged() = 0;
+    virtual void onFirstTunnelFrameReady() = 0;
 };
 
 /**
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 27e87e6..2df0ba2 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -938,6 +938,14 @@
             return value == 0 ? C2_FALSE : C2_TRUE;
         }));
 
+    add(ConfigMapper("android._trigger-tunnel-peek", C2_PARAMKEY_TUNNEL_START_RENDER, "value")
+        .limitTo(D::PARAM & D::VIDEO & D::DECODER)
+        .withMapper([](C2Value v) -> C2Value {
+            int32_t value = 0;
+            (void)v.get(&value);
+            return value == 0 ? C2_FALSE : C2_TRUE;
+        }));
+
     /* still to do
     constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
 
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 4fd3a56..443e26c 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -1700,17 +1700,17 @@
         return ERROR_MALFORMED;
     }
 
-    size_t header_start = 0;
-    size_t header_lenth = 0;
+    long header_start = 0;
+    long header_length = 0;
     for (header_start = 0; header_start < frame.len - 4; header_start++) {
         if (ntohl(0x000001b3) == *(uint32_t*)((uint8_t*)tmpData.get() + header_start)) {
             break;
         }
     }
     bool isComplete_csd = false;
-    for (header_lenth = 0; header_lenth < frame.len - 4 - header_start; header_lenth++) {
+    for (header_length = 0; header_length < frame.len - 4 - header_start; header_length++) {
         if (ntohl(0x000001b8) == *(uint32_t*)((uint8_t*)tmpData.get()
-                                + header_start + header_lenth)) {
+                                + header_start + header_length)) {
             isComplete_csd = true;
             break;
         }
@@ -1720,7 +1720,7 @@
         return ERROR_MALFORMED;
     }
     addESDSFromCodecPrivate(trackInfo->mMeta, false,
-                              (uint8_t*)(tmpData.get()) + header_start, header_lenth);
+                            (uint8_t*)(tmpData.get()) + header_start, header_length);
 
     return OK;
 
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 4789ad2..88e752b 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -71,6 +71,25 @@
 Mutex gSoundTriggerCaptureStateListenerLock;
 sp<CaptureStateListenerImpl> gSoundTriggerCaptureStateListener = nullptr;
 
+// Binder for the AudioFlinger service that's passed to this client process from the system server.
+// This allows specific isolated processes to access the audio system. Currently used only for the
+// HotwordDetectionService.
+sp<IBinder> gAudioFlingerBinder = nullptr;
+
+void AudioSystem::setAudioFlingerBinder(const sp<IBinder>& audioFlinger) {
+    if (audioFlinger->getInterfaceDescriptor() != media::IAudioFlingerService::descriptor) {
+        ALOGE("setAudioFlingerBinder: received a binder of type %s",
+              String8(audioFlinger->getInterfaceDescriptor()).string());
+        return;
+    }
+    Mutex::Autolock _l(gLock);
+    if (gAudioFlinger != nullptr) {
+        ALOGW("setAudioFlingerBinder: ignoring; AudioFlinger connection already established.");
+        return;
+    }
+    gAudioFlingerBinder = audioFlinger;
+}
+
 // establish binder interface to AudioFlinger service
 const sp<IAudioFlinger> AudioSystem::get_audio_flinger() {
     sp<IAudioFlinger> af;
@@ -79,15 +98,19 @@
     {
         Mutex::Autolock _l(gLock);
         if (gAudioFlinger == 0) {
-            sp<IServiceManager> sm = defaultServiceManager();
             sp<IBinder> binder;
-            do {
-                binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
-                if (binder != 0)
-                    break;
-                ALOGW("AudioFlinger not published, waiting...");
-                usleep(500000); // 0.5 s
-            } while (true);
+            if (gAudioFlingerBinder != nullptr) {
+                binder = gAudioFlingerBinder;
+            } else {
+                sp<IServiceManager> sm = defaultServiceManager();
+                do {
+                    binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
+                    if (binder != 0)
+                        break;
+                    ALOGW("AudioFlinger not published, waiting...");
+                    usleep(500000); // 0.5 s
+                } while (true);
+            }
             if (gAudioFlingerClient == NULL) {
                 gAudioFlingerClient = new AudioFlingerClient();
             } else {
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 5e7def1..a9109c8 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -148,6 +148,11 @@
     static void setRecordConfigCallback(record_config_callback);
     static void setRoutingCallback(routing_callback cb);
 
+    // Sets the binder to use for accessing the AudioFlinger service. This enables the system server
+    // to grant specific isolated processes access to the audio system. Currently used only for the
+    // HotwordDetectionService.
+    static void setAudioFlingerBinder(const sp<IBinder>& audioFlinger);
+
     // helper function to obtain AudioFlinger service handle
     static const sp<IAudioFlinger> get_audio_flinger();
 
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
index a54e22f..f30eb54 100644
--- a/media/libaudioprocessing/AudioMixerBase.cpp
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "AudioMixer"
 //#define LOG_NDEBUG 0
 
+#include <array>
 #include <sstream>
 #include <string.h>
 
@@ -1295,8 +1296,29 @@
 
 // Needs to derive a compile time constant (constexpr).  Could be targeted to go
 // to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
-#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
-        (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
+
+constexpr int MIXTYPE_MONOVOL(int mixtype, int channels) {
+    if (channels <= FCC_2) {
+        return mixtype;
+    } else if (mixtype == MIXTYPE_MULTI) {
+        return MIXTYPE_MULTI_MONOVOL;
+    } else if (mixtype == MIXTYPE_MULTI_SAVEONLY) {
+        return MIXTYPE_MULTI_SAVEONLY_MONOVOL;
+    } else {
+        return mixtype;
+    }
+}
+
+// Helper to make a functional array from volumeRampMulti.
+template <int MIXTYPE, typename TO, typename TI, typename TV, typename TA, typename TAV,
+          std::size_t ... Is>
+static constexpr auto makeVRMArray(std::index_sequence<Is...>)
+{
+    using F = void(*)(TO*, size_t, const TI*, TA*, TV*, const TV*, TAV*, TAV);
+    return std::array<F, sizeof...(Is)>{
+            { &volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE, Is + 1), Is + 1, TO, TI, TV, TA, TAV> ...}
+        };
+}
 
 /* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
  * TO: int32_t (Q4.27) or float
@@ -1308,40 +1330,26 @@
 static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
         const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
 {
-    switch (channels) {
-    case 1:
-        volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 2:
-        volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 3:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 4:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 5:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 6:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 7:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 8:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
+    static constexpr auto volumeRampMultiArray =
+            makeVRMArray<MIXTYPE, TO, TI, TV, TA, TAV>(std::make_index_sequence<FCC_LIMIT>());
+    if (channels > 0 && channels <= volumeRampMultiArray.size()) {
+        volumeRampMultiArray[channels - 1](out, frameCount, in, aux, vol, volinc, vola, volainc);
+    } else {
+        ALOGE("%s: invalid channel count:%d", __func__, channels);
     }
 }
 
+// Helper to make a functional array from volumeMulti.
+template <int MIXTYPE, typename TO, typename TI, typename TV, typename TA, typename TAV,
+          std::size_t ... Is>
+static constexpr auto makeVMArray(std::index_sequence<Is...>)
+{
+    using F = void(*)(TO*, size_t, const TI*, TA*, const TV*, TAV);
+    return std::array<F, sizeof...(Is)>{
+            { &volumeMulti<MIXTYPE_MONOVOL(MIXTYPE, Is + 1), Is + 1, TO, TI, TV, TA, TAV> ... }
+        };
+}
+
 /* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
  * TO: int32_t (Q4.27) or float
  * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
@@ -1352,31 +1360,12 @@
 static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
         const TI* in, TA* aux, const TV *vol, TAV vola)
 {
-    switch (channels) {
-    case 1:
-        volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 2:
-        volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 3:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 4:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 5:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 6:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 7:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 8:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
-        break;
+    static constexpr auto volumeMultiArray =
+            makeVMArray<MIXTYPE, TO, TI, TV, TA, TAV>(std::make_index_sequence<FCC_LIMIT>());
+    if (channels > 0 && channels <= volumeMultiArray.size()) {
+        volumeMultiArray[channels - 1](out, frameCount, in, aux, vol, vola);
+    } else {
+        ALOGE("%s: invalid channel count:%d", __func__, channels);
     }
 }
 
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index a56d9cb..cd47dc6 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -293,6 +293,16 @@
     // NCHAN == 8
     proc(*out++, f(inp(), vol[0])); // side left
     proc(*out++, f(inp(), vol[1])); // side right
+    if constexpr (NCHAN > FCC_8) {
+        // Mutes to zero extended surround channels.
+        // 7.1.4 has the correct behavior.
+        // 22.2 has the behavior that FLC and FRC will be mixed instead
+        // of SL and SR and LFE will be center, not left.
+        for (int i = 8; i < NCHAN; ++i) {
+            // TODO: Consider using android::audio_utils::channels::kSideFromChannelIdx
+            proc(*out++, f(inp(), 0.f));
+        }
+    }
 }
 
 /*
diff --git a/media/libaudioprocessing/AudioResamplerDyn.cpp b/media/libaudioprocessing/AudioResamplerDyn.cpp
index 21d3d36..2292b19 100644
--- a/media/libaudioprocessing/AudioResamplerDyn.cpp
+++ b/media/libaudioprocessing/AudioResamplerDyn.cpp
@@ -548,61 +548,73 @@
     LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > FCC_LIMIT,
             "Resampler channels(%d) must be between 1 to %d", mChannelCount, FCC_LIMIT);
     // stride 16 (falls back to stride 2 for machines that do not support NEON)
+
+
+// For now use a #define as a compiler generated function table requires renaming.
+#pragma push_macro("AUDIORESAMPLERDYN_CASE")
+#undef AUDIORESAMPLERDYN_CASE
+#define AUDIORESAMPLERDYN_CASE(CHANNEL, LOCKED) \
+    case CHANNEL: if constexpr (CHANNEL <= FCC_LIMIT) {\
+        mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<CHANNEL, LOCKED, 16>; \
+    } break
+
     if (locked) {
         switch (mChannelCount) {
-        case 1:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>;
-            break;
-        case 2:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>;
-            break;
-        case 3:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, true, 16>;
-            break;
-        case 4:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, true, 16>;
-            break;
-        case 5:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, true, 16>;
-            break;
-        case 6:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, true, 16>;
-            break;
-        case 7:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, true, 16>;
-            break;
-        case 8:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, true, 16>;
-            break;
+        AUDIORESAMPLERDYN_CASE(1, true);
+        AUDIORESAMPLERDYN_CASE(2, true);
+        AUDIORESAMPLERDYN_CASE(3, true);
+        AUDIORESAMPLERDYN_CASE(4, true);
+        AUDIORESAMPLERDYN_CASE(5, true);
+        AUDIORESAMPLERDYN_CASE(6, true);
+        AUDIORESAMPLERDYN_CASE(7, true);
+        AUDIORESAMPLERDYN_CASE(8, true);
+        AUDIORESAMPLERDYN_CASE(9, true);
+        AUDIORESAMPLERDYN_CASE(10, true);
+        AUDIORESAMPLERDYN_CASE(11, true);
+        AUDIORESAMPLERDYN_CASE(12, true);
+        AUDIORESAMPLERDYN_CASE(13, true);
+        AUDIORESAMPLERDYN_CASE(14, true);
+        AUDIORESAMPLERDYN_CASE(15, true);
+        AUDIORESAMPLERDYN_CASE(16, true);
+        AUDIORESAMPLERDYN_CASE(17, true);
+        AUDIORESAMPLERDYN_CASE(18, true);
+        AUDIORESAMPLERDYN_CASE(19, true);
+        AUDIORESAMPLERDYN_CASE(20, true);
+        AUDIORESAMPLERDYN_CASE(21, true);
+        AUDIORESAMPLERDYN_CASE(22, true);
+        AUDIORESAMPLERDYN_CASE(23, true);
+        AUDIORESAMPLERDYN_CASE(24, true);
         }
     } else {
         switch (mChannelCount) {
-        case 1:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>;
-            break;
-        case 2:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>;
-            break;
-        case 3:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, false, 16>;
-            break;
-        case 4:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, false, 16>;
-            break;
-        case 5:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, false, 16>;
-            break;
-        case 6:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, false, 16>;
-            break;
-        case 7:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, false, 16>;
-            break;
-        case 8:
-            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, false, 16>;
-            break;
+        AUDIORESAMPLERDYN_CASE(1, false);
+        AUDIORESAMPLERDYN_CASE(2, false);
+        AUDIORESAMPLERDYN_CASE(3, false);
+        AUDIORESAMPLERDYN_CASE(4, false);
+        AUDIORESAMPLERDYN_CASE(5, false);
+        AUDIORESAMPLERDYN_CASE(6, false);
+        AUDIORESAMPLERDYN_CASE(7, false);
+        AUDIORESAMPLERDYN_CASE(8, false);
+        AUDIORESAMPLERDYN_CASE(9, false);
+        AUDIORESAMPLERDYN_CASE(10, false);
+        AUDIORESAMPLERDYN_CASE(11, false);
+        AUDIORESAMPLERDYN_CASE(12, false);
+        AUDIORESAMPLERDYN_CASE(13, false);
+        AUDIORESAMPLERDYN_CASE(14, false);
+        AUDIORESAMPLERDYN_CASE(15, false);
+        AUDIORESAMPLERDYN_CASE(16, false);
+        AUDIORESAMPLERDYN_CASE(17, false);
+        AUDIORESAMPLERDYN_CASE(18, false);
+        AUDIORESAMPLERDYN_CASE(19, false);
+        AUDIORESAMPLERDYN_CASE(20, false);
+        AUDIORESAMPLERDYN_CASE(21, false);
+        AUDIORESAMPLERDYN_CASE(22, false);
+        AUDIORESAMPLERDYN_CASE(23, false);
+        AUDIORESAMPLERDYN_CASE(24, false);
         }
     }
+#pragma pop_macro("AUDIORESAMPLERDYN_CASE")
+
 #ifdef DEBUG_RESAMPLER
     printf("channels:%d  %s  stride:%d  %s  coef:%d  shift:%d\n",
             mChannelCount, locked ? "locked" : "interpolated",
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 8fa7463..1986272 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -976,6 +976,10 @@
             return "BufferDecoded";
         case TunnelPeekState::kBufferRendered:
             return "BufferRendered";
+        case TunnelPeekState::kDisabledQueued:
+            return "DisabledQueued";
+        case TunnelPeekState::kEnabledQueued:
+            return "EnabledQueued";
         default:
             return default_string;
     }
@@ -986,25 +990,39 @@
     if (!msg->findInt32("tunnel-peek", &tunnelPeek)){
         return;
     }
+
+    TunnelPeekState previousState = mTunnelPeekState;
     if(tunnelPeek == 0){
-        if (mTunnelPeekState == TunnelPeekState::kEnabledNoBuffer) {
-            mTunnelPeekState = TunnelPeekState::kDisabledNoBuffer;
-            ALOGV("TunnelPeekState: %s -> %s",
-                  asString(TunnelPeekState::kEnabledNoBuffer),
-                  asString(TunnelPeekState::kDisabledNoBuffer));
-            return;
+        switch (mTunnelPeekState) {
+            case TunnelPeekState::kEnabledNoBuffer:
+                mTunnelPeekState = TunnelPeekState::kDisabledNoBuffer;
+                break;
+            case TunnelPeekState::kEnabledQueued:
+                mTunnelPeekState = TunnelPeekState::kDisabledQueued;
+                break;
+            default:
+                ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+                return;
         }
     } else {
-        if (mTunnelPeekState == TunnelPeekState::kDisabledNoBuffer) {
-            mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
-            ALOGV("TunnelPeekState: %s -> %s",
-                  asString(TunnelPeekState::kDisabledNoBuffer),
-                  asString(TunnelPeekState::kEnabledNoBuffer));
-            return;
+        switch (mTunnelPeekState) {
+            case TunnelPeekState::kDisabledNoBuffer:
+                mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
+                break;
+            case TunnelPeekState::kDisabledQueued:
+                mTunnelPeekState = TunnelPeekState::kEnabledQueued;
+                break;
+            case TunnelPeekState::kBufferDecoded:
+                msg->setInt32("android._trigger-tunnel-peek", 1);
+                mTunnelPeekState = TunnelPeekState::kBufferRendered;
+                break;
+            default:
+                ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+                return;
         }
     }
 
-    ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+    ALOGV("TunnelPeekState: %s -> %s", asString(previousState), asString(mTunnelPeekState));
 }
 
 void MediaCodec::updatePlaybackDuration(const sp<AMessage> &msg) {
@@ -3294,25 +3312,32 @@
                     if (mState != STARTED) {
                         break;
                     }
+                    TunnelPeekState previousState = mTunnelPeekState;
                     switch(mTunnelPeekState) {
                         case TunnelPeekState::kDisabledNoBuffer:
+                        case TunnelPeekState::kDisabledQueued:
                             mTunnelPeekState = TunnelPeekState::kBufferDecoded;
+                            ALOGV("First tunnel frame ready");
                             ALOGV("TunnelPeekState: %s -> %s",
-                                  asString(TunnelPeekState::kDisabledNoBuffer),
-                                  asString(TunnelPeekState::kBufferDecoded));
+                                  asString(previousState),
+                                  asString(mTunnelPeekState));
                             break;
                         case TunnelPeekState::kEnabledNoBuffer:
-                            mTunnelPeekState = TunnelPeekState::kBufferDecoded;
-                            ALOGV("TunnelPeekState: %s -> %s",
-                                  asString(TunnelPeekState::kEnabledNoBuffer),
-                                  asString(TunnelPeekState::kBufferDecoded));
+                        case TunnelPeekState::kEnabledQueued:
                             {
                                 sp<AMessage> parameters = new AMessage();
                                 parameters->setInt32("android._trigger-tunnel-peek", 1);
                                 mCodec->signalSetParameters(parameters);
                             }
+                            mTunnelPeekState = TunnelPeekState::kBufferRendered;
+                            ALOGV("First tunnel frame ready");
+                            ALOGV("TunnelPeekState: %s -> %s",
+                                  asString(previousState),
+                                  asString(mTunnelPeekState));
                             break;
                         default:
+                            ALOGV("Ignoring first tunnel frame ready, TunnelPeekState: %s",
+                                  asString(mTunnelPeekState));
                             break;
                     }
 
@@ -4777,6 +4802,28 @@
         buffer->meta()->setInt32("csd", true);
     }
 
+    if (mTunneled) {
+        TunnelPeekState previousState = mTunnelPeekState;
+        switch(mTunnelPeekState){
+            case TunnelPeekState::kEnabledNoBuffer:
+                buffer->meta()->setInt32("tunnel-first-frame", 1);
+                mTunnelPeekState = TunnelPeekState::kEnabledQueued;
+                ALOGV("TunnelPeekState: %s -> %s",
+                        asString(previousState),
+                        asString(mTunnelPeekState));
+                break;
+            case TunnelPeekState::kDisabledNoBuffer:
+                buffer->meta()->setInt32("tunnel-first-frame", 1);
+                mTunnelPeekState = TunnelPeekState::kDisabledQueued;
+                ALOGV("TunnelPeekState: %s -> %s",
+                        asString(previousState),
+                        asString(mTunnelPeekState));
+                break;
+            default:
+                break;
+        }
+    }
+
     status_t err = OK;
     if (hasCryptoOrDescrambler() && !c2Buffer && !memory) {
         AString *errorDetailMsg;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 0e6f0b3..d372140 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -377,15 +377,23 @@
     // This type is used to track the tunnel mode video peek state machine:
     //
     // DisabledNoBuffer -> EnabledNoBuffer  when tunnel-peek = true
+    // DisabledQueued   -> EnabledQueued    when tunnel-peek = true
+    // DisabledNoBuffer -> DisabledQueued   when first frame queued
     // EnabledNoBuffer  -> DisabledNoBuffer when tunnel-peek = false
+    // EnabledQueued    -> DisabledQueued   when tunnel-peek = false
+    // EnabledNoBuffer  -> EnabledQueued    when first frame queued
     // DisabledNoBuffer -> BufferDecoded    when kWhatFirstTunnelFrameReady
+    // DisabledQueued   -> BufferDecoded    when kWhatFirstTunnelFrameReady
     // EnabledNoBuffer  -> BufferDecoded    when kWhatFirstTunnelFrameReady
+    // EnabledQueued    -> BufferDecoded    when kWhatFirstTunnelFrameReady
     // BufferDecoded    -> BufferRendered   when kWhatFrameRendered
     // <all states>     -> EnabledNoBuffer  when flush
     // <all states>     -> EnabledNoBuffer  when stop then configure then start
     enum struct TunnelPeekState {
         kDisabledNoBuffer,
         kEnabledNoBuffer,
+        kDisabledQueued,
+        kEnabledQueued,
         kBufferDecoded,
         kBufferRendered,
     };
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 13e2ced..88d4eaf 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -48,6 +48,15 @@
 
 /*static*/ const FastMixerState FastMixer::sInitial;
 
+static audio_channel_mask_t getChannelMaskFromCount(size_t count) {
+    const audio_channel_mask_t mask = audio_channel_out_mask_from_count(count);
+    if (mask == AUDIO_CHANNEL_INVALID) {
+        // some counts have no positional masks. TODO: Update this to return index count?
+        return audio_channel_mask_for_index_assignment_from_count(count);
+    }
+    return mask;
+}
+
 FastMixer::FastMixer(audio_io_handle_t parentIoHandle)
     : FastThread("cycle_ms", "load_us"),
     // mFastTrackNames
@@ -79,7 +88,7 @@
     mDummyDumpState = &mDummyFastMixerDumpState;
     // TODO: Add channel mask to NBAIO_Format.
     // We assume that the channel mask must be a valid positional channel mask.
-    mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+    mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
 
     unsigned i;
     for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
@@ -238,7 +247,7 @@
             LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
 
             if (mSinkChannelMask == AUDIO_CHANNEL_NONE) {
-                mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+                mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
             }
             mAudioChannelCount = mSinkChannelCount - audio_channel_count_from_out_mask(
                     mSinkChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 9c7b506..83d2bc9 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -131,8 +131,6 @@
         "android.permission.CAMERA_OPEN_CLOSE_LISTENER");
 static const String16
         sCameraInjectExternalCameraPermission("android.permission.CAMERA_INJECT_EXTERNAL_CAMERA");
-static int sMemFd = -1;
-const char *sFileName = "lastOpenSessionDumpFile";
 
 static constexpr int32_t kVendorClientScore = resource_policy::PERCEPTIBLE_APP_ADJ;
 static constexpr int32_t kVendorClientState = ActivityManager::PROCESS_STATE_PERSISTENT_UI;
@@ -150,11 +148,6 @@
         mAudioRestriction(hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE) {
     ALOGI("CameraService started (pid=%d)", getpid());
     mServiceLockWrapper = std::make_shared<WaitableMutexWrapper>(&mServiceLock);
-
-    sMemFd = memfd_create(sFileName, MFD_ALLOW_SEALING);
-    if (sMemFd == -1) {
-        ALOGE("%s:Error while creating the file:%s", __FUNCTION__, sFileName);
-    }
 }
 
 void CameraService::onFirstRef()
@@ -1644,21 +1637,6 @@
     }
 
     *device = client;
-
-    Mutex::Autolock lock(mServiceLock);
-
-    // Clear the previous cached logs and reposition the
-    // file offset to beginning of the file to log new data.
-    // If either truncate or lseek fails, close the previous file and create a new one.
-    if ((ftruncate(sMemFd, 0) == -1) || (lseek(sMemFd, 0, SEEK_SET) == -1)) {
-        ALOGE("%s: Error while truncating the file:%s", __FUNCTION__, sFileName);
-        // Close the previous memfd.
-        close(sMemFd);
-        // If failure to wipe the data, then create a new file and
-        // assign the new value to sMemFd.
-        sMemFd = memfd_create(sFileName, MFD_ALLOW_SEALING);
-    }
-
     return ret;
 }
 
@@ -3854,27 +3832,6 @@
     return locked;
 }
 
-void CameraService::cacheDump() {
-    const Vector<String16>& args = Vector<String16>();
-    ATRACE_CALL();
-
-    Mutex::Autolock lock(mServiceLock);
-
-    Mutex::Autolock l(mCameraStatesLock);
-
-    // Start collecting the info for open sessions and store it in temp file.
-    for (const auto& state : mCameraStates) {
-        String8 cameraId = state.first;
-
-        auto clientDescriptor = mActiveClientManager.get(cameraId);
-        if (clientDescriptor != nullptr) {
-           dprintf(sMemFd, "== Camera device %s dynamic info: ==\n", cameraId.string());
-           // Log the current open session info before device is disconnected.
-           dumpOpenSessionClientLogs(sMemFd, args, cameraId);
-        }
-    }
-}
-
 status_t CameraService::dump(int fd, const Vector<String16>& args) {
     ATRACE_CALL();
 
@@ -3941,10 +3898,21 @@
 
         auto clientDescriptor = mActiveClientManager.get(cameraId);
         if (clientDescriptor != nullptr) {
-            // log the current open session info
-            dumpOpenSessionClientLogs(fd, args, cameraId);
+            dprintf(fd, "  Device %s is open. Client instance dump:\n",
+                    cameraId.string());
+            dprintf(fd, "    Client priority score: %d state: %d\n",
+                    clientDescriptor->getPriority().getScore(),
+                    clientDescriptor->getPriority().getState());
+            dprintf(fd, "    Client PID: %d\n", clientDescriptor->getOwnerId());
+
+            auto client = clientDescriptor->getValue();
+            dprintf(fd, "    Client package: %s\n",
+                    String8(client->getPackageName()).string());
+
+            client->dumpClient(fd, args);
         } else {
-            dumpClosedSessionClientLogs(fd, cameraId);
+            dprintf(fd, "  Device %s is closed, no client instance\n",
+                    cameraId.string());
         }
 
     }
@@ -4001,53 +3969,9 @@
             }
         }
     }
-
-    Mutex::Autolock lock(mServiceLock);
-
-    // Dump info from previous open sessions.
-    // Reposition the offset to beginning of the file before reading
-
-    if ((sMemFd >= 0) && (lseek(sMemFd, 0, SEEK_SET) != -1)) {
-        dprintf(fd, "\n**********Dumpsys from previous open session**********\n");
-        ssize_t size_read;
-        char buf[4096];
-        while ((size_read = read(sMemFd, buf, (sizeof(buf) - 1))) > 0) {
-            // Read data from file to a small buffer and write it to fd.
-            write(fd, buf, size_read);
-            if (size_read == -1){
-                ALOGE("%s: Error during reading the file:%s", __FUNCTION__, sFileName);
-                break;
-            }
-        }
-        dprintf(fd, "\n**********End of Dumpsys from previous open session**********\n");
-    } else {
-        ALOGE("%s: Error during reading the file:%s", __FUNCTION__, sFileName);
-    }
     return NO_ERROR;
 }
 
-void CameraService::dumpOpenSessionClientLogs(int fd,
-        const Vector<String16>& args, const String8& cameraId) {
-    auto clientDescriptor = mActiveClientManager.get(cameraId);
-    dprintf(fd, "  Device %s is open. Client instance dump:\n",
-        cameraId.string());
-    dprintf(fd, "    Client priority score: %d state: %d\n",
-        clientDescriptor->getPriority().getScore(),
-        clientDescriptor->getPriority().getState());
-    dprintf(fd, "    Client PID: %d\n", clientDescriptor->getOwnerId());
-
-    auto client = clientDescriptor->getValue();
-    dprintf(fd, "    Client package: %s\n",
-        String8(client->getPackageName()).string());
-
-    client->dumpClient(fd, args);
-}
-
-void CameraService::dumpClosedSessionClientLogs(int fd, const String8& cameraId) {
-    dprintf(fd, "  Device %s is closed, no client instance\n",
-                    cameraId.string());
-}
-
 void CameraService::dumpEventLog(int fd) {
     dprintf(fd, "\n== Camera service events log (most recent at top): ==\n");
 
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 7b0037e..d1ed59a 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -202,8 +202,6 @@
             std::vector<hardware::CameraStatus>* cameraStatuses, bool isVendor = false,
             bool isProcessLocalTest = false);
 
-    void cacheDump();
-
     // Monitored UIDs availability notification
     void                notifyMonitoredUids();
 
@@ -787,12 +785,6 @@
     // Return NO_ERROR if the device with a give ID can be connected to
     status_t checkIfDeviceIsUsable(const String8& cameraId) const;
 
-    // Adds client logs during open session to the file pointed by fd.
-    void dumpOpenSessionClientLogs(int fd, const Vector<String16>& args, const String8& cameraId);
-
-    // Adds client logs during closed session to the file pointed by fd.
-    void dumpClosedSessionClientLogs(int fd, const String8& cameraId);
-
     // Container for managing currently active application-layer clients
     CameraClientManager mActiveClientManager;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 56e5ae1..ce479a1 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -195,8 +195,6 @@
 
     ALOGV("Camera %s: Shutting down", TClientBase::mCameraIdStr.string());
 
-    // Before detaching the device, cache the info from current open session
-    Camera2ClientBase::getCameraService()->cacheDump();
     detachDevice();
 
     CameraService::BasicClient::disconnect();
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index 77e1c40..5b4129a 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -445,90 +445,118 @@
     TunerFrontendCapabilities caps;
     switch (halInfo.type) {
         case FrontendType::ANALOG: {
-            TunerFrontendAnalogCapabilities analogCaps{
-                .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
-                .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
-            };
-            caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendAnalogCapabilities analogCaps{
+                    .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
+                    .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
+                };
+                caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
+            }
             break;
         }
         case FrontendType::ATSC: {
-            TunerFrontendAtscCapabilities atscCaps{
-                .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
-            };
-            caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendAtscCapabilities atscCaps{
+                    .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
+                };
+                caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
+            }
             break;
         }
         case FrontendType::ATSC3: {
-            TunerFrontendAtsc3Capabilities atsc3Caps{
-                .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
-                .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
-                .timeInterleaveModeCap =
-                        (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
-                .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
-                .demodOutputFormatCap = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
-                .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
-            };
-            caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendAtsc3Capabilities atsc3Caps{
+                    .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
+                    .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
+                    .timeInterleaveModeCap =
+                            (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
+                    .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
+                    .demodOutputFormatCap
+                        = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
+                    .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
+                };
+                caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
+            }
             break;
         }
         case FrontendType::DVBC: {
-            TunerFrontendCableCapabilities cableCaps{
-                .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
-                .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
-                .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
-            };
-            caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendCableCapabilities cableCaps{
+                    .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
+                    .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
+                    .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
+                };
+                caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
+            }
             break;
         }
         case FrontendType::DVBS: {
-            TunerFrontendDvbsCapabilities dvbsCaps{
-                .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
-                .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
-                .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
-            };
-            caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendDvbsCapabilities dvbsCaps{
+                    .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
+                    .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
+                    .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
+                };
+                caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
+            }
             break;
         }
         case FrontendType::DVBT: {
-            TunerFrontendDvbtCapabilities dvbtCaps{
-                .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
-                .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
-                .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
-                .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
-                .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
-                .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
-                .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
-                .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
-            };
-            caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendDvbtCapabilities dvbtCaps{
+                    .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
+                    .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
+                    .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
+                    .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
+                    .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
+                    .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
+                    .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
+                    .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
+                };
+                caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
+            }
             break;
         }
         case FrontendType::ISDBS: {
-            TunerFrontendIsdbsCapabilities isdbsCaps{
-                .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
-                .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
-            };
-            caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendIsdbsCapabilities isdbsCaps{
+                    .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
+                    .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
+                };
+                caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
+            }
             break;
         }
         case FrontendType::ISDBS3: {
-            TunerFrontendIsdbs3Capabilities isdbs3Caps{
-                .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
-                .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
-            };
-            caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendIsdbs3Capabilities isdbs3Caps{
+                    .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
+                    .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
+                };
+                caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+            }
             break;
         }
         case FrontendType::ISDBT: {
-            TunerFrontendIsdbtCapabilities isdbtCaps{
-                .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
-                .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
-                .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
-                .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
-                .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
-            };
-            caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
+            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps
+                    == halInfo.frontendCaps.getDiscriminator()) {
+                TunerFrontendIsdbtCapabilities isdbtCaps{
+                    .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
+                    .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
+                    .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
+                    .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
+                    .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
+                };
+                caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
+            }
             break;
         }
         default: