Merge "Use networkstack_client instead of aidl_interface"
diff --git a/Tethering/Android.bp b/Tethering/Android.bp
index bbf3a8f..9805c54 100644
--- a/Tethering/Android.bp
+++ b/Tethering/Android.bp
@@ -144,3 +144,10 @@
     apex_available: ["com.android.tethering"],
     min_sdk_version: "30",
 }
+
+sdk {
+    name: "tethering-module-sdk",
+    java_sdk_libs: [
+        "framework-tethering",
+    ],
+}
diff --git a/Tethering/apex/Android.bp b/Tethering/apex/Android.bp
index 7970944..acb87bf 100644
--- a/Tethering/apex/Android.bp
+++ b/Tethering/apex/Android.bp
@@ -18,7 +18,7 @@
     name: "com.android.tethering",
     // TODO: make updatable again once this contains only updatable artifacts (in particular, this
     // cannot build as updatable unless service-connectivity builds against stable API).
-    // updatable: true,
+    updatable: false,
     // min_sdk_version: "30",
     java_libs: [
         "framework-tethering",
diff --git a/Tethering/bpf_progs/bpf_tethering.h b/Tethering/bpf_progs/bpf_tethering.h
index c8ada88..6591e81 100644
--- a/Tethering/bpf_progs/bpf_tethering.h
+++ b/Tethering/bpf_progs/bpf_tethering.h
@@ -40,8 +40,11 @@
     ERR(IS_IP_FRAG)          \
     ERR(CHECKSUM)            \
     ERR(NON_TCP_UDP)         \
+    ERR(NON_TCP)             \
+    ERR(SHORT_L4_HEADER)     \
     ERR(SHORT_TCP_HEADER)    \
     ERR(SHORT_UDP_HEADER)    \
+    ERR(UDP_CSUM_ZERO)       \
     ERR(TRUNCATED_IPV4)      \
     ERR(_MAX)
 
diff --git a/Tethering/bpf_progs/offload.c b/Tethering/bpf_progs/offload.c
index bf60e67..16d6ecf 100644
--- a/Tethering/bpf_progs/offload.c
+++ b/Tethering/bpf_progs/offload.c
@@ -68,7 +68,25 @@
 #define ETH_IP6_TCP_OFFSET(field) (ETH_HLEN + IP6_TCP_OFFSET(field))
 #define ETH_IP6_UDP_OFFSET(field) (ETH_HLEN + IP6_UDP_OFFSET(field))
 
-// ----- Tethering stats and data limits -----
+// ----- Tethering Error Counters -----
+
+DEFINE_BPF_MAP_GRW(tether_error_map, ARRAY, uint32_t, uint32_t, BPF_TETHER_ERR__MAX,
+                   AID_NETWORK_STACK)
+
+#define COUNT_AND_RETURN(counter, ret) do {                     \
+    uint32_t code = BPF_TETHER_ERR_ ## counter;                 \
+    uint32_t *count = bpf_tether_error_map_lookup_elem(&code);  \
+    if (count) __sync_fetch_and_add(count, 1);                  \
+    return ret;                                                 \
+} while(0)
+
+#define TC_DROP(counter) COUNT_AND_RETURN(counter, TC_ACT_SHOT)
+#define TC_PUNT(counter) COUNT_AND_RETURN(counter, TC_ACT_OK)
+
+#define XDP_DROP(counter) COUNT_AND_RETURN(counter, XDP_DROP)
+#define XDP_PUNT(counter) COUNT_AND_RETURN(counter, XDP_PASS)
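+
+// For illustration, a guard such as 'if (ip6->hop_limit <= 1) TC_PUNT(LOW_TTL);' roughly
+// expands to:
+//
+//   if (ip6->hop_limit <= 1) do {
+//       uint32_t code = BPF_TETHER_ERR_LOW_TTL;
+//       uint32_t *count = bpf_tether_error_map_lookup_elem(&code);
+//       if (count) __sync_fetch_and_add(count, 1);
+//       return TC_ACT_OK;  // ie. bump the LOW_TTL counter and punt to the regular kernel stack
+//   } while(0);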
+
+// ----- Tethering Data Stats and Limits -----
 
 // Tethering stats, indexed by upstream interface.
 DEFINE_BPF_MAP_GRW(tether_stats_map, HASH, TetherStatsKey, TetherStatsValue, 16, AID_NETWORK_STACK)
@@ -88,19 +106,6 @@
 DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                    AID_NETWORK_STACK)
 
-DEFINE_BPF_MAP_GRW(tether_error_map, ARRAY, __u32, __u32, BPF_TETHER_ERR__MAX,
-                   AID_NETWORK_STACK)
-
-#define COUNT_AND_RETURN(counter, ret) do {                    \
-    __u32 code = BPF_TETHER_ERR_ ## counter;                 \
-    __u32 *count = bpf_tether_error_map_lookup_elem(&code);  \
-    if (count) __sync_fetch_and_add(count, 1);               \
-    return ret;                                              \
-} while(0)
-
-#define DROP(counter) COUNT_AND_RETURN(counter, TC_ACT_SHOT)
-#define PUNT(counter) COUNT_AND_RETURN(counter, TC_ACT_OK)
-
 static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
         const bool downstream) {
     const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
@@ -122,11 +127,11 @@
     if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_OK;
 
     // IP version must be 6
-    if (ip6->version != 6) PUNT(INVALID_IP_VERSION);
+    if (ip6->version != 6) TC_PUNT(INVALID_IP_VERSION);
 
     // Cannot decrement during forward if already zero or would be zero,
     // Let the kernel's stack handle these cases and generate appropriate ICMP errors.
-    if (ip6->hop_limit <= 1) PUNT(LOW_TTL);
+    if (ip6->hop_limit <= 1) TC_PUNT(LOW_TTL);
 
     // If hardware offload is running and programming flows based on conntrack entries,
     // try not to interfere with it.
@@ -135,27 +140,27 @@
 
         // Make sure we can get at the tcp header
         if (data + l2_header_size + sizeof(*ip6) + sizeof(*tcph) > data_end)
-            PUNT(INVALID_TCP_HEADER);
+            TC_PUNT(INVALID_TCP_HEADER);
 
         // Do not offload TCP packets with any one of the SYN/FIN/RST flags
-        if (tcph->syn || tcph->fin || tcph->rst) PUNT(TCP_CONTROL_PACKET);
+        if (tcph->syn || tcph->fin || tcph->rst) TC_PUNT(TCP_CONTROL_PACKET);
     }
 
     // Protect against forwarding packets sourced from ::1 or fe80::/64 or other weirdness.
     __be32 src32 = ip6->saddr.s6_addr32[0];
     if (src32 != htonl(0x0064ff9b) &&                        // 64:ff9b:/32 incl. XLAT464 WKP
         (src32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
-        PUNT(NON_GLOBAL_SRC);
+        TC_PUNT(NON_GLOBAL_SRC);
 
     // Protect against forwarding packets destined to ::1 or fe80::/64 or other weirdness.
     __be32 dst32 = ip6->daddr.s6_addr32[0];
     if (dst32 != htonl(0x0064ff9b) &&                        // 64:ff9b:/32 incl. XLAT464 WKP
         (dst32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
-        PUNT(NON_GLOBAL_DST);
+        TC_PUNT(NON_GLOBAL_DST);
 
     // In the upstream direction do not forward traffic within the same /64 subnet.
     if (!downstream && (src32 == dst32) && (ip6->saddr.s6_addr32[1] == ip6->daddr.s6_addr32[1]))
-        PUNT(LOCAL_SRC_DST);
+        TC_PUNT(LOCAL_SRC_DST);
 
     TetherDownstream6Key kd = {
             .iif = skb->ifindex,
@@ -177,15 +182,15 @@
     TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);
 
     // If we don't have anywhere to put stats, then abort...
-    if (!stat_v) PUNT(NO_STATS_ENTRY);
+    if (!stat_v) TC_PUNT(NO_STATS_ENTRY);
 
     uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);
 
     // If we don't have a limit, then abort...
-    if (!limit_v) PUNT(NO_LIMIT_ENTRY);
+    if (!limit_v) TC_PUNT(NO_LIMIT_ENTRY);
 
     // Required IPv6 minimum mtu is 1280, below that not clear what we should do, abort...
-    if (v->pmtu < IPV6_MIN_MTU) PUNT(BELOW_IPV6_MTU);
+    if (v->pmtu < IPV6_MIN_MTU) TC_PUNT(BELOW_IPV6_MTU);
 
     // Approximate handling of TCP/IPv6 overhead for incoming LRO/GRO packets: default
     // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
@@ -210,7 +215,7 @@
     // a packet we let the core stack deal with things.
     // (The core stack needs to handle limits correctly anyway,
     // since we don't offload all traffic in both directions)
-    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) PUNT(LIMIT_REACHED);
+    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) TC_PUNT(LIMIT_REACHED);
 
     if (!is_ethernet) {
         // Try to inject an ethernet header, and simply return if we fail.
@@ -218,7 +223,7 @@
         // because this is easier and the kernel will strip extraneous ethernet header.
         if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
             __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
-            PUNT(CHANGE_HEAD_FAILED);
+            TC_PUNT(CHANGE_HEAD_FAILED);
         }
 
         // bpf_skb_change_head() invalidates all pointers - reload them
@@ -230,7 +235,7 @@
         // I do not believe this can ever happen, but keep the verifier happy...
         if (data + sizeof(struct ethhdr) + sizeof(*ip6) > data_end) {
             __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
-            DROP(TOO_SHORT);
+            TC_DROP(TOO_SHORT);
         }
     };
 
@@ -341,7 +346,7 @@
 DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)
 
 static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
-        const bool downstream) {
+        const bool downstream, const bool updatetime) {
     const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
     void* data = (void*)(long)skb->data;
     const void* data_end = (void*)(long)skb->data_end;
@@ -361,10 +366,10 @@
     if (is_ethernet && (eth->h_proto != htons(ETH_P_IP))) return TC_ACT_OK;
 
     // IP version must be 4
-    if (ip->version != 4) PUNT(INVALID_IP_VERSION);
+    if (ip->version != 4) TC_PUNT(INVALID_IP_VERSION);
 
     // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
-    if (ip->ihl != 5) PUNT(HAS_IP_OPTIONS);
+    if (ip->ihl != 5) TC_PUNT(HAS_IP_OPTIONS);
 
     // Calculate the IPv4 one's complement checksum of the IPv4 header.
     __wsum sum4 = 0;
@@ -375,36 +380,91 @@
     sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
     sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
     // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
-    if (sum4 != 0xFFFF) PUNT(CHECKSUM);
+    if (sum4 != 0xFFFF) TC_PUNT(CHECKSUM);
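+    // (Worked example of the folding, for illustration: if the header's 16-bit words were to sum
+    //  to 0x2FFFD, the first fold gives 0xFFFD + 0x2 = 0xFFFF and the second fold is a no-op.
+    //  0xFFFF is one's complement negative zero; positive zero is unreachable since sum4 stays
+    //  strictly positive, hence a correct header is recognized by comparing against 0xFFFF.)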
 
     // Minimum IPv4 total length is the size of the header
-    if (ntohs(ip->tot_len) < sizeof(*ip)) PUNT(TRUNCATED_IPV4);
+    if (ntohs(ip->tot_len) < sizeof(*ip)) TC_PUNT(TRUNCATED_IPV4);
 
     // We are incapable of dealing with IPv4 fragments
-    if (ip->frag_off & ~htons(IP_DF)) PUNT(IS_IP_FRAG);
+    if (ip->frag_off & ~htons(IP_DF)) TC_PUNT(IS_IP_FRAG);
 
     // Cannot decrement during forward if already zero or would be zero,
     // Let the kernel's stack handle these cases and generate appropriate ICMP errors.
-    if (ip->ttl <= 1) PUNT(LOW_TTL);
+    if (ip->ttl <= 1) TC_PUNT(LOW_TTL);
 
-    const bool is_tcp = (ip->protocol == IPPROTO_TCP);
+    // If we cannot update the 'last_used' field due to lack of the bpf_ktime_get_boot_ns()
+    // helper, then it is not safe to offload UDP due to the small conntrack timeouts, so in
+    // that case we only support TCP.  This also has the nice side benefit of using a separate
+    // error counter, and thus making it obvious which version of the program is loaded.
+    if (!updatetime && ip->protocol != IPPROTO_TCP) TC_PUNT(NON_TCP);
 
-    // We do not support anything besides TCP and UDP
-    if (!is_tcp && (ip->protocol != IPPROTO_UDP)) PUNT(NON_TCP_UDP);
+    // We do not support offloading anything besides IPv4 TCP and UDP, due to need for NAT,
+    // but no need to check this if !updatetime due to check immediately above.
+    if (updatetime && (ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+        TC_PUNT(NON_TCP_UDP);
+
+    // We want to make sure that the compiler will, in the !updatetime case, entirely optimize
+    // out all the non-tcp logic.  Also note that at this point is_udp === !is_tcp.
+    const bool is_tcp = !updatetime || (ip->protocol == IPPROTO_TCP);
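+    // (Since do_forward4() is __always_inline and 'updatetime' is a compile time constant at
+    //  every call site below, 'is_tcp' folds to the constant 'true' when updatetime is false,
+    //  letting the compiler dead-code-eliminate the UDP-only branches in the TCP-only programs
+    //  further below.)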
+
+    // This is a bit of a hack to make things easier on the bpf verifier.
+    // (In particular I believe the Linux 4.14 kernel's verifier can get confused later on about
+    // what offsets into the packet are valid and can spuriously reject the program; this is
+    // because it fails to realize that is_tcp && !is_tcp is impossible)
+    //
+    // For both TCP & UDP we'll need to read and modify the src/dst ports, which so happen to
+    // always be in the first 4 bytes of the L4 header.  Additionally for UDP we'll need access
+    // to the checksum field, which is in bytes 7 and 8, while for TCP we'll need to read the
+    // TCP flags (at offset 13) and access the checksum field (2 bytes at offset 16).
+    // As such we *always* need access to at least 8 bytes.
+    if (data + l2_header_size + sizeof(*ip) + 8 > data_end) TC_PUNT(SHORT_L4_HEADER);
 
     struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
     struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);
 
     if (is_tcp) {
         // Make sure we can get at the tcp header
-        if (data + l2_header_size + sizeof(*ip) + sizeof(*tcph) > data_end) PUNT(SHORT_TCP_HEADER);
+        if (data + l2_header_size + sizeof(*ip) + sizeof(*tcph) > data_end)
+            TC_PUNT(SHORT_TCP_HEADER);
 
         // If hardware offload is running and programming flows based on conntrack entries, try not
         // to interfere with it, so do not offload TCP packets with any one of the SYN/FIN/RST flags
-        if (tcph->syn || tcph->fin || tcph->rst) PUNT(TCP_CONTROL_PACKET);
+        if (tcph->syn || tcph->fin || tcph->rst) TC_PUNT(TCP_CONTROL_PACKET);
     } else { // UDP
         // Make sure we can get at the udp header
-        if (data + l2_header_size + sizeof(*ip) + sizeof(*udph) > data_end) PUNT(SHORT_UDP_HEADER);
+        if (data + l2_header_size + sizeof(*ip) + sizeof(*udph) > data_end)
+            TC_PUNT(SHORT_UDP_HEADER);
+
+        // Skip handling of CHECKSUM_COMPLETE packets with udp checksum zero due to need for
+        // additional updating of skb->csum (this could be fixed up manually with more effort).
+        //
+        // Note that the in-kernel implementation of 'int64_t bpf_csum_update(skb, u32 csum)' is:
+        //   if (skb->ip_summed == CHECKSUM_COMPLETE)
+        //     return (skb->csum = csum_add(skb->csum, csum));
+        //   else
+        //     return -ENOTSUPP;
+        //
+        // So this will punt any CHECKSUM_COMPLETE packet with a zero UDP checksum,
+        // and leave all other packets unaffected (since it just at most adds zero to skb->csum).
+        //
+        // In practice this should almost never trigger because most nics do not generate
+        // CHECKSUM_COMPLETE packets on receive - especially so for nics/drivers on a phone.
+        //
+        // Additionally since we're forwarding, in most cases the value of the skb->csum field
+        // shouldn't matter (it's not used by physical nic egress).
+        //
+        // It only matters if we're ingressing through a CHECKSUM_COMPLETE capable nic
+        // and egressing through a virtual interface looping back to the kernel itself
+        // (ie. something like veth) where the CHECKSUM_COMPLETE/skb->csum can get reused
+        // on ingress.
+        //
+        // If we were in the kernel we'd probably simply call
+        //   void skb_checksum_complete_unset(struct sk_buff *skb) {
+        //     if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE;
+        //   }
+        // here instead.  Perhaps there should be a bpf helper for that?
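+        //
+        // (Given the implementation quoted above, bpf_csum_update(skb, 0) below acts purely as a
+        //  probe: it returns >= 0 only for CHECKSUM_COMPLETE skbs, and adding zero leaves
+        //  skb->csum unchanged, so the check fires exactly for CHECKSUM_COMPLETE packets whose
+        //  UDP checksum field is zero.)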
+        if (!udph->check && (bpf_csum_update(skb, 0) >= 0)) TC_PUNT(UDP_CSUM_ZERO);
     }
 
     Tether4Key k = {
@@ -428,15 +488,15 @@
     TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);
 
     // If we don't have anywhere to put stats, then abort...
-    if (!stat_v) PUNT(NO_STATS_ENTRY);
+    if (!stat_v) TC_PUNT(NO_STATS_ENTRY);
 
     uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);
 
     // If we don't have a limit, then abort...
-    if (!limit_v) PUNT(NO_LIMIT_ENTRY);
+    if (!limit_v) TC_PUNT(NO_LIMIT_ENTRY);
 
     // Required IPv4 minimum mtu is 68, below that not clear what we should do, abort...
-    if (v->pmtu < 68) PUNT(BELOW_IPV4_MTU);
+    if (v->pmtu < 68) TC_PUNT(BELOW_IPV4_MTU);
 
     // Approximate handling of TCP/IPv4 overhead for incoming LRO/GRO packets: default
     // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
@@ -461,10 +521,7 @@
     // a packet we let the core stack deal with things.
     // (The core stack needs to handle limits correctly anyway,
     // since we don't offload all traffic in both directions)
-    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) PUNT(LIMIT_REACHED);
-
-
-if (!is_tcp) return TC_ACT_OK; // HACK
+    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) TC_PUNT(LIMIT_REACHED);
 
     if (!is_ethernet) {
         // Try to inject an ethernet header, and simply return if we fail.
@@ -472,7 +529,7 @@
         // because this is easier and the kernel will strip extraneous ethernet header.
         if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
             __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
-            PUNT(CHANGE_HEAD_FAILED);
+            TC_PUNT(CHANGE_HEAD_FAILED);
         }
 
         // bpf_skb_change_head() invalidates all pointers - reload them
@@ -486,7 +543,7 @@
         // I do not believe this can ever happen, but keep the verifier happy...
         if (data + sizeof(struct ethhdr) + sizeof(*ip) + (is_tcp ? sizeof(*tcph) : sizeof(*udph)) > data_end) {
             __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
-            DROP(TOO_SHORT);
+            TC_DROP(TOO_SHORT);
         }
     };
 
@@ -498,30 +555,39 @@
     // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
     *eth = v->macHeader;
 
+    const int l4_offs_csum = is_tcp ? ETH_IP4_TCP_OFFSET(check) : ETH_IP4_UDP_OFFSET(check);
     const int sz4 = sizeof(__be32);
+    // A UDP checksum of 0 means 'no checksum': with this flag a recomputed checksum of 0 is
+    // stored as FFFF, and an existing checksum of 0 is left unmodified.
+    const int l4_flags = is_tcp ? 0 : BPF_F_MARK_MANGLED_0;
     const __be32 old_daddr = k.dst4.s_addr;
     const __be32 old_saddr = k.src4.s_addr;
     const __be32 new_daddr = v->dst46.s6_addr32[3];
     const __be32 new_saddr = v->src46.s6_addr32[3];
 
-    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), old_daddr, new_daddr, sz4 | BPF_F_PSEUDO_HDR);
+    bpf_l4_csum_replace(skb, l4_offs_csum, old_daddr, new_daddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
     bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_daddr, new_daddr, sz4);
     bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(daddr), &new_daddr, sz4, 0);
 
-    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), old_saddr, new_saddr, sz4 | BPF_F_PSEUDO_HDR);
+    bpf_l4_csum_replace(skb, l4_offs_csum, old_saddr, new_saddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
     bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_saddr, new_saddr, sz4);
     bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(saddr), &new_saddr, sz4, 0);
 
     const int sz2 = sizeof(__be16);
-    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), k.srcPort, v->srcPort, sz2);
-    bpf_skb_store_bytes(skb, ETH_IP4_TCP_OFFSET(source), &v->srcPort, sz2, 0);
+    // The offsets for TCP and UDP ports: source (u16 @ L4 offset 0) & dest (u16 @ L4 offset 2) are
+    // actually the same, so the compiler should just optimize them both down to a constant.
+    bpf_l4_csum_replace(skb, l4_offs_csum, k.srcPort, v->srcPort, sz2 | l4_flags);
+    bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(source) : ETH_IP4_UDP_OFFSET(source),
+                        &v->srcPort, sz2, 0);
 
-    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), k.dstPort, v->dstPort, sz2);
-    bpf_skb_store_bytes(skb, ETH_IP4_TCP_OFFSET(dest), &v->dstPort, sz2, 0);
+    bpf_l4_csum_replace(skb, l4_offs_csum, k.dstPort, v->dstPort, sz2 | l4_flags);
+    bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(dest) : ETH_IP4_UDP_OFFSET(dest),
+                        &v->dstPort, sz2, 0);
 
-// TTL dec
+    // TEMP HACK: lack of TTL decrement
 
-// v->last_used = bpf_ktime_get_boot_ns();
+    // This requires the bpf_ktime_get_boot_ns() helper which was added in 5.8,
+    // and backported to all Android Common Kernel 4.14+ trees.
+    if (updatetime) v->last_used = bpf_ktime_get_boot_ns();
 
     __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
     __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, bytes);
@@ -535,54 +601,123 @@
     return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
 }
 
-// Real implementations for 5.9+ kernels
+// Full featured (required) implementations for 5.8+ kernels
 
-DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
-                     sched_cls_tether_downstream4_ether_5_9, KVER(5, 9, 0))
+DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_8", AID_ROOT, AID_NETWORK_STACK,
+                     sched_cls_tether_downstream4_ether_5_8, KVER(5, 8, 0))
 (struct __sk_buff* skb) {
-    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true);
+    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true, /* updatetime */ true);
 }
 
-DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
-                     sched_cls_tether_downstream4_rawip_5_9, KVER(5, 9, 0))
+DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_8", AID_ROOT, AID_NETWORK_STACK,
+                     sched_cls_tether_downstream4_rawip_5_8, KVER(5, 8, 0))
 (struct __sk_buff* skb) {
-    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true);
+    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true, /* updatetime */ true);
 }
 
-DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
-                     sched_cls_tether_upstream4_ether_5_9, KVER(5, 9, 0))
+DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_8", AID_ROOT, AID_NETWORK_STACK,
+                     sched_cls_tether_upstream4_ether_5_8, KVER(5, 8, 0))
 (struct __sk_buff* skb) {
-    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false);
+    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false, /* updatetime */ true);
 }
 
-DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
-                     sched_cls_tether_upstream4_rawip_5_9, KVER(5, 9, 0))
+DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_8", AID_ROOT, AID_NETWORK_STACK,
+                     sched_cls_tether_upstream4_rawip_5_8, KVER(5, 8, 0))
 (struct __sk_buff* skb) {
-    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false);
+    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false, /* updatetime */ true);
 }
 
-// Placeholder implementations for older pre-5.9 kernels
+// Full featured (optional) implementations for [4.14..5.8) kernels
+
+DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$opt",
+                                    AID_ROOT, AID_NETWORK_STACK,
+                                    sched_cls_tether_downstream4_ether_opt,
+                                    KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true, /* updatetime */ true);
+}
+
+DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$opt",
+                                    AID_ROOT, AID_NETWORK_STACK,
+                                    sched_cls_tether_downstream4_rawip_opt,
+                                    KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true, /* updatetime */ true);
+}
+
+DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$opt",
+                                    AID_ROOT, AID_NETWORK_STACK,
+                                    sched_cls_tether_upstream4_ether_opt,
+                                    KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false, /* updatetime */ true);
+}
+
+DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$opt",
+                                    AID_ROOT, AID_NETWORK_STACK,
+                                    sched_cls_tether_upstream4_rawip_opt,
+                                    KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false, /* updatetime */ true);
+}
+
+// Partial (TCP-only: will not update 'last_used' field) implementations for [4.14..5.8) kernels.
+// These will be loaded only if the above optional ones failed (loading of *these* must succeed).
+//
+// [Note: as a result TCP connections will not have their conntrack timeout refreshed.  However,
+// since /proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established defaults to 432000 seconds,
+// in practice they'll only break after 5 days.  This seems an acceptable trade-off.
+//
+// Additionally, the kernel/tests change "net-test: add bpf_ktime_get_ns / bpf_ktime_get_boot_ns
+// tests", which enforces and documents the required kernel cherrypicks, makes it pretty unlikely
+// that many devices upgrading to S will end up relying on these fallback programs.]
+
+DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$4_14", AID_ROOT, AID_NETWORK_STACK,
+                           sched_cls_tether_downstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true, /* updatetime */ false);
+}
+
+DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$4_14", AID_ROOT, AID_NETWORK_STACK,
+                           sched_cls_tether_downstream4_rawip_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true, /* updatetime */ false);
+}
+
+DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$4_14", AID_ROOT, AID_NETWORK_STACK,
+                           sched_cls_tether_upstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false, /* updatetime */ false);
+}
+
+DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$4_14", AID_ROOT, AID_NETWORK_STACK,
+                           sched_cls_tether_upstream4_rawip_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+(struct __sk_buff* skb) {
+    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false, /* updatetime */ false);
+}
+
+// Placeholder (no-op) implementations for older pre-4.14 kernels
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
-                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
+                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
 (struct __sk_buff* skb) {
     return TC_ACT_OK;
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
-                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
+                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(4, 14, 0))
 (struct __sk_buff* skb) {
     return TC_ACT_OK;
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
-                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
+                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
 (struct __sk_buff* skb) {
     return TC_ACT_OK;
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
-                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
+                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(4, 14, 0))
 (struct __sk_buff* skb) {
     return TC_ACT_OK;
 }
diff --git a/tests/cts/net/src/android/net/cts/CaptivePortalTest.kt b/tests/cts/net/src/android/net/cts/CaptivePortalTest.kt
index eb5048f..a889c41 100644
--- a/tests/cts/net/src/android/net/cts/CaptivePortalTest.kt
+++ b/tests/cts/net/src/android/net/cts/CaptivePortalTest.kt
@@ -26,6 +26,9 @@
 import android.net.Network
 import android.net.NetworkCapabilities
 import android.net.NetworkCapabilities.NET_CAPABILITY_CAPTIVE_PORTAL
+import android.net.NetworkCapabilities.NET_CAPABILITY_INTERNET
+import android.net.NetworkCapabilities.NET_CAPABILITY_VALIDATED
+import android.net.NetworkCapabilities.TRANSPORT_CELLULAR
 import android.net.NetworkCapabilities.TRANSPORT_WIFI
 import android.net.NetworkRequest
 import android.net.Uri
@@ -44,8 +47,10 @@
 import android.text.TextUtils
 import androidx.test.platform.app.InstrumentationRegistry.getInstrumentation
 import androidx.test.runner.AndroidJUnit4
+import com.android.testutils.RecorderCallback
 import com.android.testutils.TestHttpServer
 import com.android.testutils.TestHttpServer.Request
+import com.android.testutils.TestableNetworkCallback
 import com.android.testutils.isDevSdkInRange
 import com.android.testutils.runAsShell
 import fi.iki.elonen.NanoHTTPD.Response.Status
@@ -124,7 +129,20 @@
         assumeTrue(pm.hasSystemFeature(FEATURE_TELEPHONY))
         assumeTrue(pm.hasSystemFeature(FEATURE_WIFI))
         utils.ensureWifiConnected()
-        utils.connectToCell()
+        val cellNetwork = utils.connectToCell()
+
+        // Verify cell network is validated
+        val cellReq = NetworkRequest.Builder()
+                .addTransportType(TRANSPORT_CELLULAR)
+                .addCapability(NET_CAPABILITY_INTERNET)
+                .build()
+        val cellCb = TestableNetworkCallback(timeoutMs = TEST_TIMEOUT_MS)
+        cm.registerNetworkCallback(cellReq, cellCb)
+        val cb = cellCb.eventuallyExpectOrNull<RecorderCallback.CallbackEntry.CapabilitiesChanged> {
+            it.network == cellNetwork && it.caps.hasCapability(NET_CAPABILITY_VALIDATED)
+        }
+        assertNotNull(cb, "Mobile network $cellNetwork has no access to the internet. " +
+                "Check the mobile data connection.")
 
         // Have network validation use a local server that serves a HTTPS error / HTTP redirect
         server.addResponse(Request(TEST_PORTAL_URL_PATH), Status.OK,
@@ -135,7 +153,8 @@
         setHttpsUrlDeviceConfig(makeUrl(TEST_HTTPS_URL_PATH))
         setHttpUrlDeviceConfig(makeUrl(TEST_HTTP_URL_PATH))
         // URL expiration needs to be in the next 10 minutes
-        setUrlExpirationDeviceConfig(System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(9))
+        assertTrue(WIFI_CONNECT_TIMEOUT_MS < TimeUnit.MINUTES.toMillis(10))
+        setUrlExpirationDeviceConfig(System.currentTimeMillis() + WIFI_CONNECT_TIMEOUT_MS)
 
         // Wait for a captive portal to be detected on the network
         val wifiNetworkFuture = CompletableFuture<Network>()
diff --git a/tests/cts/net/src/android/net/cts/NetworkAgentTest.kt b/tests/cts/net/src/android/net/cts/NetworkAgentTest.kt
index 41537a9..aea33ca 100644
--- a/tests/cts/net/src/android/net/cts/NetworkAgentTest.kt
+++ b/tests/cts/net/src/android/net/cts/NetworkAgentTest.kt
@@ -35,7 +35,6 @@
 import android.net.NetworkCapabilities.NET_CAPABILITY_NOT_METERED
 import android.net.NetworkCapabilities.NET_CAPABILITY_NOT_ROAMING
 import android.net.NetworkCapabilities.NET_CAPABILITY_NOT_SUSPENDED
-import android.net.NetworkCapabilities.NET_CAPABILITY_NOT_VCN_MANAGED
 import android.net.NetworkCapabilities.NET_CAPABILITY_NOT_VPN
 import android.net.NetworkCapabilities.NET_CAPABILITY_TEMPORARILY_NOT_METERED
 import android.net.NetworkCapabilities.NET_CAPABILITY_TRUSTED
@@ -66,19 +65,17 @@
 import android.os.Message
 import android.util.DebugUtils.valueToString
 import androidx.test.InstrumentationRegistry
-import androidx.test.runner.AndroidJUnit4
 import com.android.connectivity.aidl.INetworkAgent
 import com.android.connectivity.aidl.INetworkAgentRegistry
 import com.android.net.module.util.ArrayTrackRecord
-import com.android.testutils.DevSdkIgnoreRule
 import com.android.testutils.DevSdkIgnoreRule.IgnoreUpTo
+import com.android.testutils.DevSdkIgnoreRunner
 import com.android.testutils.RecorderCallback.CallbackEntry.Available
 import com.android.testutils.RecorderCallback.CallbackEntry.Lost
 import com.android.testutils.TestableNetworkCallback
 import org.junit.After
 import org.junit.Assert.assertArrayEquals
 import org.junit.Before
-import org.junit.Rule
 import org.junit.Test
 import org.junit.runner.RunWith
 import org.mockito.ArgumentMatchers.any
@@ -126,11 +123,11 @@
     it.obj = obj
 }
 
-@RunWith(AndroidJUnit4::class)
+@RunWith(DevSdkIgnoreRunner::class)
+// NetworkAgent is not updatable in R-, so this test does not need to be compatible with older
+// versions. NetworkAgent was also based on AsyncChannel before S so cannot be tested the same way.
+@IgnoreUpTo(Build.VERSION_CODES.R)
 class NetworkAgentTest {
-    @Rule @JvmField
-    val ignoreRule = DevSdkIgnoreRule(ignoreClassUpTo = Build.VERSION_CODES.R)
-
     private val LOCAL_IPV4_ADDRESS = InetAddresses.parseNumericAddress("192.0.2.1")
     private val REMOTE_IPV4_ADDRESS = InetAddresses.parseNumericAddress("192.0.2.2")
 
@@ -324,7 +321,6 @@
             addCapability(NET_CAPABILITY_NOT_SUSPENDED)
             addCapability(NET_CAPABILITY_NOT_ROAMING)
             addCapability(NET_CAPABILITY_NOT_VPN)
-            addCapability(NET_CAPABILITY_NOT_VCN_MANAGED)
             if (null != name) {
                 setNetworkSpecifier(StringNetworkSpecifier(name))
             }
@@ -561,7 +557,6 @@
             addTransportType(TRANSPORT_TEST)
             addTransportType(TRANSPORT_VPN)
             removeCapability(NET_CAPABILITY_NOT_VPN)
-            addCapability(NET_CAPABILITY_NOT_VCN_MANAGED)
             setTransportInfo(VpnTransportInfo(VpnManager.TYPE_VPN_SERVICE))
         }
         val defaultNetwork = mCM.activeNetwork
diff --git a/tests/cts/net/src/android/net/cts/NetworkRequestTest.java b/tests/cts/net/src/android/net/cts/NetworkRequestTest.java
index 31dc64d..d118c8a 100644
--- a/tests/cts/net/src/android/net/cts/NetworkRequestTest.java
+++ b/tests/cts/net/src/android/net/cts/NetworkRequestTest.java
@@ -16,13 +16,8 @@
 
 package android.net.cts;
 
-import static android.net.NetworkCapabilities.NET_CAPABILITY_DUN;
-import static android.net.NetworkCapabilities.NET_CAPABILITY_FOTA;
 import static android.net.NetworkCapabilities.NET_CAPABILITY_INTERNET;
 import static android.net.NetworkCapabilities.NET_CAPABILITY_MMS;
-import static android.net.NetworkCapabilities.NET_CAPABILITY_NOT_ROAMING;
-import static android.net.NetworkCapabilities.NET_CAPABILITY_NOT_VCN_MANAGED;
-import static android.net.NetworkCapabilities.NET_CAPABILITY_SUPL;
 import static android.net.NetworkCapabilities.NET_CAPABILITY_TEMPORARILY_NOT_METERED;
 import static android.net.NetworkCapabilities.TRANSPORT_BLUETOOTH;
 import static android.net.NetworkCapabilities.TRANSPORT_CELLULAR;
@@ -34,7 +29,6 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-import android.annotation.NonNull;
 import android.net.MacAddress;
 import android.net.MatchAllNetworkSpecifier;
 import android.net.NetworkCapabilities;
@@ -49,7 +43,6 @@
 
 import androidx.test.runner.AndroidJUnit4;
 
-import com.android.modules.utils.build.SdkLevel;
 import com.android.testutils.DevSdkIgnoreRule;
 import com.android.testutils.DevSdkIgnoreRule.IgnoreUpTo;
 
@@ -159,44 +152,29 @@
                 .getRequestorPackageName());
     }
 
-    private void addNotVcnManagedCapability(@NonNull NetworkCapabilities nc) {
-        if (SdkLevel.isAtLeastS()) {
-            nc.addCapability(NET_CAPABILITY_NOT_VCN_MANAGED);
-        }
-    }
-
     @Test
     @IgnoreUpTo(Build.VERSION_CODES.Q)
     public void testCanBeSatisfiedBy() {
         final LocalNetworkSpecifier specifier1 = new LocalNetworkSpecifier(1234 /* id */);
         final LocalNetworkSpecifier specifier2 = new LocalNetworkSpecifier(5678 /* id */);
 
-        // Some requests are adding NOT_VCN_MANAGED capability automatically. Add it to the
-        // capabilities below for bypassing the check.
         final NetworkCapabilities capCellularMmsInternet = new NetworkCapabilities()
                 .addTransportType(TRANSPORT_CELLULAR)
                 .addCapability(NET_CAPABILITY_MMS)
                 .addCapability(NET_CAPABILITY_INTERNET);
-        addNotVcnManagedCapability(capCellularMmsInternet);
         final NetworkCapabilities capCellularVpnMmsInternet =
                 new NetworkCapabilities(capCellularMmsInternet).addTransportType(TRANSPORT_VPN);
-        addNotVcnManagedCapability(capCellularVpnMmsInternet);
         final NetworkCapabilities capCellularMmsInternetSpecifier1 =
                 new NetworkCapabilities(capCellularMmsInternet).setNetworkSpecifier(specifier1);
-        addNotVcnManagedCapability(capCellularMmsInternetSpecifier1);
         final NetworkCapabilities capVpnInternetSpecifier1 = new NetworkCapabilities()
                 .addCapability(NET_CAPABILITY_INTERNET)
                 .addTransportType(TRANSPORT_VPN)
                 .setNetworkSpecifier(specifier1);
-        addNotVcnManagedCapability(capVpnInternetSpecifier1);
         final NetworkCapabilities capCellularMmsInternetMatchallspecifier =
                 new NetworkCapabilities(capCellularMmsInternet)
-                        .setNetworkSpecifier(new MatchAllNetworkSpecifier());
-        addNotVcnManagedCapability(capCellularMmsInternetMatchallspecifier);
+                    .setNetworkSpecifier(new MatchAllNetworkSpecifier());
         final NetworkCapabilities capCellularMmsInternetSpecifier2 =
-                new NetworkCapabilities(capCellularMmsInternet)
-                        .setNetworkSpecifier(specifier2);
-        addNotVcnManagedCapability(capCellularMmsInternetSpecifier2);
+                new NetworkCapabilities(capCellularMmsInternet).setNetworkSpecifier(specifier2);
 
         final NetworkRequest requestCellularInternetSpecifier1 = new NetworkRequest.Builder()
                 .addTransportType(TRANSPORT_CELLULAR)
@@ -261,8 +239,7 @@
 
         final NetworkCapabilities capCellInternetBWSpecifier1Signal =
                 new NetworkCapabilities.Builder(capCellInternetBWSpecifier1)
-                        .setSignalStrength(-123).build();
-        addNotVcnManagedCapability(capCellInternetBWSpecifier1Signal);
+                    .setSignalStrength(-123).build();
         assertCorrectlySatisfies(true, requestCombination,
                 capCellInternetBWSpecifier1Signal);
 
@@ -296,75 +273,4 @@
         assertEquals(Process.INVALID_UID, new NetworkRequest.Builder()
                 .clearCapabilities().build().getRequestorUid());
     }
-
-    // TODO: 1. Refactor test cases with helper method.
-    //       2. Test capability that does not yet exist.
-    @Test @IgnoreUpTo(Build.VERSION_CODES.R)
-    public void testBypassingVcnForNonInternetRequest() {
-        // Make an empty request. Verify the NOT_VCN_MANAGED is added.
-        final NetworkRequest emptyRequest = new NetworkRequest.Builder().build();
-        assertTrue(emptyRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a request explicitly add NOT_VCN_MANAGED. Verify the NOT_VCN_MANAGED is preserved.
-        final NetworkRequest mmsAddNotVcnRequest = new NetworkRequest.Builder()
-                .addCapability(NET_CAPABILITY_MMS)
-                .addCapability(NET_CAPABILITY_NOT_VCN_MANAGED)
-                .build();
-        assertTrue(mmsAddNotVcnRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Similar to above, but the opposite order.
-        final NetworkRequest mmsAddNotVcnRequest2 = new NetworkRequest.Builder()
-                .addCapability(NET_CAPABILITY_NOT_VCN_MANAGED)
-                .addCapability(NET_CAPABILITY_MMS)
-                .build();
-        assertTrue(mmsAddNotVcnRequest2.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a request explicitly remove NOT_VCN_MANAGED. Verify the NOT_VCN_MANAGED is removed.
-        final NetworkRequest removeNotVcnRequest = new NetworkRequest.Builder()
-                .removeCapability(NET_CAPABILITY_NOT_VCN_MANAGED).build();
-        assertFalse(removeNotVcnRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a request add some capability inside VCN supported capabilities.
-        // Verify the NOT_VCN_MANAGED is added.
-        final NetworkRequest notRoamRequest = new NetworkRequest.Builder()
-                .addCapability(NET_CAPABILITY_NOT_ROAMING).build();
-        assertTrue(notRoamRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a internet request. Verify the NOT_VCN_MANAGED is added.
-        final NetworkRequest internetRequest = new NetworkRequest.Builder()
-                .addCapability(NET_CAPABILITY_INTERNET).build();
-        assertTrue(internetRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a internet request which explicitly removed NOT_VCN_MANAGED.
-        // Verify the NOT_VCN_MANAGED is removed.
-        final NetworkRequest internetRemoveNotVcnRequest = new NetworkRequest.Builder()
-                .addCapability(NET_CAPABILITY_INTERNET)
-                .removeCapability(NET_CAPABILITY_NOT_VCN_MANAGED).build();
-        assertFalse(internetRemoveNotVcnRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a normal MMS request. Verify the request could bypass VCN.
-        final NetworkRequest mmsRequest =
-                new NetworkRequest.Builder().addCapability(NET_CAPABILITY_MMS).build();
-        assertFalse(mmsRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a SUPL request along with internet. Verify NOT_VCN_MANAGED is not added since
-        // SUPL is not in the supported list.
-        final NetworkRequest suplWithInternetRequest = new NetworkRequest.Builder()
-                        .addCapability(NET_CAPABILITY_SUPL)
-                        .addCapability(NET_CAPABILITY_INTERNET).build();
-        assertFalse(suplWithInternetRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a FOTA request with explicitly add NOT_VCN_MANAGED capability. Verify
-        // NOT_VCN_MANAGED is preserved.
-        final NetworkRequest fotaRequest = new NetworkRequest.Builder()
-                        .addCapability(NET_CAPABILITY_FOTA)
-                        .addCapability(NET_CAPABILITY_NOT_VCN_MANAGED).build();
-        assertTrue(fotaRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-
-        // Make a DUN request, which is in {@code VCN_SUPPORTED_CAPABILITIES}.
-        // Verify NOT_VCN_MANAGED is preserved.
-        final NetworkRequest dunRequest = new NetworkRequest.Builder()
-                .addCapability(NET_CAPABILITY_DUN).build();
-        assertTrue(dunRequest.hasCapability(NET_CAPABILITY_NOT_VCN_MANAGED));
-    }
 }