Merge "fs_mgr: SkipMountingPartitions() support glob patterns"
diff --git a/fs_mgr/fs_mgr_fstab.cpp b/fs_mgr/fs_mgr_fstab.cpp
index 31ef3b5..ad48dd1 100644
--- a/fs_mgr/fs_mgr_fstab.cpp
+++ b/fs_mgr/fs_mgr_fstab.cpp
@@ -681,7 +681,7 @@
}
}
-bool ReadFstabFromFile(const std::string& path, Fstab* fstab) {
+bool ReadFstabFromFile(const std::string& path, Fstab* fstab_out) {
auto fstab_file = std::unique_ptr<FILE, decltype(&fclose)>{fopen(path.c_str(), "re"), fclose};
if (!fstab_file) {
PERROR << __FUNCTION__ << "(): cannot open file: '" << path << "'";
@@ -690,31 +690,43 @@
bool is_proc_mounts = path == "/proc/mounts";
- if (!ReadFstabFile(fstab_file.get(), is_proc_mounts, fstab)) {
+ Fstab fstab;
+ if (!ReadFstabFile(fstab_file.get(), is_proc_mounts, &fstab)) {
LERROR << __FUNCTION__ << "(): failed to load fstab from : '" << path << "'";
return false;
}
- if (!is_proc_mounts && !access(android::gsi::kGsiBootedIndicatorFile, F_OK)) {
- // This is expected to fail if host is android Q, since Q doesn't
- // support DSU slotting. The DSU "active" indicator file would be
- // non-existent or empty if DSU is enabled within the guest system.
- // In that case, just use the default slot name "dsu".
- std::string dsu_slot;
- if (!android::gsi::GetActiveDsu(&dsu_slot)) {
- PWARNING << __FUNCTION__ << "(): failed to get active dsu slot";
+ if (!is_proc_mounts) {
+ if (!access(android::gsi::kGsiBootedIndicatorFile, F_OK)) {
+ // This is expected to fail if the host is Android Q, since Q doesn't
+ // support DSU slotting. The DSU "active" indicator file would be
+ // non-existent or empty if DSU is enabled within the guest system.
+ // In that case, just use the default slot name "dsu".
+ std::string dsu_slot;
+ if (!android::gsi::GetActiveDsu(&dsu_slot) && errno != ENOENT) {
+ PERROR << __FUNCTION__ << "(): failed to get active DSU slot";
+ return false;
+ }
+ if (dsu_slot.empty()) {
+ dsu_slot = "dsu";
+ LWARNING << __FUNCTION__ << "(): assuming default DSU slot: " << dsu_slot;
+ }
+ // This file is non-existent on Q vendor.
+ std::string lp_names;
+ if (!ReadFileToString(gsi::kGsiLpNamesFile, &lp_names) && errno != ENOENT) {
+ PERROR << __FUNCTION__ << "(): failed to read DSU LP names";
+ return false;
+ }
+ TransformFstabForDsu(&fstab, dsu_slot, Split(lp_names, ","));
+ } else if (errno != ENOENT) {
+ PERROR << __FUNCTION__ << "(): failed to access() DSU booted indicator";
+ return false;
}
- if (dsu_slot.empty()) {
- dsu_slot = "dsu";
- }
-
- std::string lp_names;
- ReadFileToString(gsi::kGsiLpNamesFile, &lp_names);
- TransformFstabForDsu(fstab, dsu_slot, Split(lp_names, ","));
}
- SkipMountingPartitions(fstab, false /* verbose */);
- EnableMandatoryFlags(fstab);
+ SkipMountingPartitions(&fstab, false /* verbose */);
+ EnableMandatoryFlags(&fstab);
+ *fstab_out = std::move(fstab);
return true;
}
@@ -799,10 +811,8 @@
// Loads the fstab file and combines with fstab entries passed in from device tree.
bool ReadDefaultFstab(Fstab* fstab) {
- Fstab dt_fstab;
- ReadFstabFromDt(&dt_fstab, false /* verbose */);
-
- *fstab = std::move(dt_fstab);
+ fstab->clear();
+ ReadFstabFromDt(fstab, false /* verbose */);
std::string default_fstab_path;
// Use different fstab paths for normal boot and recovery boot, respectively
@@ -813,16 +823,14 @@
}
Fstab default_fstab;
- if (!default_fstab_path.empty()) {
- ReadFstabFromFile(default_fstab_path, &default_fstab);
+ if (!default_fstab_path.empty() && ReadFstabFromFile(default_fstab_path, &default_fstab)) {
+ for (auto&& entry : default_fstab) {
+ fstab->emplace_back(std::move(entry));
+ }
} else {
LINFO << __FUNCTION__ << "(): failed to find device default fstab";
}
- for (auto&& entry : default_fstab) {
- fstab->emplace_back(std::move(entry));
- }
-
return !fstab->empty();
}
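
With this change, ReadFstabFromFile() parses into a local Fstab and assigns to the caller's output only on success, and ReadDefaultFstab() clears the output before merging device-tree and file entries. A minimal caller sketch of the resulting contract (assuming the usual <fstab/fstab.h> declarations, where Fstab is a vector of FstabEntry; the helper below is hypothetical):

    #include <android-base/logging.h>
    #include <fstab/fstab.h>

    using android::fs_mgr::Fstab;
    using android::fs_mgr::ReadDefaultFstab;

    // Hypothetical helper: logs the block device backing /system.
    bool LogSystemBlockDevice() {
        Fstab fstab;
        // fstab is populated only if parsing, the DSU transform, and the
        // skip/mandatory-flag processing all succeeded.
        if (!ReadDefaultFstab(&fstab)) return false;
        for (const auto& entry : fstab) {
            if (entry.mount_point == "/system") {
                LOG(INFO) << "/system is backed by " << entry.blk_device;
            }
        }
        return true;
    }
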
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_client.h b/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_client.h
index 1dab361..280e857 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_client.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_client.h
@@ -69,6 +69,8 @@
// must ONLY be called if the control device has already been deleted.
bool WaitForDeviceDelete(const std::string& control_device);
+ void CloseConnection() { sockfd_ = {}; }
+
// Detach snapuserd. This shuts down the listener socket, and will cause
// snapuserd to gracefully exit once all handler threads have terminated.
// This should only be used on first-stage instances of snapuserd.
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index 2c5bf75..a0a1e4f 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -2297,6 +2297,17 @@
return false;
}
}
+
+ // Terminate the daemon and release the snapuserd_client_ object.
+ // If we need to re-connect with the daemon, EnsureSnapuserdConnected()
+ // will re-create the object and establish the socket connection.
+ if (snapuserd_client_) {
+ LOG(INFO) << "Shutdown snapuserd daemon";
+ snapuserd_client_->DetachSnapuserd();
+ snapuserd_client_->CloseConnection();
+ snapuserd_client_ = nullptr;
+ }
+
return true;
}
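
The new block tears the daemon connection down once the snapshots are unmapped; any later operation that needs snapuserd goes back through EnsureSnapuserdConnected(), which rebuilds the client. A rough sketch of that lazy re-connect pattern (the Connect() signature and the kSnapuserdSocket name are assumptions about the snapuserd client API, not copied from this change):

    // Sketch only; the real SnapshotManager::EnsureSnapuserdConnected() differs in details.
    bool SnapshotManager::EnsureSnapuserdConnected() {
        if (snapuserd_client_) {
            return true;  // connection left over from an earlier operation
        }
        // Re-create the client object and re-establish the socket connection.
        snapuserd_client_ = SnapuserdClient::Connect(kSnapuserdSocket, std::chrono::seconds(10));
        return snapuserd_client_ != nullptr;
    }
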
diff --git a/fs_mgr/libsnapshot/snapshot_test.cpp b/fs_mgr/libsnapshot/snapshot_test.cpp
index 6ed0129..8fae00b 100644
--- a/fs_mgr/libsnapshot/snapshot_test.cpp
+++ b/fs_mgr/libsnapshot/snapshot_test.cpp
@@ -2019,6 +2019,8 @@
// Read bytes back and verify they match the cache.
ASSERT_TRUE(IsPartitionUnchanged("sys_b"));
+
+ ASSERT_TRUE(sm->UnmapAllSnapshots());
}
TEST_F(SnapshotUpdateTest, CancelOnTargetSlot) {
diff --git a/init/ueventd_parser.cpp b/init/ueventd_parser.cpp
index cab988b..2221228 100644
--- a/init/ueventd_parser.cpp
+++ b/init/ueventd_parser.cpp
@@ -106,10 +106,10 @@
}
if (std::find_if(external_firmware_handlers->begin(), external_firmware_handlers->end(),
- [&args](const auto& other) { return other.devpath == args[2]; }) !=
+ [&args](const auto& other) { return other.devpath == args[1]; }) !=
external_firmware_handlers->end()) {
return Error() << "found a previous external_firmware_handler with the same devpath, '"
- << args[2] << "'";
+ << args[1] << "'";
}
passwd* pwd = getpwnam(args[2].c_str());
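
For context, an external_firmware_handler line has the form

    external_firmware_handler <devpath> <user> <handler path and arguments>

so args[1] is the devpath, args[2] is the user looked up with getpwnam() on the line above, and args[3] is the handler. The duplicate check previously compared args[2] (the user), which this fix corrects; the test data below is updated to use distinct devpaths, since the corrected check now rejects the old duplicate-devpath input.
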
diff --git a/init/ueventd_parser_test.cpp b/init/ueventd_parser_test.cpp
index b604c53..4e63ba5 100644
--- a/init/ueventd_parser_test.cpp
+++ b/init/ueventd_parser_test.cpp
@@ -45,6 +45,13 @@
EXPECT_EQ(expected.attribute_, test.attribute_);
}
+void TestExternalFirmwareHandler(const ExternalFirmwareHandler& expected,
+ const ExternalFirmwareHandler& test) {
+ EXPECT_EQ(expected.devpath, test.devpath) << expected.devpath;
+ EXPECT_EQ(expected.uid, test.uid) << expected.uid;
+ EXPECT_EQ(expected.handler_path, test.handler_path) << expected.handler_path;
+}
+
template <typename T, typename F>
void TestVector(const T& expected, const T& test, F function) {
ASSERT_EQ(expected.size(), test.size());
@@ -67,6 +74,8 @@
TestVector(expected.sysfs_permissions, result.sysfs_permissions, TestSysfsPermissions);
TestVector(expected.dev_permissions, result.dev_permissions, TestPermissions);
EXPECT_EQ(expected.firmware_directories, result.firmware_directories);
+ TestVector(expected.external_firmware_handlers, result.external_firmware_handlers,
+ TestExternalFirmwareHandler);
}
TEST(ueventd_parser, EmptyFile) {
@@ -144,7 +153,7 @@
auto ueventd_file = R"(
external_firmware_handler devpath root handler_path
external_firmware_handler /devices/path/firmware/something001.bin system /vendor/bin/firmware_handler.sh
-external_firmware_handler /devices/path/firmware/something001.bin radio "/vendor/bin/firmware_handler.sh --has --arguments"
+external_firmware_handler /devices/path/firmware/something002.bin radio "/vendor/bin/firmware_handler.sh --has --arguments"
)";
auto external_firmware_handlers = std::vector<ExternalFirmwareHandler>{
@@ -159,7 +168,7 @@
"/vendor/bin/firmware_handler.sh",
},
{
- "/devices/path/firmware/something001.bin",
+ "/devices/path/firmware/something002.bin",
AID_RADIO,
"/vendor/bin/firmware_handler.sh --has --arguments",
},
diff --git a/libstats/pull_rust/Android.bp b/libstats/pull_rust/Android.bp
new file mode 100644
index 0000000..354c7b3
--- /dev/null
+++ b/libstats/pull_rust/Android.bp
@@ -0,0 +1,59 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_bindgen {
+ name: "libstatspull_bindgen",
+ wrapper_src: "statslog.h",
+ crate_name: "statspull_bindgen",
+ source_stem: "bindings",
+ bindgen_flags: [
+ "--size_t-is-usize",
+ "--whitelist-function=AStatsEventList_addStatsEvent",
+ "--whitelist-function=AStatsEvent_.*",
+ "--whitelist-function=AStatsManager_.*",
+ "--whitelist-var=AStatsManager_.*",
+ ],
+ target: {
+ android: {
+ shared_libs: [
+ "libstatspull",
+ "libstatssocket",
+ ],
+ },
+ host: {
+ static_libs: [
+ "libstatspull",
+ "libstatssocket",
+ ],
+ },
+ },
+}
+
+rust_library {
+ name: "libstatspull_rust",
+ crate_name: "statspull_rust",
+ srcs: ["stats_pull.rs"],
+ rustlibs: [
+ "liblazy_static",
+ "liblog_rust",
+ "libstatslog_rust_header",
+ "libstatspull_bindgen",
+ ],
+}
diff --git a/libstats/pull_rust/stats_pull.rs b/libstats/pull_rust/stats_pull.rs
new file mode 100644
index 0000000..174125e
--- /dev/null
+++ b/libstats/pull_rust/stats_pull.rs
@@ -0,0 +1,170 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A Rust interface for the StatsD pull API.
+
+use lazy_static::lazy_static;
+use statslog_rust_header::{Atoms, Stat, StatsError};
+use statspull_bindgen::*;
+use std::collections::HashMap;
+use std::convert::TryInto;
+use std::os::raw::c_void;
+use std::sync::Mutex;
+
+/// The return value of callbacks.
+pub type StatsPullResult = Vec<Box<dyn Stat>>;
+
+/// A wrapper for AStatsManager_PullAtomMetadata.
+/// It calls AStatsManager_PullAtomMetadata_release on drop.
+pub struct Metadata {
+ metadata: *mut AStatsManager_PullAtomMetadata,
+}
+
+impl Metadata {
+ /// Calls AStatsManager_PullAtomMetadata_obtain.
+ pub fn new() -> Self {
+ // Safety: We panic if the memory allocation fails.
+ let metadata = unsafe { AStatsManager_PullAtomMetadata_obtain() };
+ if metadata.is_null() {
+ panic!("Cannot obtain pull atom metadata.");
+ } else {
+ Metadata { metadata }
+ }
+ }
+
+ /// Calls AStatsManager_PullAtomMetadata_setCoolDownMillis.
+ pub fn set_cooldown_millis(&mut self, cooldown_millis: i64) {
+ // Safety: Metadata::new ensures that self.metadata is a valid object.
+ unsafe { AStatsManager_PullAtomMetadata_setCoolDownMillis(self.metadata, cooldown_millis) }
+ }
+
+ /// Calls AStatsManager_PullAtomMetadata_getCoolDownMillis.
+ pub fn get_cooldown_millis(&self) -> i64 {
+ // Safety: Metadata::new ensures that self.metadata is a valid object.
+ unsafe { AStatsManager_PullAtomMetadata_getCoolDownMillis(self.metadata) }
+ }
+
+ /// Calls AStatsManager_PullAtomMetadata_setTimeoutMillis.
+ pub fn set_timeout_millis(&mut self, timeout_millis: i64) {
+ // Safety: Metadata::new ensures that self.metadata is a valid object.
+ unsafe { AStatsManager_PullAtomMetadata_setTimeoutMillis(self.metadata, timeout_millis) }
+ }
+
+ /// Calls AStatsManager_PullAtomMetadata_getTimeoutMillis.
+ pub fn get_timeout_millis(&self) -> i64 {
+ // Safety: Metadata::new ensures that self.metadata is a valid object.
+ unsafe { AStatsManager_PullAtomMetadata_getTimeoutMillis(self.metadata) }
+ }
+
+ /// Calls AStatsManager_PullAtomMetadata_setAdditiveFields.
+ pub fn set_additive_fields(&mut self, additive_fields: &mut Vec<i32>) {
+ // Safety: Metadata::new ensures that self.metadata is a valid object.
+ unsafe {
+ AStatsManager_PullAtomMetadata_setAdditiveFields(
+ self.metadata,
+ additive_fields.as_mut_ptr(),
+ additive_fields.len().try_into().expect("Cannot convert length to i32"),
+ )
+ }
+ }
+
+ /// Calls AStatsManager_PullAtomMetadata_getAdditiveFields.
+ pub fn get_additive_fields(&self) -> Vec<i32> {
+ // Safety: Metadata::new ensures that self.metadata is a valid object.
+ // We call getNumAdditiveFields to ensure we pass getAdditiveFields a large enough array.
+ unsafe {
+ let num_fields = AStatsManager_PullAtomMetadata_getNumAdditiveFields(self.metadata)
+ .try_into()
+ .expect("Cannot convert num additive fields to usize");
+ let mut fields = vec![0; num_fields];
+ AStatsManager_PullAtomMetadata_getAdditiveFields(self.metadata, fields.as_mut_ptr());
+ fields
+ }
+ }
+}
+
+impl Drop for Metadata {
+ fn drop(&mut self) {
+ // Safety: Metadata::new ensures that self.metadata is a valid object.
+ unsafe { AStatsManager_PullAtomMetadata_release(self.metadata) }
+ }
+}
+
+impl Default for Metadata {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+lazy_static! {
+ static ref COOKIES: Mutex<HashMap<i32, fn() -> StatsPullResult>> = Mutex::new(HashMap::new());
+}
+
+// Safety: We store our callbacks in the global so they are valid.
+unsafe extern "C" fn callback_wrapper(
+ atom_tag: i32,
+ data: *mut AStatsEventList,
+ _cookie: *mut c_void,
+) -> AStatsManager_PullAtomCallbackReturn {
+ if !data.is_null() {
+ let map = COOKIES.lock().unwrap();
+ let cb = map.get(&atom_tag);
+ match cb {
+ None => log::error!("No callback found for {}", atom_tag),
+ Some(cb) => {
+ let stats = cb();
+ let result = stats
+ .iter()
+ .map(|stat| stat.add_astats_event(&mut *data))
+ .collect::<Result<Vec<()>, StatsError>>();
+ match result {
+ Ok(_) => {
+ return AStatsManager_PULL_SUCCESS as AStatsManager_PullAtomCallbackReturn
+ }
+ _ => log::error!("Error adding astats events: {:?}", result),
+ }
+ }
+ }
+ }
+ AStatsManager_PULL_SKIP as AStatsManager_PullAtomCallbackReturn
+}
+
+/// Rust wrapper for AStatsManager_setPullAtomCallback.
+pub fn set_pull_atom_callback(
+ atom: Atoms,
+ metadata: Option<&Metadata>,
+ callback: fn() -> StatsPullResult,
+) {
+ COOKIES.lock().unwrap().insert(atom as i32, callback);
+ let metadata_raw = match metadata {
+ Some(m) => m.metadata,
+ None => std::ptr::null_mut(),
+ };
+ // Safety: We pass a valid function as the callback.
+ unsafe {
+ AStatsManager_setPullAtomCallback(
+ atom as i32,
+ metadata_raw,
+ Some(callback_wrapper),
+ std::ptr::null_mut(),
+ );
+ }
+}
+
+/// Rust wrapper for AStatsManager_clearPullAtomCallback.
+pub fn clear_pull_atom_callback(atom: Atoms) {
+ COOKIES.lock().unwrap().remove(&(atom as i32));
+ // Safety: No memory allocations.
+ unsafe { AStatsManager_clearPullAtomCallback(atom as i32) }
+ unsafe { AStatsManager_clearPullAtomCallback(atom as i32) }
+}
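
A minimal registration sketch for the new wrapper (the atom variant and the empty pull result are placeholders; real code would use a variant of the generated Atoms enum and return Stat implementations from statslog_rust_header):

    use statslog_rust_header::Atoms;
    use statspull_rust::{set_pull_atom_callback, Metadata, StatsPullResult};

    // Placeholder callback; a real implementation builds the stats to report.
    fn pull_my_atom() -> StatsPullResult {
        vec![]
    }

    fn register_pull_callback() {
        let mut metadata = Metadata::new();
        metadata.set_cooldown_millis(1_000);
        metadata.set_timeout_millis(2_000);
        // Atoms::MyPulledAtom is a hypothetical variant name.
        set_pull_atom_callback(Atoms::MyPulledAtom, Some(&metadata), pull_my_atom);
    }

On the next pull for that atom tag, callback_wrapper() looks the function up in COOKIES, runs it, and converts the returned stats into AStatsEvents; clear_pull_atom_callback() removes the registration again.
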
diff --git a/libstats/pull_rust/statslog.h b/libstats/pull_rust/statslog.h
new file mode 100644
index 0000000..983fb7b
--- /dev/null
+++ b/libstats/pull_rust/statslog.h
@@ -0,0 +1,3 @@
+#pragma once
+
+#include "stats_pull_atom_callback.h"
diff --git a/libutils/RefBase.cpp b/libutils/RefBase.cpp
index 8e45226..b57e287 100644
--- a/libutils/RefBase.cpp
+++ b/libutils/RefBase.cpp
@@ -443,6 +443,20 @@
refs->mBase->onFirstRef();
}
+void RefBase::incStrongRequireStrong(const void* id) const {
+ weakref_impl* const refs = mRefs;
+ refs->incWeak(id);
+
+ refs->addStrongRef(id);
+ const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
+
+ LOG_ALWAYS_FATAL_IF(c <= 0 || c == INITIAL_STRONG_VALUE,
+ "incStrongRequireStrong() called on %p which isn't already owned", refs);
+#if PRINT_REFS
+ ALOGD("incStrong (requiring strong) of %p from %p: cnt=%d\n", this, id, c);
+#endif
+}
+
void RefBase::decStrong(const void* id) const
{
weakref_impl* const refs = mRefs;
@@ -521,6 +535,14 @@
ALOG_ASSERT(c >= 0, "incWeak called on %p after last weak ref", this);
}
+void RefBase::weakref_type::incWeakRequireWeak(const void* id)
+{
+ weakref_impl* const impl = static_cast<weakref_impl*>(this);
+ impl->addWeakRef(id);
+ const int32_t c __unused = impl->mWeak.fetch_add(1,
+ std::memory_order_relaxed);
+ LOG_ALWAYS_FATAL_IF(c <= 0, "incWeakRequireWeak called on %p which has no weak refs", this);
+}
void RefBase::weakref_type::decWeak(const void* id)
{
diff --git a/libutils/RefBase_test.cpp b/libutils/RefBase_test.cpp
index c9b4894..dcc469e 100644
--- a/libutils/RefBase_test.cpp
+++ b/libutils/RefBase_test.cpp
@@ -241,6 +241,30 @@
ASSERT_FALSE(wp1 != wp2);
}
+TEST(RefBase, AssertWeakRefExistsSuccess) {
+ // uses some other refcounting method, or none at all
+ bool isDeleted;
+ sp<Foo> foo = sp<Foo>::make(&isDeleted);
+ wp<Foo> weakFoo = foo;
+
+ EXPECT_EQ(weakFoo, wp<Foo>::fromExisting(foo.get()));
+
+ EXPECT_FALSE(isDeleted);
+ foo = nullptr;
+ EXPECT_TRUE(isDeleted);
+}
+
+TEST(RefBase, AssertWeakRefExistsDeath) {
+ // uses some other refcounting method, or none at all
+ bool isDeleted;
+ Foo* foo = new Foo(&isDeleted);
+
+ // can only get a valid wp<> object when you construct it from an sp<>
+ EXPECT_DEATH(wp<Foo>::fromExisting(foo), "");
+
+ delete foo;
+}
+
// Set up a situation in which we race with visit2AndRremove() to delete
// 2 strong references. Bar destructor checks that there are no early
// deletions and prior updates are visible to destructor.
diff --git a/libutils/StrongPointer_test.cpp b/libutils/StrongPointer_test.cpp
index d37c1de..29f6bd4 100644
--- a/libutils/StrongPointer_test.cpp
+++ b/libutils/StrongPointer_test.cpp
@@ -21,8 +21,8 @@
using namespace android;
-class SPFoo : public LightRefBase<SPFoo> {
-public:
+class SPFoo : virtual public RefBase {
+ public:
explicit SPFoo(bool* deleted_check) : mDeleted(deleted_check) {
*mDeleted = false;
}
@@ -69,3 +69,14 @@
ASSERT_NE(nullptr, foo);
ASSERT_NE(foo, nullptr);
}
+
+TEST(StrongPointer, AssertStrongRefExists) {
+ // uses some other refcounting method, or none at all
+ bool isDeleted;
+ SPFoo* foo = new SPFoo(&isDeleted);
+
+ // can only get a valid sp<> object when you construct it as an sp<> object
+ EXPECT_DEATH(sp<SPFoo>::fromExisting(foo), "");
+
+ delete foo;
+}
diff --git a/libutils/include/utils/RefBase.h b/libutils/include/utils/RefBase.h
index e7acd17..5a5bd56 100644
--- a/libutils/include/utils/RefBase.h
+++ b/libutils/include/utils/RefBase.h
@@ -140,7 +140,9 @@
// count, and accidentally passed to f(sp<T>), a strong pointer to the object
// will be temporarily constructed and destroyed, prematurely deallocating the
// object, and resulting in heap corruption. None of this would be easily
-// visible in the source.
+// visible in the source. See below on
+// ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION for a compile time
+// option which helps avoid this case.
// Extra Features:
@@ -167,6 +169,42 @@
// to THE SAME sp<> or wp<>. In effect, their thread-safety properties are
// exactly like those of T*, NOT atomic<T*>.
+// Safety option: ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION
+//
+// This flag makes the semantics for using a RefBase object with wp<> and sp<>
+// much stricter by disabling implicit conversion from raw pointers to these
+// objects. In order to use this, apply this flag in Android.bp like so:
+//
+// cflags: [
+// "-DANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION",
+// ],
+//
+// REGARDLESS of whether this flag is on, best usage of sp<> is shown below. If
+// this flag is on, no other usage is possible (directly calling RefBase methods
+// is possible, but seeing code using 'incStrong' instead of 'sp<>', for
+// instance, should already set off big alarm bells. With carefully constructed
+// data structures, it should NEVER be necessary to directly use RefBase
+// methods). Proper RefBase usage:
+//
+// class Foo : virtual public RefBase { ... };
+//
+// // always construct an sp object with sp::make
+// sp<Foo> myFoo = sp<Foo>::make(/*args*/);
+//
+// // if you need a weak pointer, it must be constructed from a strong
+// // pointer
+// wp<Foo> weakFoo = myFoo; // NOT myFoo.get()
+//
+// // If you are inside of a method of Foo and need access to a strong
+// // If you are inside of a method of Foo and need access to a strong
+// // pointer to it, explicitly call this function. This documents your
+// // intention to code readers, and it will give a runtime error for what
+// // otherwise would be potential double ownership.
+// // asserts if there is a memory issue
+// sp<Foo> thiz = sp<Foo>::fromExisting(this);
+// }
+//
+
#ifndef ANDROID_REF_BASE_H
#define ANDROID_REF_BASE_H
@@ -244,6 +282,7 @@
{
public:
void incStrong(const void* id) const;
+ void incStrongRequireStrong(const void* id) const;
void decStrong(const void* id) const;
void forceIncStrong(const void* id) const;
@@ -257,6 +296,7 @@
RefBase* refBase() const;
void incWeak(const void* id);
+ void incWeakRequireWeak(const void* id);
void decWeak(const void* id);
// acquires a strong reference if there is already one.
@@ -365,10 +405,24 @@
inline wp() : m_ptr(nullptr), m_refs(nullptr) { }
+ // if nullptr, returns nullptr
+ //
+ // if a weak pointer is already available, this will retrieve it,
+ // otherwise, this will abort
+ static inline wp<T> fromExisting(T* other);
+
+ // for more information about this flag, see above
+#if defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
+ wp(std::nullptr_t) : wp() {}
+#else
wp(T* other); // NOLINT(implicit)
+#endif
wp(const wp<T>& other);
explicit wp(const sp<T>& other);
+
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename U> wp(U* other); // NOLINT(implicit)
+#endif
template<typename U> wp(const sp<U>& other); // NOLINT(implicit)
template<typename U> wp(const wp<U>& other); // NOLINT(implicit)
@@ -376,11 +430,15 @@
// Assignment
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
wp& operator = (T* other);
+#endif
wp& operator = (const wp<T>& other);
wp& operator = (const sp<T>& other);
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename U> wp& operator = (U* other);
+#endif
template<typename U> wp& operator = (const wp<U>& other);
template<typename U> wp& operator = (const sp<U>& other);
@@ -481,12 +539,26 @@
// Note that the above comparison operations go out of their way to provide an ordering consistent
// with ordinary pointer comparison; otherwise they could ignore m_ptr, and just compare m_refs.
+template <typename T>
+wp<T> wp<T>::fromExisting(T* other) {
+ if (!other) return nullptr;
+
+ auto refs = other->getWeakRefs();
+ refs->incWeakRequireWeak(other);
+
+ wp<T> ret;
+ ret.m_ptr = other;
+ ret.m_refs = refs;
+ return ret;
+}
+
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T>
wp<T>::wp(T* other)
: m_ptr(other)
{
m_refs = other ? m_refs = other->createWeak(this) : nullptr;
}
+#endif
template<typename T>
wp<T>::wp(const wp<T>& other)
@@ -502,12 +574,14 @@
m_refs = m_ptr ? m_ptr->createWeak(this) : nullptr;
}
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T> template<typename U>
wp<T>::wp(U* other)
: m_ptr(other)
{
m_refs = other ? other->createWeak(this) : nullptr;
}
+#endif
template<typename T> template<typename U>
wp<T>::wp(const wp<U>& other)
@@ -534,6 +608,7 @@
if (m_ptr) m_refs->decWeak(this);
}
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T>
wp<T>& wp<T>::operator = (T* other)
{
@@ -544,6 +619,7 @@
m_refs = newRefs;
return *this;
}
+#endif
template<typename T>
wp<T>& wp<T>::operator = (const wp<T>& other)
@@ -569,6 +645,7 @@
return *this;
}
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T> template<typename U>
wp<T>& wp<T>::operator = (U* other)
{
@@ -579,6 +656,7 @@
m_refs = newRefs;
return *this;
}
+#endif
template<typename T> template<typename U>
wp<T>& wp<T>::operator = (const wp<U>& other)
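
Taken together, the #if blocks above mean that with ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION defined, the raw-pointer constructors and assignment operators of wp<> (and of sp<>, below) disappear, so accidental double ownership becomes a compile-time error instead of latent heap corruption. A small sketch of what still compiles under the flag (Foo is a placeholder class):

    class Foo : virtual public RefBase {};

    sp<Foo> a = sp<Foo>::make();                     // OK: the supported way to take ownership
    sp<Foo> b = a;                                   // OK: copy of an existing sp<>
    wp<Foo> w = a;                                   // OK: weak pointer from a strong pointer
    sp<Foo> n = nullptr;                             // OK: the nullptr_t constructor is kept
    // sp<Foo> bad = new Foo();                      // no longer compiles: implicit T* ctor removed
    sp<Foo> again = sp<Foo>::fromExisting(a.get());  // OK: asserts a strong ref already exists
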
diff --git a/libutils/include/utils/StrongPointer.h b/libutils/include/utils/StrongPointer.h
index 11128f2..1f07052 100644
--- a/libutils/include/utils/StrongPointer.h
+++ b/libutils/include/utils/StrongPointer.h
@@ -32,16 +32,43 @@
public:
inline sp() : m_ptr(nullptr) { }
- // TODO: switch everyone to using this over new, and make RefBase operator
- // new private to that class so that we can avoid RefBase being used with
- // other memory management mechanisms.
+ // The old way of using sp<> was like this. This is bad because it relies
+ // on implicit conversion to sp<>, which we would like to remove (if an
+ // object is being managed some other way, this is double-ownership). We
+ // want to move away from this:
+ //
+ // sp<Foo> foo = new Foo(...); // DO NOT DO THIS
+ //
+ // Instead, prefer to do this:
+ //
+ // sp<Foo> foo = sp<Foo>::make(...); // DO THIS
+ //
+ // Sometimes, in order to use this, when a constructor is marked as private,
+ // you may need to add this to your class:
+ //
+ // friend class sp<Foo>;
template <typename... Args>
static inline sp<T> make(Args&&... args);
+ // if nullptr, returns nullptr
+ //
+ // if a strong pointer is already available, this will retrieve it,
+ // otherwise, this will abort
+ static inline sp<T> fromExisting(T* other);
+
+ // for more information about this macro and correct RefBase usage, see
+ // the comment at the top of utils/RefBase.h
+#if defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
+ sp(std::nullptr_t) : sp() {}
+#else
sp(T* other); // NOLINT(implicit)
+#endif
sp(const sp<T>& other);
sp(sp<T>&& other) noexcept;
+
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename U> sp(U* other); // NOLINT(implicit)
+#endif
template<typename U> sp(const sp<U>& other); // NOLINT(implicit)
template<typename U> sp(sp<U>&& other); // NOLINT(implicit)
@@ -49,13 +76,17 @@
// Assignment
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
sp& operator = (T* other);
+#endif
sp& operator = (const sp<T>& other);
sp& operator=(sp<T>&& other) noexcept;
template<typename U> sp& operator = (const sp<U>& other);
template<typename U> sp& operator = (sp<U>&& other);
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename U> sp& operator = (U* other);
+#endif
//! Special optimization for use by ProcessState (and nobody else).
void force_set(T* other);
@@ -189,6 +220,19 @@
return result;
}
+template <typename T>
+sp<T> sp<T>::fromExisting(T* other) {
+ if (other) {
+ check_not_on_stack(other);
+ other->incStrongRequireStrong(other);
+ sp<T> result;
+ result.m_ptr = other;
+ return result;
+ }
+ return nullptr;
+}
+
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T>
sp<T>::sp(T* other)
: m_ptr(other) {
@@ -197,6 +241,7 @@
other->incStrong(this);
}
}
+#endif
template<typename T>
sp<T>::sp(const sp<T>& other)
@@ -210,6 +255,7 @@
other.m_ptr = nullptr;
}
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T> template<typename U>
sp<T>::sp(U* other)
: m_ptr(other) {
@@ -218,6 +264,7 @@
(static_cast<T*>(other))->incStrong(this);
}
}
+#endif
template<typename T> template<typename U>
sp<T>::sp(const sp<U>& other)
@@ -260,6 +307,7 @@
return *this;
}
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T>
sp<T>& sp<T>::operator =(T* other) {
T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
@@ -272,6 +320,7 @@
m_ptr = other;
return *this;
}
+#endif
template<typename T> template<typename U>
sp<T>& sp<T>::operator =(const sp<U>& other) {
@@ -294,6 +343,7 @@
return *this;
}
+#if !defined(ANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION)
template<typename T> template<typename U>
sp<T>& sp<T>::operator =(U* other) {
T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
@@ -303,6 +353,7 @@
m_ptr = other;
return *this;
}
+#endif
template<typename T>
void sp<T>::force_set(T* other) {