Merge "Use distinct GateKeeper userids when running a GSI."
diff --git a/adb/Android.bp b/adb/Android.bp
index 3813578..1e085a7 100644
--- a/adb/Android.bp
+++ b/adb/Android.bp
@@ -402,6 +402,14 @@
"liblog",
],
+ product_variables: {
+ debuggable: {
+ required: [
+ "remount",
+ ],
+ },
+ },
+
target: {
android: {
srcs: [
diff --git a/adb/benchmark_device.py b/adb/benchmark_device.py
index e56ef5a..4d0cf49 100755
--- a/adb/benchmark_device.py
+++ b/adb/benchmark_device.py
@@ -17,6 +17,8 @@
import os
import statistics
+import subprocess
+import tempfile
import time
import adb
@@ -56,6 +58,41 @@
msg = "%s: %d runs: median %.2f MiB/s, mean %.2f MiB/s, stddev: %.2f MiB/s"
print(msg % (name, len(speeds), median, mean, stddev))
+def benchmark_sink(device=None, size_mb=100):
+ if device == None:
+ device = adb.get_device()
+
+ speeds = list()
+ cmd = device.adb_cmd + ["raw", "sink:%d" % (size_mb * 1024 * 1024)]
+
+ with tempfile.TemporaryFile() as tmpfile:
+ tmpfile.truncate(size_mb * 1024 * 1024)
+
+ for _ in range(0, 10):
+ tmpfile.seek(0)
+ begin = time.time()
+ subprocess.check_call(cmd, stdin=tmpfile)
+ end = time.time()
+ speeds.append(size_mb / float(end - begin))
+
+ analyze("sink %dMiB" % size_mb, speeds)
+
+def benchmark_source(device=None, size_mb=100):
+ if device == None:
+ device = adb.get_device()
+
+ speeds = list()
+ cmd = device.adb_cmd + ["raw", "source:%d" % (size_mb * 1024 * 1024)]
+
+ with open(os.devnull, 'w') as devnull:
+ for _ in range(0, 10):
+ begin = time.time()
+ subprocess.check_call(cmd, stdout=devnull)
+ end = time.time()
+ speeds.append(size_mb / float(end - begin))
+
+ analyze("source %dMiB" % size_mb, speeds)
+
def benchmark_push(device=None, file_size_mb=100):
if device == None:
device = adb.get_device()
@@ -110,6 +147,8 @@
def main():
device = adb.get_device()
unlock(device)
+ benchmark_sink(device)
+ benchmark_source(device)
benchmark_push(device)
benchmark_pull(device)
diff --git a/adb/daemon/remount_service.cpp b/adb/daemon/remount_service.cpp
index 5e6d416..7999ddc 100644
--- a/adb/daemon/remount_service.cpp
+++ b/adb/daemon/remount_service.cpp
@@ -14,339 +14,81 @@
* limitations under the License.
*/
-#define TRACE_TAG ADB
-
-#include "sysdeps.h"
-
#include <errno.h>
#include <fcntl.h>
-#include <mntent.h>
-#include <spawn.h>
-#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
-#include <sys/mount.h>
-#include <sys/statvfs.h>
-#include <sys/vfs.h>
+#include <sys/types.h>
+#include <sys/wait.h>
#include <unistd.h>
-#include <memory>
-#include <set>
#include <string>
-#include <vector>
-
-#include <android-base/file.h>
-#include <android-base/properties.h>
-#include <bootloader_message/bootloader_message.h>
-#include <cutils/android_reboot.h>
-#include <fs_mgr.h>
-#include <fs_mgr_overlayfs.h>
#include "adb.h"
#include "adb_io.h"
#include "adb_unique_fd.h"
-#include "adb_utils.h"
-#include "set_verity_enable_state_service.h"
-
-using android::base::Realpath;
-using android::fs_mgr::Fstab;
-using android::fs_mgr::ReadDefaultFstab;
-
-// Returns the last device used to mount a directory in /proc/mounts.
-// This will find overlayfs entry where upperdir=lowerdir, to make sure
-// remount is associated with the correct directory.
-static std::string find_proc_mount(const char* dir) {
- std::unique_ptr<FILE, int(*)(FILE*)> fp(setmntent("/proc/mounts", "r"), endmntent);
- std::string mnt_fsname;
- if (!fp) return mnt_fsname;
-
- // dir might be a symlink, e.g., /product -> /system/product in GSI.
- std::string canonical_path;
- if (!Realpath(dir, &canonical_path)) {
- PLOG(ERROR) << "Realpath failed: " << dir;
- }
-
- mntent* e;
- while ((e = getmntent(fp.get())) != nullptr) {
- if (canonical_path == e->mnt_dir) {
- mnt_fsname = e->mnt_fsname;
- }
- }
- return mnt_fsname;
-}
-
-// Returns the device used to mount a directory in the fstab.
-static std::string find_fstab_mount(const char* dir) {
- Fstab fstab;
- if (!ReadDefaultFstab(&fstab)) {
- return "";
- }
-
- auto entry = std::find_if(fstab.begin(), fstab.end(),
- [&dir](const auto& entry) { return entry.mount_point == dir; });
- if (entry == fstab.end()) {
- return "";
- }
- if (entry->fs_mgr_flags.logical) {
- fs_mgr_update_logical_partition(&(*entry));
- }
- return entry->blk_device;
-}
-
-// The proc entry for / is full of lies, so check fstab instead.
-// /proc/mounts lists rootfs and /dev/root, neither of which is what we want.
-static std::string find_mount(const char* dir, bool is_root) {
- if (is_root) {
- return find_fstab_mount(dir);
- } else {
- return find_proc_mount(dir);
- }
-}
-
-bool dev_is_overlayfs(const std::string& dev) {
- return (dev == "overlay") || (dev == "overlayfs");
-}
-
-bool make_block_device_writable(const std::string& dev) {
- if (dev_is_overlayfs(dev)) return true;
- int fd = unix_open(dev, O_RDONLY | O_CLOEXEC);
- if (fd == -1) {
- return false;
- }
-
- int OFF = 0;
- bool result = (ioctl(fd, BLKROSET, &OFF) != -1);
- unix_close(fd);
- return result;
-}
-
-static bool can_unshare_blocks(int fd, const char* dev) {
- const char* E2FSCK_BIN = "/system/bin/e2fsck";
- if (access(E2FSCK_BIN, X_OK)) {
- WriteFdFmt(fd, "e2fsck is not available, cannot undo deduplication on %s\n", dev);
- return false;
- }
-
- pid_t child;
- char* env[] = {nullptr};
- const char* argv[] = {E2FSCK_BIN, "-n", "-E", "unshare_blocks", dev, nullptr};
- if (posix_spawn(&child, E2FSCK_BIN, nullptr, nullptr, const_cast<char**>(argv), env)) {
- WriteFdFmt(fd, "failed to e2fsck to check deduplication: %s\n", strerror(errno));
- return false;
- }
- int status = 0;
- int ret = TEMP_FAILURE_RETRY(waitpid(child, &status, 0));
- if (ret < 0) {
- WriteFdFmt(fd, "failed to get e2fsck status: %s\n", strerror(errno));
- return false;
- }
- if (!WIFEXITED(status)) {
- WriteFdFmt(fd, "e2fsck exited abnormally with status %d\n", status);
- return false;
- }
- int rc = WEXITSTATUS(status);
- if (rc != 0) {
- WriteFdFmt(fd,
- "%s is deduplicated, and an e2fsck check failed. It might not "
- "have enough free-space to be remounted as writable.\n",
- dev);
- return false;
- }
- return true;
-}
-
-static unsigned long get_mount_flags(int fd, const char* dir) {
- struct statvfs st_vfs;
- if (statvfs(dir, &st_vfs) == -1) {
- // Even though we could not get the original mount flags, assume that
- // the mount was originally read-only.
- WriteFdFmt(fd, "statvfs of the %s mount failed: %s.\n", dir, strerror(errno));
- return MS_RDONLY;
- }
- return st_vfs.f_flag;
-}
-
-static bool remount_partition(int fd, const char* dir) {
- if (!directory_exists(dir)) {
- return true;
- }
- bool is_root = strcmp(dir, "/") == 0;
- if (is_root && dev_is_overlayfs(find_mount("/system", false))) {
- dir = "/system";
- is_root = false;
- }
- std::string dev = find_mount(dir, is_root);
- if (is_root && dev.empty()) {
- // The fstab entry will be /system if the device switched roots during
- // first-stage init.
- dev = find_mount("/system", true);
- }
- // Even if the device for the root is not found, we still try to remount it
- // as rw. This typically only happens when running Android in a container:
- // the root will almost always be in a loop device, which is dynamic, so
- // it's not convenient to put in the fstab.
- if (dev.empty() && !is_root) {
- return true;
- }
- if (!dev.empty() && !make_block_device_writable(dev)) {
- WriteFdFmt(fd, "remount of %s failed; couldn't make block device %s writable: %s\n",
- dir, dev.c_str(), strerror(errno));
- return false;
- }
-
- unsigned long remount_flags = get_mount_flags(fd, dir);
- remount_flags &= ~MS_RDONLY;
- remount_flags |= MS_REMOUNT;
-
- if (mount(dev.c_str(), dir, "none", remount_flags | MS_BIND, nullptr) == -1) {
- // This is useful for cases where the superblock is already marked as
- // read-write, but the mount itself is read-only, such as containers
- // where the remount with just MS_REMOUNT is forbidden by the kernel.
- WriteFdFmt(fd, "remount of the %s mount failed: %s.\n", dir, strerror(errno));
- return false;
- }
- if (mount(dev.c_str(), dir, "none", MS_REMOUNT, nullptr) == -1) {
- WriteFdFmt(fd, "remount of the %s superblock failed: %s\n", dir, strerror(errno));
- return false;
- }
- return true;
-}
-
-static void reboot_for_remount(int fd, bool need_fsck) {
- std::string reboot_cmd = "reboot";
- if (need_fsck) {
- const std::vector<std::string> options = {"--fsck_unshare_blocks"};
- std::string err;
- if (!write_bootloader_message(options, &err)) {
- WriteFdFmt(fd, "Failed to set bootloader message: %s\n", err.c_str());
- return;
- }
-
- WriteFdExactly(fd,
- "The device will now reboot to recovery and attempt "
- "un-deduplication.\n");
- reboot_cmd = "reboot,recovery";
- }
-
- sync();
- android::base::SetProperty(ANDROID_RB_PROPERTY, reboot_cmd.c_str());
-}
-
-static void try_unmount_bionic(int fd) {
- static constexpr const char* kBionic = "/bionic";
- struct statfs buf;
- if (statfs(kBionic, &buf) == -1) {
- WriteFdFmt(fd, "statfs of the %s mount failed: %s.\n", kBionic, strerror(errno));
- return;
- }
- if (buf.f_flags & ST_RDONLY) {
- // /bionic is on a read-only partition; can happen for
- // non-system-as-root-devices. Don' try to unmount.
- return;
- }
- // Success/Fail of the actual remount will be reported by the function.
- remount_partition(fd, kBionic);
- return;
-}
void remount_service(unique_fd fd, const std::string& cmd) {
- bool user_requested_reboot = cmd == "-R";
+ static constexpr char remount_cmd[] = "/system/bin/remount";
+ static constexpr char remount_failed[] = "remount failed\n";
if (getuid() != 0) {
WriteFdExactly(fd.get(), "Not running as root. Try \"adb root\" first.\n");
+ WriteFdExactly(fd.get(), remount_failed);
return;
}
- bool system_verified = !(android::base::GetProperty("partition.system.verified", "").empty());
- bool vendor_verified = !(android::base::GetProperty("partition.vendor.verified", "").empty());
-
- std::vector<std::string> partitions{"/", "/odm", "/oem", "/product_services",
- "/product", "/vendor"};
-
- if (system_verified || vendor_verified) {
- // Disable verity automatically (reboot will be required).
- set_verity_enabled_state_service(unique_fd(dup(fd.get())), false);
-
- // If overlayfs is not supported, we try and remount or set up
- // un-deduplication. If it is supported, we can go ahead and wait for
- // a reboot.
- if (fs_mgr_overlayfs_valid() != OverlayfsValidResult::kNotSupported) {
- if (user_requested_reboot) {
- if (android::base::SetProperty(ANDROID_RB_PROPERTY, "reboot")) {
- WriteFdExactly(fd.get(), "rebooting device\n");
- } else {
- WriteFdExactly(fd.get(), "reboot failed\n");
- }
- }
- return;
- }
- } else if (fs_mgr_overlayfs_setup()) {
- // If we can use overlayfs, lets get it in place first before we
- // struggle with determining deduplication operations.
- Fstab fstab;
- if (ReadDefaultFstab(&fstab) && fs_mgr_overlayfs_mount_all(&fstab)) {
- WriteFdExactly(fd.get(), "overlayfs mounted\n");
- }
- }
-
- // If overlayfs is supported, we don't bother trying to un-deduplicate
- // partitions.
- std::set<std::string> dedup;
- if (fs_mgr_overlayfs_valid() == OverlayfsValidResult::kNotSupported) {
- // Find partitions that are deduplicated, and can be un-deduplicated.
- for (const auto& part : partitions) {
- auto partition = part;
- if ((part == "/") && !find_mount("/system", false).empty()) partition = "/system";
- std::string dev = find_mount(partition.c_str(), partition == "/");
- if (dev.empty() || !fs_mgr_has_shared_blocks(partition, dev)) {
- continue;
- }
- if (can_unshare_blocks(fd.get(), dev.c_str())) {
- dedup.emplace(partition);
- }
- }
-
- // Reboot now if the user requested it (and an operation needs a reboot).
- if (user_requested_reboot) {
- if (!dedup.empty()) {
- reboot_for_remount(fd.get(), !dedup.empty());
- return;
- }
- WriteFdExactly(fd.get(), "No reboot needed, skipping -R.\n");
- }
- }
-
- bool success = true;
- for (const auto& partition : partitions) {
- // Don't try to remount partitions that need an fsck in recovery.
- if (dedup.count(partition)) {
- continue;
- }
- success &= remount_partition(fd.get(), partition.c_str());
- }
-
- if (!dedup.empty()) {
- WriteFdExactly(fd.get(),
- "The following partitions are deduplicated and cannot "
- "yet be remounted:\n");
- for (const std::string& name : dedup) {
- WriteFdFmt(fd.get(), " %s\n", name.c_str());
- }
-
- WriteFdExactly(fd.get(),
- "To reboot and un-deduplicate the listed partitions, "
- "please retry with adb remount -R.\n");
- if (system_verified || vendor_verified) {
- WriteFdExactly(fd.get(), "Note: verity will be automatically disabled after reboot.\n");
- }
+ auto pid = vfork();
+ if (pid < 0) {
+ WriteFdFmt(fd.get(), "Failed to fork to %s: %s\n", remount_cmd, strerror(errno));
+ WriteFdExactly(fd.get(), remount_failed);
return;
}
- try_unmount_bionic(fd.get());
+ if (pid == 0) {
+ // child side of the fork
+ fcntl(fd.get(), F_SETFD, 0);
+ dup2(fd.get(), STDIN_FILENO);
+ dup2(fd.get(), STDOUT_FILENO);
+ dup2(fd.get(), STDERR_FILENO);
- if (!success) {
- WriteFdExactly(fd.get(), "remount failed\n");
- } else {
- WriteFdExactly(fd.get(), "remount succeeded\n");
+ execl(remount_cmd, remount_cmd, cmd.empty() ? nullptr : cmd.c_str(), nullptr);
+ _exit(-errno ?: 42);
}
+
+ int wstatus = 0;
+ auto ret = waitpid(pid, &wstatus, 0);
+
+ if (ret == -1) {
+ WriteFdFmt(fd.get(), "Failed to wait for %s: %s\n", remount_cmd, strerror(errno));
+ goto err;
+ }
+
+ if (ret != pid) {
+ WriteFdFmt(fd.get(), "pid %d and waitpid return %d do not match for %s\n",
+ static_cast<int>(pid), static_cast<int>(ret), remount_cmd);
+ goto err;
+ }
+
+ if (WIFSIGNALED(wstatus)) {
+ WriteFdFmt(fd.get(), "%s terminated with signal %s\n", remount_cmd,
+ strsignal(WTERMSIG(wstatus)));
+ goto err;
+ }
+
+ if (!WIFEXITED(wstatus)) {
+ WriteFdFmt(fd.get(), "%s stopped with status 0x%x\n", remount_cmd, wstatus);
+ goto err;
+ }
+
+ if (WEXITSTATUS(wstatus)) {
+ WriteFdFmt(fd.get(), "%s exited with status %d\n", remount_cmd,
+ static_cast<signed char>(WEXITSTATUS(wstatus)));
+ goto err;
+ }
+
+ WriteFdExactly(fd.get(), "remount succeeded\n");
+ return;
+
+err:
+ WriteFdExactly(fd.get(), remount_failed);
}
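
The child above reports exec failure via _exit(-errno ?: 42), and the parent recovers the sign by casting WEXITSTATUS() to a signed char, since exit codes are truncated to 8 bits. A minimal standalone sketch of that round-trip (illustrative only, not part of the patch):

// Sketch: how a negative errno survives the 8-bit exit status.
#include <errno.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main() {
    pid_t pid = fork();
    if (pid == 0) {
        _exit(-ENOENT);  // -2 is truncated to the 8-bit status 254
    }
    int wstatus = 0;
    waitpid(pid, &wstatus, 0);
    printf("raw=%d signed=%d\n", WEXITSTATUS(wstatus),
           static_cast<signed char>(WEXITSTATUS(wstatus)));  // prints 254 and -2
    return 0;
}
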
diff --git a/adb/daemon/remount_service.h b/adb/daemon/remount_service.h
index c847403..522a5da 100644
--- a/adb/daemon/remount_service.h
+++ b/adb/daemon/remount_service.h
@@ -21,6 +21,5 @@
#include "adb_unique_fd.h"
#if defined(__ANDROID__)
-bool make_block_device_writable(const std::string&);
void remount_service(unique_fd, const std::string&);
#endif
diff --git a/adb/daemon/set_verity_enable_state_service.cpp b/adb/daemon/set_verity_enable_state_service.cpp
index 92851c0..658261e 100644
--- a/adb/daemon/set_verity_enable_state_service.cpp
+++ b/adb/daemon/set_verity_enable_state_service.cpp
@@ -25,6 +25,7 @@
#include <libavb_user/libavb_user.h>
#include <stdarg.h>
#include <stdio.h>
+#include <sys/mount.h>
#include <sys/stat.h>
#include <android-base/properties.h>
@@ -37,7 +38,6 @@
#include "adb.h"
#include "adb_io.h"
#include "adb_unique_fd.h"
-#include "remount_service.h"
#include "fec/io.h"
@@ -51,6 +51,18 @@
if (getuid() != 0) WriteFdExactly(fd, "Maybe run adb root?\n");
}
+static bool make_block_device_writable(const std::string& dev) {
+ int fd = unix_open(dev, O_RDONLY | O_CLOEXEC);
+ if (fd == -1) {
+ return false;
+ }
+
+ int OFF = 0;
+ bool result = (ioctl(fd, BLKROSET, &OFF) != -1);
+ unix_close(fd);
+ return result;
+}
+
/* Turn verity on/off */
static bool set_verity_enabled_state(int fd, const char* block_device, const char* mount_point,
bool enable) {
diff --git a/adb/daemon/usb.cpp b/adb/daemon/usb.cpp
index f0e2861..598f2cd 100644
--- a/adb/daemon/usb.cpp
+++ b/adb/daemon/usb.cpp
@@ -29,6 +29,7 @@
#include <linux/usb/functionfs.h>
#include <sys/eventfd.h>
+#include <algorithm>
#include <array>
#include <future>
#include <memory>
@@ -56,10 +57,11 @@
// We can't find out whether we have support for AIO on ffs endpoints until we submit a read.
static std::optional<bool> gFfsAioSupported;
-static constexpr size_t kUsbReadQueueDepth = 16;
-static constexpr size_t kUsbReadSize = 16384;
+static constexpr size_t kUsbReadQueueDepth = 32;
+static constexpr size_t kUsbReadSize = 8 * PAGE_SIZE;
-static constexpr size_t kUsbWriteQueueDepth = 16;
+static constexpr size_t kUsbWriteQueueDepth = 32;
+static constexpr size_t kUsbWriteSize = 8 * PAGE_SIZE;
static const char* to_string(enum usb_functionfs_event_type type) {
switch (type) {
@@ -115,7 +117,7 @@
struct IoBlock {
bool pending;
struct iocb control;
- Block payload;
+ std::shared_ptr<Block> payload;
TransferId id() const { return TransferId::from_value(control.aio_data); }
};
@@ -207,8 +209,20 @@
std::lock_guard<std::mutex> lock(write_mutex_);
write_requests_.push_back(CreateWriteBlock(std::move(header), next_write_id_++));
if (!packet->payload.empty()) {
- write_requests_.push_back(
- CreateWriteBlock(std::move(packet->payload), next_write_id_++));
+ // The kernel attempts to allocate a contiguous block of memory for each write,
+ // which can fail if the write is large and the kernel heap is fragmented.
+ // Split large writes into smaller chunks to avoid this.
+ std::shared_ptr<Block> payload = std::make_shared<Block>(std::move(packet->payload));
+ size_t offset = 0;
+ size_t len = payload->size();
+
+ while (len > 0) {
+ size_t write_size = std::min(kUsbWriteSize, len);
+ write_requests_.push_back(
+ CreateWriteBlock(payload, offset, write_size, next_write_id_++));
+ len -= write_size;
+ offset += write_size;
+ }
}
SubmitWrites();
return true;
@@ -246,7 +260,6 @@
// until it dies, and then report failure to the transport via HandleError, which will
// eventually result in the transport being destroyed, which will result in UsbFfsConnection
// being destroyed, which unblocks the open thread and restarts this entire process.
- static constexpr int kInterruptionSignal = SIGUSR1;
static std::once_flag handler_once;
std::call_once(handler_once, []() { signal(kInterruptionSignal, [](int) {}); });
@@ -272,6 +285,7 @@
} else if (rc == 0) {
// Something in the kernel presumably went wrong.
// Close our endpoints, wait for a bit, and then try again.
+ StopWorker();
aio_context_.reset();
read_fd_.reset();
write_fd_.reset();
@@ -297,7 +311,7 @@
switch (event.type) {
case FUNCTIONFS_BIND:
- CHECK(!started) << "received FUNCTIONFS_ENABLE while already bound?";
+ CHECK(!bound) << "received FUNCTIONFS_BIND while already bound?";
bound = true;
break;
@@ -313,28 +327,7 @@
}
}
- pthread_t worker_thread_handle = worker_thread_.native_handle();
- while (true) {
- int rc = pthread_kill(worker_thread_handle, kInterruptionSignal);
- if (rc != 0) {
- LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
- break;
- }
-
- std::this_thread::sleep_for(100ms);
-
- rc = pthread_kill(worker_thread_handle, 0);
- if (rc == 0) {
- continue;
- } else if (rc == ESRCH) {
- break;
- } else {
- LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
- }
- }
-
- worker_thread_.join();
-
+ StopWorker();
aio_context_.reset();
read_fd_.reset();
write_fd_.reset();
@@ -365,12 +358,36 @@
});
}
+ void StopWorker() {
+ pthread_t worker_thread_handle = worker_thread_.native_handle();
+ while (true) {
+ int rc = pthread_kill(worker_thread_handle, kInterruptionSignal);
+ if (rc != 0) {
+ LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
+ break;
+ }
+
+ std::this_thread::sleep_for(100ms);
+
+ rc = pthread_kill(worker_thread_handle, 0);
+ if (rc == 0) {
+ continue;
+ } else if (rc == ESRCH) {
+ break;
+ } else {
+ LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
+ }
+ }
+
+ worker_thread_.join();
+ }
+
void PrepareReadBlock(IoBlock* block, uint64_t id) {
block->pending = false;
- block->payload.resize(kUsbReadSize);
+ block->payload = std::make_shared<Block>(kUsbReadSize);
block->control.aio_data = static_cast<uint64_t>(TransferId::read(id));
- block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload.data());
- block->control.aio_nbytes = block->payload.size();
+ block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload->data());
+ block->control.aio_nbytes = block->payload->size();
}
IoBlock CreateReadBlock(uint64_t id) {
@@ -421,7 +438,7 @@
uint64_t read_idx = id.id % kUsbReadQueueDepth;
IoBlock* block = &read_requests_[read_idx];
block->pending = false;
- block->payload.resize(size);
+ block->payload->resize(size);
// Notification for completed reads can be received out of order.
if (block->id().id != needed_read_id_) {
@@ -442,16 +459,16 @@
}
void ProcessRead(IoBlock* block) {
- if (!block->payload.empty()) {
+ if (!block->payload->empty()) {
if (!incoming_header_.has_value()) {
- CHECK_EQ(sizeof(amessage), block->payload.size());
+ CHECK_EQ(sizeof(amessage), block->payload->size());
amessage msg;
- memcpy(&msg, block->payload.data(), sizeof(amessage));
+ memcpy(&msg, block->payload->data(), sizeof(amessage));
LOG(DEBUG) << "USB read:" << dump_header(&msg);
incoming_header_ = msg;
} else {
size_t bytes_left = incoming_header_->data_length - incoming_payload_.size();
- Block payload = std::move(block->payload);
+ Block payload = std::move(*block->payload);
CHECK_LE(payload.size(), bytes_left);
incoming_payload_.append(std::make_unique<Block>(std::move(payload)));
}
@@ -506,7 +523,8 @@
SubmitWrites();
}
- std::unique_ptr<IoBlock> CreateWriteBlock(Block payload, uint64_t id) {
+ std::unique_ptr<IoBlock> CreateWriteBlock(std::shared_ptr<Block> payload, size_t offset,
+ size_t len, uint64_t id) {
auto block = std::make_unique<IoBlock>();
block->payload = std::move(payload);
block->control.aio_data = static_cast<uint64_t>(TransferId::write(id));
@@ -514,14 +532,20 @@
block->control.aio_lio_opcode = IOCB_CMD_PWRITE;
block->control.aio_reqprio = 0;
block->control.aio_fildes = write_fd_.get();
- block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload.data());
- block->control.aio_nbytes = block->payload.size();
+ block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload->data() + offset);
+ block->control.aio_nbytes = len;
block->control.aio_offset = 0;
block->control.aio_flags = IOCB_FLAG_RESFD;
block->control.aio_resfd = worker_event_fd_.get();
return block;
}
+ std::unique_ptr<IoBlock> CreateWriteBlock(Block payload, uint64_t id) {
+ std::shared_ptr<Block> block = std::make_shared<Block>(std::move(payload));
+ size_t len = block->size();
+ return CreateWriteBlock(std::move(block), 0, len, id);
+ }
+
void SubmitWrites() REQUIRES(write_mutex_) {
if (writes_submitted_ == kUsbWriteQueueDepth) {
return;
@@ -594,6 +618,8 @@
std::deque<std::unique_ptr<IoBlock>> write_requests_ GUARDED_BY(write_mutex_);
size_t next_write_id_ GUARDED_BY(write_mutex_) = 0;
size_t writes_submitted_ GUARDED_BY(write_mutex_) = 0;
+
+ static constexpr int kInterruptionSignal = SIGUSR1;
};
void usb_init_legacy();
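
The write path above now splits one large packet across several IoBlocks that all hold a std::shared_ptr to the same Block, so the payload is freed only after the last queued chunk completes. A rough sketch of that ownership pattern, with a hypothetical Chunk type standing in for IoBlock:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

struct Chunk {                                      // hypothetical stand-in for IoBlock
    std::shared_ptr<std::vector<uint8_t>> payload;  // shared: keeps the buffer alive
    size_t offset;
    size_t len;
};

std::vector<Chunk> SplitIntoChunks(std::vector<uint8_t> data, size_t max_chunk) {
    auto payload = std::make_shared<std::vector<uint8_t>>(std::move(data));
    std::vector<Chunk> chunks;
    size_t offset = 0;
    size_t left = payload->size();
    while (left > 0) {
        size_t n = std::min(max_chunk, left);
        chunks.push_back({payload, offset, n});  // every chunk shares the one buffer
        offset += n;
        left -= n;
    }
    return chunks;
}
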
diff --git a/bootstat/OWNERS b/bootstat/OWNERS
index 7fe0443..50b2097 100644
--- a/bootstat/OWNERS
+++ b/bootstat/OWNERS
@@ -1 +1,2 @@
jhawkins@google.com
+salyzyn@google.com
diff --git a/fastboot/fastboot_driver.cpp b/fastboot/fastboot_driver.cpp
index 65a5247..fea0a77 100644
--- a/fastboot/fastboot_driver.cpp
+++ b/fastboot/fastboot_driver.cpp
@@ -403,7 +403,7 @@
RetCode FastBootDriver::HandleResponse(std::string* response, std::vector<std::string>* info,
int* dsize) {
char status[FB_RESPONSE_SZ + 1];
- auto start = std::chrono::system_clock::now();
+ auto start = std::chrono::steady_clock::now();
auto set_response = [response](std::string s) {
if (response) *response = std::move(s);
@@ -414,7 +414,7 @@
// erase response
set_response("");
- while ((std::chrono::system_clock::now() - start) < std::chrono::seconds(RESP_TIMEOUT)) {
+ while ((std::chrono::steady_clock::now() - start) < std::chrono::seconds(RESP_TIMEOUT)) {
int r = transport_->Read(status, FB_RESPONSE_SZ);
if (r < 0) {
error_ = ErrnoStr("Status read failed");
@@ -427,6 +427,11 @@
std::string tmp = input.substr(strlen("INFO"));
info_(tmp);
add_info(std::move(tmp));
+ // We may receive one or more INFO packets during long operations,
+ // e.g. flash/erase if they are backed by slow media like NAND/NOR
+ // flash. In that case, reset the timer since it's not a real
+ // timeout.
+ start = std::chrono::steady_clock::now();
} else if (android::base::StartsWith(input, "OKAY")) {
set_response(input.substr(strlen("OKAY")));
return SUCCESS;
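
The INFO handling above restarts the response timer, so a long flash/erase that keeps emitting INFO lines is not treated as a timeout. A condensed sketch of that policy (ReadPacket and the Status enum are hypothetical stand-ins, not fastboot_driver API):

#include <chrono>
#include <functional>
#include <string>

enum class Status { OKAY, FAIL, TIMEOUT };

Status WaitForReply(const std::function<bool(std::string*)>& ReadPacket,
                    std::chrono::seconds timeout) {
    auto start = std::chrono::steady_clock::now();  // steady_clock: immune to wall-clock jumps
    while (std::chrono::steady_clock::now() - start < timeout) {
        std::string line;
        if (!ReadPacket(&line)) continue;  // nothing received yet
        if (line.rfind("INFO", 0) == 0) {
            start = std::chrono::steady_clock::now();  // progress report: reset the timer
        } else if (line.rfind("OKAY", 0) == 0) {
            return Status::OKAY;
        } else if (line.rfind("FAIL", 0) == 0) {
            return Status::FAIL;
        }
    }
    return Status::TIMEOUT;
}
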
diff --git a/fs_mgr/fs_mgr_fstab.cpp b/fs_mgr/fs_mgr_fstab.cpp
index 5d4a3cc..2f1e41f 100644
--- a/fs_mgr/fs_mgr_fstab.cpp
+++ b/fs_mgr/fs_mgr_fstab.cpp
@@ -307,6 +307,8 @@
} else {
entry->logical_blk_size = val;
}
+ } else if (StartsWith(flag, "avb_keys=")) { // must before the following "avb"
+ entry->avb_keys = arg;
} else if (StartsWith(flag, "avb")) {
entry->fs_mgr_flags.avb = true;
entry->vbmeta_partition = arg;
@@ -325,8 +327,6 @@
}
} else if (StartsWith(flag, "zram_backing_dev_path=")) {
entry->zram_backing_dev_path = arg;
- } else if (StartsWith(flag, "avb_keys=")) {
- entry->avb_keys = arg;
} else {
LWARNING << "Warning: unknown flag: " << flag;
}
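
The reordering matters because the parser matches flags by prefix: "avb_keys=<path>" also starts with "avb", so the more specific prefix has to be tested first or the key path would be consumed as a vbmeta partition name. A small illustration (the path is hypothetical):

#include <iostream>
#include <string>

static bool StartsWith(const std::string& s, const std::string& prefix) {
    return s.compare(0, prefix.size(), prefix) == 0;
}

int main() {
    const std::string flag = "avb_keys=/path/to/key.avbpubkey";
    std::cout << StartsWith(flag, "avb") << "\n";        // 1: would match if tested first
    std::cout << StartsWith(flag, "avb_keys=") << "\n";  // 1: the intended match
    return 0;
}
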
diff --git a/fs_mgr/fs_mgr_remount.cpp b/fs_mgr/fs_mgr_remount.cpp
index c0e0ccd..6312734 100644
--- a/fs_mgr/fs_mgr_remount.cpp
+++ b/fs_mgr/fs_mgr_remount.cpp
@@ -42,13 +42,14 @@
[[noreturn]] void usage(int exit_status) {
LOG(INFO) << getprogname()
- << " [-h] [-R] [-T fstab_file]\n"
+ << " [-h] [-R] [-T fstab_file] [partition]...\n"
"\t-h --help\tthis help\n"
"\t-R --reboot\tdisable verity & reboot to facilitate remount\n"
"\t-T --fstab\tcustom fstab file location\n"
+ "\tpartition\tspecific partition(s) (empty does all)\n"
"\n"
- "Remount all partitions read-write.\n"
- "-R notwithstanding, verity must be disabled.";
+ "Remount specified partition(s) read-write, by name or mount point.\n"
+ "-R notwithstanding, verity must be disabled on partition(s).";
::exit(exit_status);
}
@@ -138,6 +139,8 @@
BADARG,
NOT_ROOT,
NO_FSTAB,
+ UNKNOWN_PARTITION,
+ INVALID_PARTITION,
VERITY_PARTITION,
BAD_OVERLAY,
NO_MOUNTS,
@@ -183,11 +186,6 @@
}
}
- if (argc > optind) {
- LOG(ERROR) << "Bad Argument " << argv[optind];
- usage(BADARG);
- }
-
// Make sure we are root.
if (::getuid() != 0) {
LOG(ERROR) << "must be run as root";
@@ -211,16 +209,58 @@
auto overlayfs_candidates = fs_mgr_overlayfs_candidate_list(fstab);
// Generate the all remountable partitions sub-list
- android::fs_mgr::Fstab partitions;
+ android::fs_mgr::Fstab all;
for (auto const& entry : fstab) {
if (!remountable_partition(entry)) continue;
if (overlayfs_candidates.empty() ||
GetEntryForMountPoint(&overlayfs_candidates, entry.mount_point) ||
(is_wrapped(overlayfs_candidates, entry) == nullptr)) {
- partitions.emplace_back(entry);
+ all.emplace_back(entry);
}
}
+ // Parse the unique list of valid partition arguments.
+ android::fs_mgr::Fstab partitions;
+ for (; argc > optind; ++optind) {
+ auto partition = std::string(argv[optind]);
+ if (partition.empty()) continue;
+ if (partition == "/") partition = "/system";
+ auto find_part = [&partition](const auto& entry) {
+ const auto mount_point = system_mount_point(entry);
+ if (partition == mount_point) return true;
+ if (partition == android::base::Basename(mount_point)) return true;
+ return false;
+ };
+ // Do we know about the partition?
+ auto it = std::find_if(fstab.begin(), fstab.end(), find_part);
+ if (it == fstab.end()) {
+ LOG(ERROR) << "Unknown partition " << partition << ", skipping";
+ retval = UNKNOWN_PARTITION;
+ continue;
+ }
+ // Is that one covered by an existing overlayfs?
+ auto wrap = is_wrapped(overlayfs_candidates, *it);
+ if (wrap) {
+ LOG(INFO) << "partition " << partition << " covered by overlayfs for "
+ << wrap->mount_point << ", switching";
+ partition = system_mount_point(*wrap);
+ }
+ // Is it a remountable partition?
+ it = std::find_if(all.begin(), all.end(), find_part);
+ if (it == all.end()) {
+ LOG(ERROR) << "Invalid partition " << partition << ", skipping";
+ retval = INVALID_PARTITION;
+ continue;
+ }
+ if (GetEntryForMountPoint(&partitions, it->mount_point) == nullptr) {
+ partitions.emplace_back(*it);
+ }
+ }
+
+ if (partitions.empty() && !retval) {
+ partitions = all;
+ }
+
// Check verity and optionally setup overlayfs backing.
auto reboot_later = false;
for (auto it = partitions.begin(); it != partitions.end();) {
diff --git a/fs_mgr/libfiemap_writer/Android.bp b/fs_mgr/libfiemap_writer/Android.bp
index 33c3cad..7463810 100644
--- a/fs_mgr/libfiemap_writer/Android.bp
+++ b/fs_mgr/libfiemap_writer/Android.bp
@@ -20,13 +20,17 @@
recovery_available: true,
export_include_dirs: ["include"],
cflags: [
- // TODO(b/121211685): Allows us to create a skeleton of required classes
- "-Wno-unused-private-field",
- "-Wno-unused-parameter",
+ "-D_FILE_OFFSET_BITS=64",
],
srcs: [
"fiemap_writer.cpp",
+ "split_fiemap_writer.cpp",
+ "utility.cpp",
+ ],
+
+ static_libs: [
+ "libext4_utils",
],
header_libs: [
diff --git a/fs_mgr/libfiemap_writer/fiemap_writer.cpp b/fs_mgr/libfiemap_writer/fiemap_writer.cpp
index 6b742e6..3d41876 100644
--- a/fs_mgr/libfiemap_writer/fiemap_writer.cpp
+++ b/fs_mgr/libfiemap_writer/fiemap_writer.cpp
@@ -27,6 +27,7 @@
#include <sys/vfs.h>
#include <unistd.h>
+#include <limits>
#include <string>
#include <utility>
#include <vector>
@@ -200,17 +201,21 @@
return false;
}
- if (file_size % sfs.f_bsize) {
- LOG(ERROR) << "File size " << file_size << " is not aligned to optimal block size "
- << sfs.f_bsize << " for file " << file_path;
+ if (!sfs.f_bsize) {
+ LOG(ERROR) << "Unsupported block size: " << sfs.f_bsize;
return false;
}
// Check if the filesystem is of supported types.
- // Only ext4 and f2fs are tested and supported.
- if ((sfs.f_type != EXT4_SUPER_MAGIC) && (sfs.f_type != F2FS_SUPER_MAGIC)) {
- LOG(ERROR) << "Unsupported file system type: 0x" << std::hex << sfs.f_type;
- return false;
+ // Only ext4, f2fs, and vfat are tested and supported.
+ switch (sfs.f_type) {
+ case EXT4_SUPER_MAGIC:
+ case F2FS_SUPER_MAGIC:
+ case MSDOS_SUPER_MAGIC:
+ break;
+ default:
+ LOG(ERROR) << "Unsupported file system type: 0x" << std::hex << sfs.f_type;
+ return false;
}
uint64_t available_bytes = sfs.f_bsize * sfs.f_bavail;
@@ -225,14 +230,46 @@
}
static bool AllocateFile(int file_fd, const std::string& file_path, uint64_t blocksz,
- uint64_t file_size, std::function<bool(uint64_t, uint64_t)> on_progress) {
+ uint64_t file_size, unsigned int fs_type,
+ std::function<bool(uint64_t, uint64_t)> on_progress) {
// Reserve space for the file on the file system and write it out to make sure the extents
// don't come back unwritten. Return from this function with the kernel file offset set to 0.
// If the filesystem is f2fs, then we also PIN the file on disk to make sure the blocks
// aren't moved around.
- if (fallocate64(file_fd, FALLOC_FL_ZERO_RANGE, 0, file_size)) {
- PLOG(ERROR) << "Failed to allocate space for file: " << file_path << " size: " << file_size;
- return false;
+ switch (fs_type) {
+ case EXT4_SUPER_MAGIC:
+ case F2FS_SUPER_MAGIC:
+ if (fallocate(file_fd, FALLOC_FL_ZERO_RANGE, 0, file_size)) {
+ PLOG(ERROR) << "Failed to allocate space for file: " << file_path
+ << " size: " << file_size;
+ return false;
+ }
+ break;
+ case MSDOS_SUPER_MAGIC: {
+ // fallocate() is not supported, and not needed, since VFAT does not support holes.
+ // Instead we can perform a much faster allocation.
+ auto offset = TEMP_FAILURE_RETRY(lseek(file_fd, file_size - 1, SEEK_SET));
+ if (offset < 0) {
+ PLOG(ERROR) << "Failed to lseek " << file_path;
+ return false;
+ }
+ if (offset != file_size - 1) {
+ LOG(ERROR) << "Seek returned wrong offset " << offset << " for file " << file_path;
+ return false;
+ }
+ char buffer[] = {0};
+ if (!android::base::WriteFully(file_fd, buffer, 1)) {
+ PLOG(ERROR) << "Write failed: " << file_path;
+ return false;
+ }
+ if (on_progress && !on_progress(file_size, file_size)) {
+ return false;
+ }
+ return true;
+ }
+ default:
+ LOG(ERROR) << "Missing fallocate() support for file system " << fs_type;
+ return false;
}
// write zeroes in 'blocksz' byte increments until we reach file_size to make sure the data
@@ -287,9 +324,9 @@
}
static bool PinFile(int file_fd, const std::string& file_path, uint32_t fs_type) {
- if (fs_type == EXT4_SUPER_MAGIC) {
- // No pinning necessary for ext4. The blocks, once allocated, are expected
- // to be fixed.
+ if (fs_type != F2FS_SUPER_MAGIC) {
+ // No pinning necessary for ext4/msdos. The blocks, once allocated, are
+ // expected to be fixed.
return true;
}
@@ -324,9 +361,9 @@
}
static bool IsFilePinned(int file_fd, const std::string& file_path, uint32_t fs_type) {
- if (fs_type == EXT4_SUPER_MAGIC) {
- // No pinning necessary for ext4. The blocks, once allocated, are expected
- // to be fixed.
+ if (fs_type != F2FS_SUPER_MAGIC) {
+ // No pinning necessary for ext4 or vfat. The blocks, once allocated,
+ // are expected to be fixed.
return true;
}
@@ -438,6 +475,44 @@
return last_extent_seen;
}
+static bool ReadFibmap(int file_fd, const std::string& file_path,
+ std::vector<struct fiemap_extent>* extents) {
+ struct stat s;
+ if (fstat(file_fd, &s)) {
+ PLOG(ERROR) << "Failed to stat " << file_path;
+ return false;
+ }
+
+ uint64_t num_blocks = (s.st_size + s.st_blksize - 1) / s.st_blksize;
+ if (num_blocks > std::numeric_limits<uint32_t>::max()) {
+ LOG(ERROR) << "Too many blocks for FIBMAP (" << num_blocks << ")";
+ return false;
+ }
+
+ for (uint32_t last_block, block_number = 0; block_number < num_blocks; block_number++) {
+ uint32_t block = block_number;
+ if (ioctl(file_fd, FIBMAP, &block)) {
+ PLOG(ERROR) << "Failed to get FIBMAP for file " << file_path;
+ return false;
+ }
+ if (!block) {
+ LOG(ERROR) << "Logical block " << block_number << " is a hole, which is not supported";
+ return false;
+ }
+
+ if (!extents->empty() && block == last_block + 1) {
+ extents->back().fe_length++;
+ } else {
+ extents->push_back(fiemap_extent{.fe_logical = block_number,
+ .fe_physical = block,
+ .fe_length = 1,
+ .fe_flags = 0});
+ }
+ last_block = block;
+ }
+ return true;
+}
+
FiemapUniquePtr FiemapWriter::Open(const std::string& file_path, uint64_t file_size, bool create,
std::function<bool(uint64_t, uint64_t)> progress) {
// if 'create' is false, open an existing file and do not truncate.
@@ -500,8 +575,13 @@
return nullptr;
}
+ // Align up to the nearest block size.
+ if (file_size % blocksz) {
+ file_size += blocksz - (file_size % blocksz);
+ }
+
if (create) {
- if (!AllocateFile(file_fd, abs_path, blocksz, file_size, std::move(progress))) {
+ if (!AllocateFile(file_fd, abs_path, blocksz, file_size, fs_type, std::move(progress))) {
LOG(ERROR) << "Failed to allocate file: " << abs_path << " of size: " << file_size
<< " bytes";
cleanup(abs_path, create);
@@ -518,10 +598,22 @@
// now allocate the FiemapWriter and start setting it up
FiemapUniquePtr fmap(new FiemapWriter());
- if (!ReadFiemap(file_fd, abs_path, &fmap->extents_)) {
- LOG(ERROR) << "Failed to read fiemap of file: " << abs_path;
- cleanup(abs_path, create);
- return nullptr;
+ switch (fs_type) {
+ case EXT4_SUPER_MAGIC:
+ case F2FS_SUPER_MAGIC:
+ if (!ReadFiemap(file_fd, abs_path, &fmap->extents_)) {
+ LOG(ERROR) << "Failed to read fiemap of file: " << abs_path;
+ cleanup(abs_path, create);
+ return nullptr;
+ }
+ break;
+ case MSDOS_SUPER_MAGIC:
+ if (!ReadFibmap(file_fd, abs_path, &fmap->extents_)) {
+ LOG(ERROR) << "Failed to read fibmap of file: " << abs_path;
+ cleanup(abs_path, create);
+ return nullptr;
+ }
+ break;
}
fmap->file_path_ = abs_path;
@@ -532,14 +624,10 @@
fmap->fs_type_ = fs_type;
fmap->block_size_ = blocksz;
- LOG(INFO) << "Successfully created FiemapWriter for file " << abs_path << " on block device "
- << bdev_path;
+ LOG(VERBOSE) << "Successfully created FiemapWriter for file " << abs_path << " on block device "
+ << bdev_path;
return fmap;
}
-bool FiemapWriter::Read(off64_t off, uint8_t* buffer, uint64_t size) {
- return false;
-}
-
} // namespace fiemap_writer
} // namespace android
diff --git a/fs_mgr/libfiemap_writer/fiemap_writer_test.cpp b/fs_mgr/libfiemap_writer/fiemap_writer_test.cpp
index 3e8381b..ab4efae 100644
--- a/fs_mgr/libfiemap_writer/fiemap_writer_test.cpp
+++ b/fs_mgr/libfiemap_writer/fiemap_writer_test.cpp
@@ -22,6 +22,7 @@
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <sys/vfs.h>
#include <unistd.h>
#include <string>
@@ -32,8 +33,10 @@
#include <android-base/unique_fd.h>
#include <gtest/gtest.h>
#include <libdm/loop_control.h>
-
#include <libfiemap_writer/fiemap_writer.h>
+#include <libfiemap_writer/split_fiemap_writer.h>
+
+#include "utility.h"
using namespace std;
using namespace std::string_literals;
@@ -43,6 +46,7 @@
std::string gTestDir;
uint64_t testfile_size = 536870912; // default of 512MiB
+size_t gBlockSize = 0;
class FiemapWriterTest : public ::testing::Test {
protected:
@@ -57,6 +61,24 @@
std::string testfile;
};
+class SplitFiemapTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ const ::testing::TestInfo* tinfo = ::testing::UnitTest::GetInstance()->current_test_info();
+ testfile = gTestDir + "/"s + tinfo->name();
+ }
+
+ void TearDown() override {
+ std::string message;
+ if (!SplitFiemap::RemoveSplitFiles(testfile, &message)) {
+ cerr << "Could not remove all split files: " << message;
+ }
+ }
+
+ // name of the file we use for testing
+ std::string testfile;
+};
+
TEST_F(FiemapWriterTest, CreateImpossiblyLargeFile) {
// Try creating a file of size ~100TB but aligned to
// 512 byte to make sure block alignment tests don't
@@ -69,39 +91,47 @@
TEST_F(FiemapWriterTest, CreateUnalignedFile) {
// Try creating a file of size 4097 bytes which is guaranteed
- // to be unaligned to all known block sizes. The creation must
- // fail.
- FiemapUniquePtr fptr = FiemapWriter::Open(testfile, 4097);
- EXPECT_EQ(fptr, nullptr);
- EXPECT_EQ(access(testfile.c_str(), F_OK), -1);
- EXPECT_EQ(errno, ENOENT);
+ // to be unaligned to all known block sizes.
+ FiemapUniquePtr fptr = FiemapWriter::Open(testfile, gBlockSize + 1);
+ ASSERT_NE(fptr, nullptr);
+ ASSERT_EQ(fptr->size(), gBlockSize * 2);
}
TEST_F(FiemapWriterTest, CheckFilePath) {
- FiemapUniquePtr fptr = FiemapWriter::Open(testfile, 4096);
+ FiemapUniquePtr fptr = FiemapWriter::Open(testfile, gBlockSize);
ASSERT_NE(fptr, nullptr);
- EXPECT_EQ(fptr->size(), 4096);
+ EXPECT_EQ(fptr->size(), gBlockSize);
EXPECT_EQ(fptr->file_path(), testfile);
EXPECT_EQ(access(testfile.c_str(), F_OK), 0);
}
TEST_F(FiemapWriterTest, CheckProgress) {
- std::vector<uint64_t> expected{
- 0,
- 4096,
- };
+ std::vector<uint64_t> expected;
size_t invocations = 0;
auto callback = [&](uint64_t done, uint64_t total) -> bool {
EXPECT_LT(invocations, expected.size());
EXPECT_EQ(done, expected[invocations]);
- EXPECT_EQ(total, 4096);
+ EXPECT_EQ(total, gBlockSize);
invocations++;
return true;
};
- auto ptr = FiemapWriter::Open(testfile, 4096, true, std::move(callback));
+ uint32_t fs_type;
+ {
+ auto ptr = FiemapWriter::Open(testfile, gBlockSize, true);
+ ASSERT_NE(ptr, nullptr);
+ fs_type = ptr->fs_type();
+ }
+ ASSERT_EQ(unlink(testfile.c_str()), 0);
+
+ if (fs_type != MSDOS_SUPER_MAGIC) {
+ expected.push_back(0);
+ }
+ expected.push_back(gBlockSize);
+
+ auto ptr = FiemapWriter::Open(testfile, gBlockSize, true, std::move(callback));
EXPECT_NE(ptr, nullptr);
- EXPECT_EQ(invocations, 2);
+ EXPECT_EQ(invocations, expected.size());
}
TEST_F(FiemapWriterTest, CheckPinning) {
@@ -111,8 +141,8 @@
}
TEST_F(FiemapWriterTest, CheckBlockDevicePath) {
- FiemapUniquePtr fptr = FiemapWriter::Open(testfile, 4096);
- EXPECT_EQ(fptr->size(), 4096);
+ FiemapUniquePtr fptr = FiemapWriter::Open(testfile, gBlockSize);
+ EXPECT_EQ(fptr->size(), gBlockSize);
EXPECT_EQ(fptr->bdev_path().find("/dev/block/"), size_t(0));
EXPECT_EQ(fptr->bdev_path().find("/dev/block/dm-"), string::npos);
}
@@ -139,44 +169,69 @@
EXPECT_GT(fptr->extents().size(), 0);
}
-class TestExistingFile : public ::testing::Test {
- protected:
- void SetUp() override {
- unaligned_file_ = gTestDir + "/unaligned_file";
- file_4k_ = gTestDir + "/file_4k";
- file_32k_ = gTestDir + "/file_32k";
+TEST_F(FiemapWriterTest, ExistingFile) {
+ // Create the file.
+ { ASSERT_NE(FiemapWriter::Open(testfile, gBlockSize), nullptr); }
+ // Test that we can still open it.
+ {
+ auto ptr = FiemapWriter::Open(testfile, 0, false);
+ ASSERT_NE(ptr, nullptr);
+ EXPECT_GT(ptr->extents().size(), 0);
+ }
+}
- CleanupFiles();
- fptr_unaligned = FiemapWriter::Open(unaligned_file_, 4097);
- fptr_4k = FiemapWriter::Open(file_4k_, 4096);
- fptr_32k = FiemapWriter::Open(file_32k_, 32768);
+TEST_F(FiemapWriterTest, FileDeletedOnError) {
+ auto callback = [](uint64_t, uint64_t) -> bool { return false; };
+ auto ptr = FiemapWriter::Open(testfile, gBlockSize, true, std::move(callback));
+ EXPECT_EQ(ptr, nullptr);
+ EXPECT_EQ(access(testfile.c_str(), F_OK), -1);
+ EXPECT_EQ(errno, ENOENT);
+}
+
+TEST_F(FiemapWriterTest, MaxBlockSize) {
+ ASSERT_GT(DetermineMaximumFileSize(testfile), 0);
+}
+
+TEST_F(SplitFiemapTest, Create) {
+ auto ptr = SplitFiemap::Create(testfile, 1024 * 768, 1024 * 32);
+ ASSERT_NE(ptr, nullptr);
+
+ auto extents = ptr->extents();
+
+ // Destroy the fiemap, closing file handles. This should not delete them.
+ ptr = nullptr;
+
+ std::vector<std::string> files;
+ ASSERT_TRUE(SplitFiemap::GetSplitFileList(testfile, &files));
+ for (const auto& path : files) {
+ EXPECT_EQ(access(path.c_str(), F_OK), 0);
}
- void TearDown() { CleanupFiles(); }
+ ASSERT_GE(extents.size(), files.size());
+}
- void CleanupFiles() {
- unlink(unaligned_file_.c_str());
- unlink(file_4k_.c_str());
- unlink(file_32k_.c_str());
+TEST_F(SplitFiemapTest, Open) {
+ {
+ auto ptr = SplitFiemap::Create(testfile, 1024 * 768, 1024 * 32);
+ ASSERT_NE(ptr, nullptr);
}
- std::string unaligned_file_;
- std::string file_4k_;
- std::string file_32k_;
- FiemapUniquePtr fptr_unaligned;
- FiemapUniquePtr fptr_4k;
- FiemapUniquePtr fptr_32k;
-};
+ auto ptr = SplitFiemap::Open(testfile);
+ ASSERT_NE(ptr, nullptr);
-TEST_F(TestExistingFile, ErrorChecks) {
- EXPECT_EQ(fptr_unaligned, nullptr);
- EXPECT_NE(fptr_4k, nullptr);
- EXPECT_NE(fptr_32k, nullptr);
+ auto extents = ptr->extents();
+ ASSERT_GE(extents.size(), 24);
+}
- EXPECT_EQ(fptr_4k->size(), 4096);
- EXPECT_EQ(fptr_32k->size(), 32768);
- EXPECT_GT(fptr_4k->extents().size(), 0);
- EXPECT_GT(fptr_32k->extents().size(), 0);
+TEST_F(SplitFiemapTest, DeleteOnFail) {
+ auto ptr = SplitFiemap::Create(testfile, 1024 * 1024 * 10, 1);
+ ASSERT_EQ(ptr, nullptr);
+
+ std::string first_file = testfile + ".0001";
+ ASSERT_NE(access(first_file.c_str(), F_OK), 0);
+ ASSERT_EQ(errno, ENOENT);
+ ASSERT_NE(access(testfile.c_str(), F_OK), 0);
+ ASSERT_EQ(errno, ENOENT);
}
class VerifyBlockWritesExt4 : public ::testing::Test {
@@ -263,6 +318,21 @@
std::string fs_path;
};
+bool DetermineBlockSize() {
+ struct statfs s;
+ if (statfs(gTestDir.c_str(), &s)) {
+ std::cerr << "Could not call statfs: " << strerror(errno) << "\n";
+ return false;
+ }
+ if (!s.f_bsize) {
+ std::cerr << "Invalid block size: " << s.f_bsize << "\n";
+ return false;
+ }
+
+ gBlockSize = s.f_bsize;
+ return true;
+}
+
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
if (argc <= 1) {
@@ -275,10 +345,13 @@
std::string tempdir = argv[1] + "/XXXXXX"s;
if (!mkdtemp(tempdir.data())) {
- cerr << "unable to create tempdir on " << argv[1];
+ cerr << "unable to create tempdir on " << argv[1] << "\n";
exit(EXIT_FAILURE);
}
- gTestDir = tempdir;
+ if (!android::base::Realpath(tempdir, &gTestDir)) {
+ cerr << "unable to find realpath for " << tempdir;
+ exit(EXIT_FAILURE);
+ }
if (argc > 2) {
testfile_size = strtoull(argv[2], NULL, 0);
@@ -287,6 +360,10 @@
}
}
+ if (!DetermineBlockSize()) {
+ exit(EXIT_FAILURE);
+ }
+
auto result = RUN_ALL_TESTS();
std::string cmd = "rm -rf " + gTestDir;
diff --git a/fs_mgr/libfiemap_writer/include/libfiemap_writer/fiemap_writer.h b/fs_mgr/libfiemap_writer/include/libfiemap_writer/fiemap_writer.h
index b5472c9..831bc75 100644
--- a/fs_mgr/libfiemap_writer/include/libfiemap_writer/fiemap_writer.h
+++ b/fs_mgr/libfiemap_writer/include/libfiemap_writer/fiemap_writer.h
@@ -41,6 +41,9 @@
// invoked, if create is true, while the file is being initialized. It receives the bytes
// written and the number of total bytes. If the callback returns false, the operation will
// fail.
+ //
+ // Note: when create is true, the file size will be aligned up to the nearest file system
+ // block.
static FiemapUniquePtr Open(const std::string& file_path, uint64_t file_size,
bool create = true,
std::function<bool(uint64_t, uint64_t)> progress = {});
@@ -64,11 +67,6 @@
static bool GetBlockDeviceForFile(const std::string& file_path, std::string* bdev_path,
bool* uses_dm = nullptr);
- // The counter part of Write(). It is an error for the offset to be unaligned with
- // the block device's block size.
- // In case of error, the contents of buffer MUST be discarded.
- bool Read(off64_t off, uint8_t* buffer, uint64_t size);
-
~FiemapWriter() = default;
const std::string& file_path() const { return file_path_; };
@@ -76,6 +74,7 @@
const std::string& bdev_path() const { return bdev_path_; };
uint64_t block_size() const { return block_size_; };
const std::vector<struct fiemap_extent>& extents() { return extents_; };
+ uint32_t fs_type() const { return fs_type_; }
// Non-copyable & Non-movable
FiemapWriter(const FiemapWriter&) = delete;
diff --git a/fs_mgr/libfiemap_writer/include/libfiemap_writer/split_fiemap_writer.h b/fs_mgr/libfiemap_writer/include/libfiemap_writer/split_fiemap_writer.h
new file mode 100644
index 0000000..765cc84
--- /dev/null
+++ b/fs_mgr/libfiemap_writer/include/libfiemap_writer/split_fiemap_writer.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "fiemap_writer.h"
+
+namespace android {
+namespace fiemap_writer {
+
+// Wrapper around FiemapWriter that is able to split images across files if
+// necessary.
+class SplitFiemap final {
+ public:
+ using ProgressCallback = std::function<bool(uint64_t, uint64_t)>;
+
+ // Create a new split fiemap file. If |max_piece_size| is 0, the number of
+ // pieces will be determined automatically by detecting the filesystem.
+ // Otherwise, the file will be split evenly (with the remainder in the
+ // final file).
+ static std::unique_ptr<SplitFiemap> Create(const std::string& file_path, uint64_t file_size,
+ uint64_t max_piece_size,
+ ProgressCallback progress = {});
+
+ // Open an existing split fiemap file.
+ static std::unique_ptr<SplitFiemap> Open(const std::string& file_path);
+
+ ~SplitFiemap();
+
+ // Return a list of all files created for a split file.
+ static bool GetSplitFileList(const std::string& file_path, std::vector<std::string>* list);
+
+ // Destroy all components of a split file. If the root file does not exist,
+ // this returns true and does not report an error.
+ static bool RemoveSplitFiles(const std::string& file_path, std::string* message = nullptr);
+
+ const std::vector<struct fiemap_extent>& extents();
+ uint32_t block_size() const;
+ uint64_t size() const { return total_size_; }
+
+ // Non-copyable & Non-movable
+ SplitFiemap(const SplitFiemap&) = delete;
+ SplitFiemap& operator=(const SplitFiemap&) = delete;
+ SplitFiemap& operator=(SplitFiemap&&) = delete;
+ SplitFiemap(SplitFiemap&&) = delete;
+
+ private:
+ SplitFiemap() = default;
+ void AddFile(FiemapUniquePtr&& file);
+
+ bool creating_ = false;
+ std::string list_file_;
+ std::vector<FiemapUniquePtr> files_;
+ std::vector<struct fiemap_extent> extents_;
+ uint64_t total_size_ = 0;
+};
+
+} // namespace fiemap_writer
+} // namespace android
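
A possible use of the interface above, relying only on the methods it declares (the image path is hypothetical):

#include <cstddef>

#include <libfiemap_writer/split_fiemap_writer.h>

using android::fiemap_writer::SplitFiemap;

bool Example() {
    // Create a 768 KiB image, split into pieces of at most 32 KiB each.
    auto fiemap = SplitFiemap::Create("/data/example.img", 1024 * 768, 1024 * 32);
    if (!fiemap) return false;

    // The extent list is the concatenation of every piece's extents.
    size_t extent_count = fiemap->extents().size();
    (void)extent_count;

    // Reopen later, then remove all pieces plus the list file.
    fiemap = SplitFiemap::Open("/data/example.img");
    if (!fiemap) return false;
    fiemap = nullptr;
    return SplitFiemap::RemoveSplitFiles("/data/example.img");
}
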
diff --git a/fs_mgr/libfiemap_writer/split_fiemap_writer.cpp b/fs_mgr/libfiemap_writer/split_fiemap_writer.cpp
new file mode 100644
index 0000000..1f80370
--- /dev/null
+++ b/fs_mgr/libfiemap_writer/split_fiemap_writer.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <libfiemap_writer/split_fiemap_writer.h>
+
+#include <fcntl.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <android-base/unique_fd.h>
+
+#include "utility.h"
+
+namespace android {
+namespace fiemap_writer {
+
+using android::base::unique_fd;
+
+// We use a four-digit suffix at the end of filenames.
+static const size_t kMaxFilePieces = 500;
+
+std::unique_ptr<SplitFiemap> SplitFiemap::Create(const std::string& file_path, uint64_t file_size,
+ uint64_t max_piece_size,
+ ProgressCallback progress) {
+ if (!file_size) {
+ LOG(ERROR) << "Cannot create a fiemap for a 0-length file: " << file_path;
+ return nullptr;
+ }
+
+ if (!max_piece_size) {
+ max_piece_size = DetermineMaximumFileSize(file_path);
+ if (!max_piece_size) {
+ LOG(ERROR) << "Could not determine maximum file size for " << file_path;
+ return nullptr;
+ }
+ }
+
+ // Call |progress| only when the total percentage would significantly change.
+ int permille = -1;
+ uint64_t total_bytes_written = 0;
+ auto on_progress = [&](uint64_t written, uint64_t) -> bool {
+ uint64_t actual_written = total_bytes_written + written;
+ int new_permille = (actual_written * 1000) / file_size;
+ if (new_permille != permille && actual_written < file_size) {
+ if (progress && !progress(actual_written, file_size)) {
+ return false;
+ }
+ permille = new_permille;
+ }
+ return true;
+ };
+
+ std::unique_ptr<SplitFiemap> out(new SplitFiemap());
+ out->creating_ = true;
+ out->list_file_ = file_path;
+
+ // Create the split files.
+ uint64_t remaining_bytes = file_size;
+ while (remaining_bytes) {
+ if (out->files_.size() >= kMaxFilePieces) {
+ LOG(ERROR) << "Requested size " << file_size << " created too many split files";
+ return nullptr;
+ }
+ std::string chunk_path =
+ android::base::StringPrintf("%s.%04d", file_path.c_str(), (int)out->files_.size());
+ uint64_t chunk_size = std::min(max_piece_size, remaining_bytes);
+ auto writer = FiemapWriter::Open(chunk_path, chunk_size, true, on_progress);
+ if (!writer) {
+ return nullptr;
+ }
+
+ // To make sure the alignment doesn't create too much inconsistency, we
+ // account for the *actual* size, not the requested size.
+ total_bytes_written += writer->size();
+ remaining_bytes -= writer->size();
+
+ out->AddFile(std::move(writer));
+ }
+
+ // Create the split file list.
+ unique_fd fd(open(out->list_file_.c_str(), O_CREAT | O_WRONLY | O_CLOEXEC, 0660));
+ if (fd < 0) {
+ PLOG(ERROR) << "Failed to open " << file_path;
+ return nullptr;
+ }
+
+ for (const auto& writer : out->files_) {
+ std::string line = android::base::Basename(writer->file_path()) + "\n";
+ if (!android::base::WriteFully(fd, line.data(), line.size())) {
+ PLOG(ERROR) << "Write failed " << file_path;
+ return nullptr;
+ }
+ }
+
+ // Unset this bit, so we don't unlink on destruction.
+ out->creating_ = false;
+ return out;
+}
+
+std::unique_ptr<SplitFiemap> SplitFiemap::Open(const std::string& file_path) {
+ std::vector<std::string> files;
+ if (!GetSplitFileList(file_path, &files)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SplitFiemap> out(new SplitFiemap());
+ out->list_file_ = file_path;
+
+ for (const auto& file : files) {
+ auto writer = FiemapWriter::Open(file, 0, false);
+ if (!writer) {
+ // Error was logged in Open().
+ return nullptr;
+ }
+ out->AddFile(std::move(writer));
+ }
+ return out;
+}
+
+bool SplitFiemap::GetSplitFileList(const std::string& file_path, std::vector<std::string>* list) {
+ // This is not the most efficient thing, but it is simple and recovering
+ // the fiemap/fibmap is much more expensive.
+ std::string contents;
+ if (!android::base::ReadFileToString(file_path, &contents, true)) {
+ PLOG(ERROR) << "Error reading file: " << file_path;
+ return false;
+ }
+
+ std::vector<std::string> names = android::base::Split(contents, "\n");
+ std::string dir = android::base::Dirname(file_path);
+ for (const auto& name : names) {
+ if (!name.empty()) {
+ list->emplace_back(dir + "/" + name);
+ }
+ }
+ return true;
+}
+
+bool SplitFiemap::RemoveSplitFiles(const std::string& file_path, std::string* message) {
+ // Early exit if this does not exist, and do not report an error.
+ if (access(file_path.c_str(), F_OK) && errno == ENOENT) {
+ return true;
+ }
+
+ bool ok = true;
+ std::vector<std::string> files;
+ if (GetSplitFileList(file_path, &files)) {
+ for (const auto& file : files) {
+ ok &= android::base::RemoveFileIfExists(file, message);
+ }
+ }
+ ok &= android::base::RemoveFileIfExists(file_path, message);
+ return ok;
+}
+
+const std::vector<struct fiemap_extent>& SplitFiemap::extents() {
+ if (extents_.empty()) {
+ for (const auto& file : files_) {
+ const auto& extents = file->extents();
+ extents_.insert(extents_.end(), extents.begin(), extents.end());
+ }
+ }
+ return extents_;
+}
+
+SplitFiemap::~SplitFiemap() {
+ if (!creating_) {
+ return;
+ }
+
+ // We failed to finish creating, so unlink everything.
+ unlink(list_file_.c_str());
+ for (auto&& file : files_) {
+ std::string path = file->file_path();
+ file = nullptr;
+
+ unlink(path.c_str());
+ }
+}
+
+void SplitFiemap::AddFile(FiemapUniquePtr&& file) {
+ total_size_ += file->size();
+ files_.emplace_back(std::move(file));
+}
+
+uint32_t SplitFiemap::block_size() const {
+ return files_[0]->block_size();
+}
+
+} // namespace fiemap_writer
+} // namespace android
diff --git a/fs_mgr/libfiemap_writer/utility.cpp b/fs_mgr/libfiemap_writer/utility.cpp
new file mode 100644
index 0000000..192ec16
--- /dev/null
+++ b/fs_mgr/libfiemap_writer/utility.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "utility.h"
+
+#include <stdint.h>
+#include <sys/vfs.h>
+#include <unistd.h>
+
+#include <android-base/logging.h>
+#include <libfiemap_writer/fiemap_writer.h>
+
+namespace android {
+namespace fiemap_writer {
+
+uint64_t DetermineMaximumFileSize(const std::string& file_path) {
+ // Create the smallest file possible (one block).
+ auto writer = FiemapWriter::Open(file_path, 1);
+ if (!writer) {
+ return 0;
+ }
+
+ uint64_t result = 0;
+ switch (writer->fs_type()) {
+ case EXT4_SUPER_MAGIC:
+ // The minimum is 16GiB, so just report that. If we wanted we could parse the
+ // superblock and figure out if 64-bit support is enabled.
+ result = 17179869184ULL;
+ break;
+ case F2FS_SUPER_MAGIC:
+ // Formula is from https://www.kernel.org/doc/Documentation/filesystems/f2fs.txt
+ // 4KB * (923 + 2 * 1018 + 2 * 1018 * 1018 + 1018 * 1018 * 1018) := 3.94TB.
+ result = 4329690886144ULL;
+ break;
+ case MSDOS_SUPER_MAGIC:
+ // 4GB-1, which we want aligned to the block size.
+ result = 4294967295;
+ result -= (result % writer->block_size());
+ break;
+ default:
+ LOG(ERROR) << "Unknown file system type: " << writer->fs_type();
+ break;
+ }
+
+ // Close and delete the temporary file.
+ writer = nullptr;
+ unlink(file_path.c_str());
+
+ return result;
+}
+
+} // namespace fiemap_writer
+} // namespace android
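
The hard-coded maxima above can be sanity-checked against the arithmetic quoted in the comments; a compile-time sketch (not part of the patch):

#include <cstdint>

// ext4: the quoted lower bound of 16 GiB.
static_assert(16ULL * 1024 * 1024 * 1024 == 17179869184ULL, "ext4 minimum max size");

// f2fs: 4 KiB blocks * (923 + 2*1018 + 2*1018*1018 + 1018*1018*1018) blocks.
constexpr uint64_t kF2fsBlocks =
        923ULL + 2 * 1018 + 2 * 1018 * 1018 + 1018ULL * 1018 * 1018;
static_assert(4096 * kF2fsBlocks == 4329690886144ULL, "f2fs maximum file size");

// vfat: files are capped at 4 GiB - 1 before block-size alignment.
static_assert(4ULL * 1024 * 1024 * 1024 - 1 == 4294967295ULL, "vfat cap");
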
diff --git a/fs_mgr/libfiemap_writer/utility.h b/fs_mgr/libfiemap_writer/utility.h
new file mode 100644
index 0000000..2d418da
--- /dev/null
+++ b/fs_mgr/libfiemap_writer/utility.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+
+namespace android {
+namespace fiemap_writer {
+
+// Given a file that will be created, determine the maximum size its containing
+// filesystem allows. Note this is a theoretical maximum size; free space is
+// ignored entirely.
+uint64_t DetermineMaximumFileSize(const std::string& file_path);
+
+} // namespace fiemap_writer
+} // namespace android
diff --git a/fs_mgr/libfs_avb/avb_util.cpp b/fs_mgr/libfs_avb/avb_util.cpp
index 7d89902..f4e4d4e 100644
--- a/fs_mgr/libfs_avb/avb_util.cpp
+++ b/fs_mgr/libfs_avb/avb_util.cpp
@@ -326,7 +326,7 @@
return false;
}
-bool ValidatePublicKeyBlob(const std::string key_blob_to_validate,
+bool ValidatePublicKeyBlob(const std::string& key_blob_to_validate,
const std::vector<std::string>& allowed_key_paths) {
std::string allowed_key_blob;
if (key_blob_to_validate.empty()) {
diff --git a/fs_mgr/libfs_avb/avb_util.h b/fs_mgr/libfs_avb/avb_util.h
index 986a69a..09c786a 100644
--- a/fs_mgr/libfs_avb/avb_util.h
+++ b/fs_mgr/libfs_avb/avb_util.h
@@ -80,7 +80,7 @@
bool ValidatePublicKeyBlob(const uint8_t* key, size_t length, const std::string& expected_key_blob);
-bool ValidatePublicKeyBlob(const std::string key_blob_to_validate,
+bool ValidatePublicKeyBlob(const std::string& key_blob_to_validate,
const std::vector<std::string>& expected_key_paths);
// Detects whether a partition contains a rollback image.
diff --git a/fs_mgr/liblp/builder.cpp b/fs_mgr/liblp/builder.cpp
index c39fbe7..ebd6997 100644
--- a/fs_mgr/liblp/builder.cpp
+++ b/fs_mgr/liblp/builder.cpp
@@ -550,11 +550,11 @@
}
bool MetadataBuilder::ValidatePartitionSizeChange(Partition* partition, uint64_t old_size,
- uint64_t new_size) {
+ uint64_t new_size, bool force_check) {
PartitionGroup* group = FindGroup(partition->group_name());
CHECK(group);
- if (new_size <= old_size) {
+ if (!force_check && new_size <= old_size) {
return true;
}
@@ -861,7 +861,7 @@
uint64_t aligned_size = AlignTo(requested_size, geometry_.logical_block_size);
uint64_t old_size = partition->size();
- if (!ValidatePartitionSizeChange(partition, old_size, aligned_size)) {
+ if (!ValidatePartitionSizeChange(partition, old_size, aligned_size, false)) {
return false;
}
@@ -973,7 +973,12 @@
ImportExtents(partition, metadata, source);
- if (!ValidatePartitionSizeChange(partition, 0, partition->size())) {
+ // Note: we've already increased the partition size by calling
+ // ImportExtents(). In order to figure out the size before that,
+ // we would have to iterate the extents and add up the linear
+ // segments. Instead, we just force ValidatePartitionSizeChange
+ // to check if the current configuration is acceptable.
+ if (!ValidatePartitionSizeChange(partition, partition->size(), partition->size(), true)) {
partition->RemoveExtents();
return false;
}
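
force_check exists because ImportExtents() has already grown the partition, so the usual "new_size <= old_size" shortcut would skip the group-quota check entirely. A reduced sketch of just that check (group cap and per-group accounting only; the real MetadataBuilder also validates free space and extents, and the names below are hypothetical):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct PartitionInfo {
        std::string group;
        uint64_t size;
    };

    // Returns true if setting parts[target].size to |new_size| keeps the group
    // under |group_max|. With force_check set, the check runs even when the size
    // did not grow, mirroring the call made after ImportExtents().
    static bool ValidateSizeChange(const std::vector<PartitionInfo>& parts, size_t target,
                                   uint64_t new_size, uint64_t group_max, bool force_check) {
        if (!force_check && new_size <= parts[target].size) return true;  // shrinking is fine

        uint64_t group_total = 0;
        for (size_t i = 0; i < parts.size(); i++) {
            if (parts[i].group != parts[target].group) continue;
            group_total += (i == target) ? new_size : parts[i].size;
        }
        return group_max == 0 || group_total <= group_max;  // 0 is treated as "no limit"
    }

    int main() {
        std::vector<PartitionInfo> parts = {{"group_a", 1 << 20}, {"group_a", 2 << 20}};
        return ValidateSizeChange(parts, 0, 3 << 20, 8 << 20, /*force_check=*/true) ? 0 : 1;
    }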
diff --git a/fs_mgr/liblp/builder_test.cpp b/fs_mgr/liblp/builder_test.cpp
index 69724f8..81305b3 100644
--- a/fs_mgr/liblp/builder_test.cpp
+++ b/fs_mgr/liblp/builder_test.cpp
@@ -835,3 +835,73 @@
EXPECT_EQ(exported->extents[1].target_data, 4608);
EXPECT_EQ(exported->extents[1].num_sectors, 1536);
}
+
+TEST_F(BuilderTest, UpdateSuper) {
+ // Build the on-disk metadata that we saw before flashing.
+ auto builder = MetadataBuilder::New(8145338368ULL, 65536, 3);
+ ASSERT_NE(builder, nullptr);
+
+ ASSERT_TRUE(builder->AddGroup("google_dynamic_partitions_a", 4068474880ULL));
+ ASSERT_TRUE(builder->AddGroup("google_dynamic_partitions_b", 4068474880ULL));
+
+ Partition* partition = builder->AddPartition("system_a", "google_dynamic_partitions_a",
+ LP_PARTITION_ATTR_READONLY);
+ ASSERT_NE(partition, nullptr);
+ ASSERT_TRUE(builder->AddLinearExtent(partition, "super", 1901568, 3608576));
+
+ partition = builder->AddPartition("vendor_a", "google_dynamic_partitions_a",
+ LP_PARTITION_ATTR_READONLY);
+ ASSERT_NE(partition, nullptr);
+ ASSERT_TRUE(builder->AddLinearExtent(partition, "super", 1521664, 5510144));
+
+ partition = builder->AddPartition("product_a", "google_dynamic_partitions_a",
+ LP_PARTITION_ATTR_READONLY);
+ ASSERT_NE(partition, nullptr);
+ ASSERT_TRUE(builder->AddLinearExtent(partition, "super", 3606528, 2048));
+
+ partition = builder->AddPartition("system_b", "google_dynamic_partitions_b",
+ LP_PARTITION_ATTR_READONLY);
+ ASSERT_NE(partition, nullptr);
+ ASSERT_TRUE(builder->AddLinearExtent(partition, "super", 1901568, 7955456));
+
+ partition = builder->AddPartition("vendor_b", "google_dynamic_partitions_b",
+ LP_PARTITION_ATTR_READONLY);
+ ASSERT_NE(partition, nullptr);
+ ASSERT_TRUE(builder->AddLinearExtent(partition, "super", 1521664, 9857024));
+
+ partition = builder->AddPartition("product_b", "google_dynamic_partitions_b",
+ LP_PARTITION_ATTR_READONLY);
+ ASSERT_NE(partition, nullptr);
+ ASSERT_TRUE(builder->AddLinearExtent(partition, "super", 3606528, 11378688));
+
+ auto on_disk = builder->Export();
+ ASSERT_NE(on_disk, nullptr);
+
+ // Build the super_empty from the new build.
+ builder = MetadataBuilder::New(8145338368ULL, 65536, 3);
+ ASSERT_NE(builder, nullptr);
+
+ ASSERT_TRUE(builder->AddGroup("google_dynamic_partitions_a", 4068474880ULL));
+ ASSERT_TRUE(builder->AddGroup("google_dynamic_partitions_b", 4068474880ULL));
+ ASSERT_NE(builder->AddPartition("system_a", "google_dynamic_partitions_a",
+ LP_PARTITION_ATTR_READONLY),
+ nullptr);
+ ASSERT_NE(builder->AddPartition("system_b", "google_dynamic_partitions_b",
+ LP_PARTITION_ATTR_READONLY),
+ nullptr);
+ ASSERT_NE(builder->AddPartition("vendor_a", "google_dynamic_partitions_a",
+ LP_PARTITION_ATTR_READONLY),
+ nullptr);
+ ASSERT_NE(builder->AddPartition("vendor_b", "google_dynamic_partitions_b",
+ LP_PARTITION_ATTR_READONLY),
+ nullptr);
+ ASSERT_NE(builder->AddPartition("product_a", "google_dynamic_partitions_a",
+ LP_PARTITION_ATTR_READONLY),
+ nullptr);
+ ASSERT_NE(builder->AddPartition("product_b", "google_dynamic_partitions_b",
+ LP_PARTITION_ATTR_READONLY),
+ nullptr);
+
+ std::set<std::string> partitions_to_keep{"system_a", "vendor_a", "product_a"};
+ ASSERT_TRUE(builder->ImportPartitions(*on_disk.get(), partitions_to_keep));
+}
diff --git a/fs_mgr/liblp/include/liblp/builder.h b/fs_mgr/liblp/include/liblp/builder.h
index 53f480f..486a71f 100644
--- a/fs_mgr/liblp/include/liblp/builder.h
+++ b/fs_mgr/liblp/include/liblp/builder.h
@@ -297,7 +297,8 @@
uint64_t TotalSizeOfGroup(PartitionGroup* group) const;
bool UpdateBlockDeviceInfo(size_t index, const BlockDeviceInfo& info);
bool FindBlockDeviceByName(const std::string& partition_name, uint32_t* index) const;
- bool ValidatePartitionSizeChange(Partition* partition, uint64_t old_size, uint64_t new_size);
+ bool ValidatePartitionSizeChange(Partition* partition, uint64_t old_size, uint64_t new_size,
+ bool force_check);
void ImportExtents(Partition* dest, const LpMetadata& metadata,
const LpMetadataPartition& source);
bool ImportPartition(const LpMetadata& metadata, const LpMetadataPartition& source);
diff --git a/fs_mgr/tests/adb-remount-test.sh b/fs_mgr/tests/adb-remount-test.sh
index 1ded954..a6baf1d 100755
--- a/fs_mgr/tests/adb-remount-test.sh
+++ b/fs_mgr/tests/adb-remount-test.sh
@@ -620,7 +620,7 @@
echo "${GREEN}[ RUN ]${NORMAL} Testing adb shell su root remount -R command" >&2
- adb_su remount -R </dev/null || true
+ adb_su remount -R system </dev/null || true
sleep 2
adb_wait 2m ||
die "waiting for device after remount -R `usb_status`"
@@ -1159,10 +1159,12 @@
die "lost device after reboot to ro state (USB stack broken?)"
adb_sh grep " /vendor .* rw," /proc/mounts >/dev/null </dev/null &&
die "/vendor is not read-only"
-adb_su remount </dev/null ||
+adb_su remount vendor </dev/null ||
die "remount command"
adb_sh grep " /vendor .* rw," /proc/mounts >/dev/null </dev/null ||
die "/vendor is not read-write"
+adb_sh grep " /system .* rw," /proc/mounts >/dev/null </dev/null &&
+ die "/vendor is not read-only"
echo "${GREEN}[ OK ]${NORMAL} remount command works from setup" >&2
# Prerequisite is an overlayfs deconstructed device but with verity disabled.
@@ -1177,10 +1179,12 @@
die "lost device after reboot after wipe (USB stack broken?)"
adb_sh grep " /vendor .* rw," /proc/mounts >/dev/null </dev/null &&
die "/vendor is not read-only"
-adb_su remount </dev/null ||
+adb_su remount vendor </dev/null ||
die "remount command"
adb_sh grep " /vendor .* rw," /proc/mounts >/dev/null </dev/null ||
die "/vendor is not read-write"
+adb_sh grep " /system .* rw," /proc/mounts >/dev/null </dev/null &&
+ die "/system is not read-only"
echo "${GREEN}[ OK ]${NORMAL} remount command works from scratch" >&2
restore
diff --git a/fs_mgr/tests/fs_mgr_test.cpp b/fs_mgr/tests/fs_mgr_test.cpp
index 6afc8d2..72afa69 100644
--- a/fs_mgr/tests/fs_mgr_test.cpp
+++ b/fs_mgr/tests/fs_mgr_test.cpp
@@ -948,13 +948,14 @@
ASSERT_TRUE(tf.fd != -1);
std::string fstab_contents = R"fs(
source none0 swap defaults avb=vbmeta_partition
+source none1 swap defaults avb_keys=/path/to/test.avbpubkey
)fs";
ASSERT_TRUE(android::base::WriteStringToFile(fstab_contents, tf.path));
Fstab fstab;
EXPECT_TRUE(ReadFstabFromFile(tf.path, &fstab));
- ASSERT_EQ(1U, fstab.size());
+ ASSERT_EQ(2U, fstab.size());
auto entry = fstab.begin();
EXPECT_EQ("none0", entry->mount_point);
@@ -964,6 +965,12 @@
EXPECT_TRUE(CompareFlags(flags, entry->fs_mgr_flags));
EXPECT_EQ("vbmeta_partition", entry->vbmeta_partition);
+ entry++;
+
+ EXPECT_EQ("none1", entry->mount_point);
+ FstabEntry::FsMgrFlags empty_flags = {}; // no flags should be set for avb_keys.
+ EXPECT_TRUE(CompareFlags(empty_flags, entry->fs_mgr_flags));
+ EXPECT_EQ("/path/to/test.avbpubkey", entry->avb_keys);
}
TEST(fs_mgr, ReadFstabFromFile_FsMgrOptions_KeyDirectory) {
diff --git a/init/builtins.cpp b/init/builtins.cpp
index 538ed00..6511d29 100644
--- a/init/builtins.cpp
+++ b/init/builtins.cpp
@@ -1119,13 +1119,21 @@
}
static Result<Success> do_setup_runtime_bionic(const BuiltinArguments& args) {
- if (SwitchToDefaultMountNamespace()) {
+ if (SetupRuntimeBionic()) {
return Success();
} else {
return Error() << "Failed to setup runtime bionic";
}
}
+static Result<Success> do_enter_default_mount_ns(const BuiltinArguments& args) {
+ if (SwitchToDefaultMountNamespace()) {
+ return Success();
+ } else {
+ return Error() << "Failed to enter into default mount namespace";
+ }
+}
+
// Builtin-function-map start
const BuiltinFunctionMap::Map& BuiltinFunctionMap::map() const {
constexpr std::size_t kMax = std::numeric_limits<std::size_t>::max();
@@ -1177,6 +1185,7 @@
{"start", {1, 1, {false, do_start}}},
{"stop", {1, 1, {false, do_stop}}},
{"swapon_all", {1, 1, {false, do_swapon_all}}},
+ {"enter_default_mount_ns", {0, 0, {false, do_enter_default_mount_ns}}},
{"symlink", {2, 2, {true, do_symlink}}},
{"sysclktz", {1, 1, {false, do_sysclktz}}},
{"trigger", {1, 1, {false, do_trigger}}},
diff --git a/init/first_stage_init.cpp b/init/first_stage_init.cpp
index e11d897..7cf4c3f 100644
--- a/init/first_stage_init.cpp
+++ b/init/first_stage_init.cpp
@@ -155,6 +155,10 @@
// part of the product partition, e.g. because they are mounted read-write.
CHECKCALL(mkdir("/mnt/product", 0755));
+ // /apex is used to mount APEXes
+ CHECKCALL(mount("tmpfs", "/apex", "tmpfs", MS_NOEXEC | MS_NOSUID | MS_NODEV,
+ "mode=0755,uid=0,gid=0"));
+
#undef CHECKCALL
// Now that tmpfs is mounted on /dev and we have /dev/kmsg, we can actually
diff --git a/init/mount_namespace.cpp b/init/mount_namespace.cpp
index 413fe8f..327446a 100644
--- a/init/mount_namespace.cpp
+++ b/init/mount_namespace.cpp
@@ -172,6 +172,11 @@
kBionicLibsMountPointDir64))
return false;
+ // /apex is also a private mountpoint to give different sets of APEXes for
+ // the bootstrap and default mount namespaces. Processes running in the
+ // bootstrap namespace get APEXes from the read-only partition.
+ if (!(MakePrivate("/apex"))) return false;
+
bootstrap_ns_fd.reset(OpenMountNamespace());
bootstrap_ns_id = GetMountNamespaceId();
@@ -227,6 +232,17 @@
}
}
+ LOG(INFO) << "Switched to default mount namespace";
+ return true;
+}
+
+// TODO(jiyong): remove this when /system/lib/libc.so becomes
+// a symlink to /apex/com.android.runtime/lib/bionic/libc.so
+bool SetupRuntimeBionic() {
+ if (IsRecoveryMode()) {
+ // We don't have multiple namespaces in recovery mode
+ return true;
+ }
// Bind-mount bionic from the runtime APEX since it is now available. Note
// that in case of IsBionicUpdatable() == false, these mounts are over the
// existing bind mounts for the bootstrap bionic, which effectively
@@ -238,7 +254,7 @@
kBionicLibsMountPointDir64))
return false;
- LOG(INFO) << "Switched to default mount namespace";
+ LOG(INFO) << "Runtime bionic is set up";
return true;
}
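
MakePrivate() is not shown in this hunk; the assumption behind the /apex comment is that it switches the mount point's propagation type to private so APEX mounts made in one namespace stay invisible to the other. A sketch of what such a helper plausibly reduces to (the function name and error handling here are assumptions, not init's code):

    #include <sys/mount.h>
    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    // Change propagation of an existing mount point to private so later mounts
    // under it remain local to the current mount namespace.
    static bool MakePrivateSketch(const char* mount_point) {
        if (mount(nullptr, mount_point, nullptr, MS_PRIVATE, nullptr) == -1) {
            std::fprintf(stderr, "mount(MS_PRIVATE, %s): %s\n", mount_point,
                         std::strerror(errno));
            return false;
        }
        return true;
    }

    int main() {
        // Needs root and an existing mount point to succeed.
        return MakePrivateSketch("/apex") ? 0 : 1;
    }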
diff --git a/init/mount_namespace.h b/init/mount_namespace.h
index c41a449..4eef785 100644
--- a/init/mount_namespace.h
+++ b/init/mount_namespace.h
@@ -20,6 +20,7 @@
namespace init {
bool SetupMountNamespaces();
+bool SetupRuntimeBionic();
bool SwitchToDefaultMountNamespace();
bool SwitchToBootstrapMountNamespaceIfNeeded();
diff --git a/init/selinux.cpp b/init/selinux.cpp
index ee302c1..3fadfed 100644
--- a/init/selinux.cpp
+++ b/init/selinux.cpp
@@ -459,6 +459,8 @@
selinux_android_restorecon("/dev/block", SELINUX_ANDROID_RESTORECON_RECURSE);
selinux_android_restorecon("/dev/device-mapper", 0);
+
+ selinux_android_restorecon("/apex", 0);
}
int SelinuxKlogCallback(int type, const char* fmt, ...) {
diff --git a/libmeminfo/tools/librank.cpp b/libmeminfo/tools/librank.cpp
index 2c2583d..e53c746 100644
--- a/libmeminfo/tools/librank.cpp
+++ b/libmeminfo/tools/librank.cpp
@@ -26,6 +26,7 @@
#include <unistd.h>
#include <algorithm>
+#include <map>
#include <memory>
#include <vector>
@@ -122,30 +123,22 @@
const std::string& name() const { return name_; }
const MemUsage& usage() const { return usage_; }
- const std::vector<ProcessRecord>& processes() const { return procs_; }
+ const std::map<pid_t, ProcessRecord>& processes() const { return procs_; }
uint64_t pss() const { return usage_.pss; }
void AddUsage(const ProcessRecord& proc, const MemUsage& mem_usage) {
- auto process = std::find_if(procs_.begin(), procs_.end(),
- [&](auto p) -> bool { return p.pid() == proc.pid(); });
- if (process == procs_.end()) {
- process = procs_.emplace(procs_.end(), proc.pid());
- }
- process->AddUsage(mem_usage);
+ auto [it, inserted] = procs_.insert(std::pair<pid_t, ProcessRecord>(proc.pid(), proc));
+ it->second.AddUsage(mem_usage);
add_mem_usage(&usage_, mem_usage);
}
- void Sort(std::function<bool(const ProcessRecord&, const ProcessRecord&)>& sorter) {
- std::sort(procs_.begin(), procs_.end(), sorter);
- }
-
private:
std::string name_;
MemUsage usage_;
- std::vector<ProcessRecord> procs_;
+ std::map<pid_t, ProcessRecord> procs_;
};
// List of every library / map
-static std::vector<LibRecord> g_libs;
+static std::map<std::string, LibRecord> g_libs;
// List of library/map names that we don't want to show by default
static const std::vector<std::string> g_blacklisted_libs = {"[heap]", "[stack]"};
@@ -204,13 +197,10 @@
continue;
}
- auto lib = std::find_if(g_libs.begin(), g_libs.end(),
- [&](auto l) -> bool { return map.name == l.name(); });
- if (lib == g_libs.end()) {
- lib = g_libs.emplace(g_libs.end(), map.name);
- }
+ auto [it, inserted] =
+ g_libs.insert(std::pair<std::string, LibRecord>(map.name, LibRecord(map.name)));
+ it->second.AddUsage(proc, map.usage);
- lib->AddUsage(proc, map.usage);
if (!g_has_swap && map.usage.swap) {
g_has_swap = true;
}
@@ -321,11 +311,16 @@
}
printf("Name/PID\n");
+ std::vector<LibRecord> v_libs;
+ v_libs.reserve(g_libs.size());
+ std::transform(g_libs.begin(), g_libs.end(), std::back_inserter(v_libs),
+ [] (std::pair<std::string, LibRecord> const& pair) { return pair.second; });
+
// sort the libraries by their pss
- std::sort(g_libs.begin(), g_libs.end(),
+ std::sort(v_libs.begin(), v_libs.end(),
[](const LibRecord& l1, const LibRecord& l2) { return l1.pss() > l2.pss(); });
- for (auto& lib : g_libs) {
+ for (auto& lib : v_libs) {
printf("%6" PRIu64 "K %7s %6s %6s %6s ", lib.pss() / 1024, "", "", "", "");
if (g_has_swap) {
printf(" %6s ", "");
@@ -333,9 +328,15 @@
printf("%s\n", lib.name().c_str());
// sort all mappings first
- lib.Sort(sort_func);
- for (auto& p : lib.processes()) {
+ std::vector<ProcessRecord> procs;
+ procs.reserve(lib.processes().size());
+ std::transform(lib.processes().begin(), lib.processes().end(), std::back_inserter(procs),
+ [] (std::pair<pid_t, ProcessRecord> const& pair) { return pair.second; });
+
+ std::sort(procs.begin(), procs.end(), sort_func);
+
+ for (auto& p : procs) {
const MemUsage& usage = p.usage();
printf(" %6s %7" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K ", "",
usage.vss / 1024, usage.rss / 1024, usage.pss / 1024, usage.uss / 1024);
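
The librank change replaces linear find_if scans over vectors with std::map::insert, which returns the existing element's iterator when the key is already present, so a single call both deduplicates and positions the record for accumulation. A self-contained illustration of the pattern (the sample data is made up):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        // (library name, pss contribution from one mapping)
        std::vector<std::pair<std::string, uint64_t>> samples = {
                {"libc.so", 100}, {"libm.so", 30}, {"libc.so", 50}};

        std::map<std::string, uint64_t> pss_by_lib;
        for (const auto& [name, pss] : samples) {
            // insert() is a no-op if the key already exists; either way |it|
            // points at the entry, so no separate find() is needed.
            auto [it, inserted] = pss_by_lib.insert({name, 0});
            it->second += pss;
        }

        for (const auto& [name, pss] : pss_by_lib) {
            std::cout << name << ": " << pss << "K\n";
        }
    }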
diff --git a/libprocessgroup/cgroup_map.cpp b/libprocessgroup/cgroup_map.cpp
index d094811..26d0754 100644
--- a/libprocessgroup/cgroup_map.cpp
+++ b/libprocessgroup/cgroup_map.cpp
@@ -45,6 +45,7 @@
using android::base::unique_fd;
static constexpr const char* CGROUPS_DESC_FILE = "/etc/cgroups.json";
+static constexpr const char* CGROUPS_DESC_VENDOR_FILE = "/vendor/etc/cgroups.json";
static constexpr const char* CGROUP_PROCS_FILE = "/cgroup.procs";
static constexpr const char* CGROUP_TASKS_FILE = "/tasks";
@@ -110,12 +111,13 @@
return true;
}
-static bool ReadDescriptors(std::map<std::string, CgroupDescriptor>* descriptors) {
+static bool ReadDescriptorsFromFile(const std::string& file_name,
+ std::map<std::string, CgroupDescriptor>* descriptors) {
std::vector<CgroupDescriptor> result;
std::string json_doc;
- if (!android::base::ReadFileToString(CGROUPS_DESC_FILE, &json_doc)) {
- LOG(ERROR) << "Failed to read task profiles from " << CGROUPS_DESC_FILE;
+ if (!android::base::ReadFileToString(file_name, &json_doc)) {
+ LOG(ERROR) << "Failed to read task profiles from " << file_name;
return false;
}
@@ -130,21 +132,46 @@
const Json::Value& cgroups = root["Cgroups"];
for (Json::Value::ArrayIndex i = 0; i < cgroups.size(); ++i) {
std::string name = cgroups[i]["Controller"].asString();
- descriptors->emplace(std::make_pair(
- name,
- CgroupDescriptor(1, name, cgroups[i]["Path"].asString(),
+ auto iter = descriptors->find(name);
+ if (iter == descriptors->end()) {
+ descriptors->emplace(name, CgroupDescriptor(1, name, cgroups[i]["Path"].asString(),
std::strtoul(cgroups[i]["Mode"].asString().c_str(), 0, 8),
- cgroups[i]["UID"].asString(), cgroups[i]["GID"].asString())));
+ cgroups[i]["UID"].asString(), cgroups[i]["GID"].asString()));
+ } else {
+ iter->second = CgroupDescriptor(1, name, cgroups[i]["Path"].asString(),
+ std::strtoul(cgroups[i]["Mode"].asString().c_str(), 0, 8),
+ cgroups[i]["UID"].asString(), cgroups[i]["GID"].asString());
+ }
}
}
if (root.isMember("Cgroups2")) {
const Json::Value& cgroups2 = root["Cgroups2"];
- descriptors->emplace(std::make_pair(
- CGROUPV2_CONTROLLER_NAME,
- CgroupDescriptor(2, CGROUPV2_CONTROLLER_NAME, cgroups2["Path"].asString(),
+ auto iter = descriptors->find(CGROUPV2_CONTROLLER_NAME);
+ if (iter == descriptors->end()) {
+ descriptors->emplace(CGROUPV2_CONTROLLER_NAME, CgroupDescriptor(2, CGROUPV2_CONTROLLER_NAME, cgroups2["Path"].asString(),
std::strtoul(cgroups2["Mode"].asString().c_str(), 0, 8),
- cgroups2["UID"].asString(), cgroups2["GID"].asString())));
+ cgroups2["UID"].asString(), cgroups2["GID"].asString()));
+ } else {
+ iter->second = CgroupDescriptor(2, CGROUPV2_CONTROLLER_NAME, cgroups2["Path"].asString(),
+ std::strtoul(cgroups2["Mode"].asString().c_str(), 0, 8),
+ cgroups2["UID"].asString(), cgroups2["GID"].asString());
+ }
+ }
+
+ return true;
+}
+
+static bool ReadDescriptors(std::map<std::string, CgroupDescriptor>* descriptors) {
+ // load system cgroup descriptors
+ if (!ReadDescriptorsFromFile(CGROUPS_DESC_FILE, descriptors)) {
+ return false;
+ }
+
+ // load vendor cgroup descriptors if the file exists
+ if (!access(CGROUPS_DESC_VENDOR_FILE, F_OK) &&
+ !ReadDescriptorsFromFile(CGROUPS_DESC_VENDOR_FILE, descriptors)) {
+ return false;
}
return true;
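
ReadDescriptors() now loads the system cgroups.json first and then, only if it exists, the vendor copy, with vendor entries overwriting same-named controllers. A small sketch of that base-plus-optional-override load order; the file format and paths below are hypothetical stand-ins (one "controller path" pair per line) so the example stays free of a JSON dependency:

    #include <unistd.h>
    #include <fstream>
    #include <iostream>
    #include <map>
    #include <string>

    // Stand-in for ReadDescriptorsFromFile(): later files overwrite entries that
    // share a controller name, which is what gives vendor files override power.
    static bool LoadInto(const std::string& file, std::map<std::string, std::string>* out) {
        std::ifstream in(file);
        if (!in) return false;
        std::string name, path;
        while (in >> name >> path) (*out)[name] = path;
        return true;
    }

    static bool LoadDescriptors(std::map<std::string, std::string>* out) {
        if (!LoadInto("/etc/cgroups.conf", out)) return false;  // system copy is mandatory
        if (access("/vendor/etc/cgroups.conf", F_OK) == 0 &&    // vendor copy is optional
            !LoadInto("/vendor/etc/cgroups.conf", out)) {
            return false;
        }
        return true;
    }

    int main() {
        std::map<std::string, std::string> desc;
        if (!LoadDescriptors(&desc)) return 1;
        for (const auto& [name, path] : desc) std::cout << name << " -> " << path << "\n";
    }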
diff --git a/libprocessgroup/processgroup.cpp b/libprocessgroup/processgroup.cpp
index 8505e61..8884650 100644
--- a/libprocessgroup/processgroup.cpp
+++ b/libprocessgroup/processgroup.cpp
@@ -170,8 +170,9 @@
return ret;
}
-static void RemoveUidProcessGroups(const std::string& uid_path) {
+static bool RemoveUidProcessGroups(const std::string& uid_path) {
std::unique_ptr<DIR, decltype(&closedir)> uid(opendir(uid_path.c_str()), closedir);
+ bool empty = true;
if (uid != NULL) {
dirent* dir;
while ((dir = readdir(uid.get())) != nullptr) {
@@ -185,9 +186,15 @@
auto path = StringPrintf("%s/%s", uid_path.c_str(), dir->d_name);
LOG(VERBOSE) << "Removing " << path;
- if (rmdir(path.c_str()) == -1) PLOG(WARNING) << "Failed to remove " << path;
+ if (rmdir(path.c_str()) == -1) {
+ if (errno != EBUSY) {
+ PLOG(WARNING) << "Failed to remove " << path;
+ }
+ empty = false;
+ }
}
}
+ return empty;
}
void removeAllProcessGroups() {
@@ -219,9 +226,14 @@
}
auto path = StringPrintf("%s/%s", cgroup_root_path.c_str(), dir->d_name);
- RemoveUidProcessGroups(path);
+ if (!RemoveUidProcessGroups(path)) {
+ LOG(VERBOSE) << "Skip removing " << path;
+ continue;
+ }
LOG(VERBOSE) << "Removing " << path;
- if (rmdir(path.c_str()) == -1) PLOG(WARNING) << "Failed to remove " << path;
+ if (rmdir(path.c_str()) == -1 && errno != EBUSY) {
+ PLOG(WARNING) << "Failed to remove " << path;
+ }
}
}
}
@@ -249,6 +261,10 @@
auto path = ConvertUidPidToPath(cgroup, uid, initialPid) + PROCESSGROUP_CGROUP_PROCS_FILE;
std::unique_ptr<FILE, decltype(&fclose)> fd(fopen(path.c_str(), "re"), fclose);
if (!fd) {
+ if (errno == ENOENT) {
+ // This happens when the process is already dead.
+ return 0;
+ }
PLOG(WARNING) << "Failed to open process cgroup uid " << uid << " pid " << initialPid;
return -1;
}
@@ -293,7 +309,7 @@
LOG(VERBOSE) << "Killing process group " << -pgid << " in uid " << uid
<< " as part of process cgroup " << initialPid;
- if (kill(-pgid, signal) == -1) {
+ if (kill(-pgid, signal) == -1 && errno != ESRCH) {
PLOG(WARNING) << "kill(" << -pgid << ", " << signal << ") failed";
}
}
@@ -303,7 +319,7 @@
LOG(VERBOSE) << "Killing pid " << pid << " in uid " << uid << " as part of process cgroup "
<< initialPid;
- if (kill(pid, signal) == -1) {
+ if (kill(pid, signal) == -1 && errno != ESRCH) {
PLOG(WARNING) << "kill(" << pid << ", " << signal << ") failed";
}
}
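
The cleanup paths above deliberately tolerate two races: a cgroup directory may still contain exiting tasks (rmdir fails with EBUSY, so the parent is skipped and retried on a later pass), and a target process may already be gone (kill fails with ESRCH, which is not worth a warning). A compact illustration of those two checks, using hypothetical helper names:

    #include <cerrno>
    #include <cstdio>
    #include <signal.h>
    #include <unistd.h>

    // Remove a cgroup directory, treating "still busy" as a quiet non-error.
    static bool TryRemoveDir(const char* path) {
        if (rmdir(path) == -1) {
            if (errno != EBUSY) std::perror(path);
            return false;  // caller skips the parent directory and retries later
        }
        return true;
    }

    // Signal a process, treating "already exited" as success.
    static bool TryKill(pid_t pid, int sig) {
        if (kill(pid, sig) == -1 && errno != ESRCH) {
            std::perror("kill");
            return false;
        }
        return true;
    }

    int main() {
        TryRemoveDir("/tmp/does-not-exist");  // warns (ENOENT), returns false
        return TryKill(getpid(), 0) ? 0 : 1;  // signal 0 only checks existence
    }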
diff --git a/libprocessgroup/profiles/task_profiles.json b/libprocessgroup/profiles/task_profiles.json
index 985720f..74a39cd 100644
--- a/libprocessgroup/profiles/task_profiles.json
+++ b/libprocessgroup/profiles/task_profiles.json
@@ -56,10 +56,10 @@
"Profiles": [
{
"Name": "HighEnergySaving",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "schedtune",
"Path": "background"
@@ -69,10 +69,10 @@
},
{
"Name": "NormalPerformance",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "schedtune",
"Path": ""
@@ -82,10 +82,10 @@
},
{
"Name": "HighPerformance",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "schedtune",
"Path": "foreground"
@@ -95,10 +95,10 @@
},
{
"Name": "MaxPerformance",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "schedtune",
"Path": "top-app"
@@ -108,10 +108,10 @@
},
{
"Name": "RealtimePerformance",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "schedtune",
"Path": "rt"
@@ -122,26 +122,26 @@
{
"Name": "CpuPolicySpread",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetAttribute",
- "Params" :
+ "Name": "SetAttribute",
+ "Params":
{
- "Name" : "STunePreferIdle",
- "Value" : "1"
+ "Name": "STunePreferIdle",
+ "Value": "1"
}
}
]
},
{
"Name": "CpuPolicyPack",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetAttribute",
- "Params" :
+ "Name": "SetAttribute",
+ "Params":
{
- "Name" : "STunePreferIdle",
- "Value" : "0"
+ "Name": "STunePreferIdle",
+ "Value": "0"
}
}
]
@@ -149,10 +149,10 @@
{
"Name": "VrKernelCapacity",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": ""
@@ -162,10 +162,10 @@
},
{
"Name": "VrServiceCapacityLow",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "system/background"
@@ -175,10 +175,10 @@
},
{
"Name": "VrServiceCapacityNormal",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "system"
@@ -188,10 +188,10 @@
},
{
"Name": "VrServiceCapacityHigh",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "system/performance"
@@ -201,10 +201,10 @@
},
{
"Name": "VrProcessCapacityLow",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "application/background"
@@ -214,10 +214,10 @@
},
{
"Name": "VrProcessCapacityNormal",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "application"
@@ -227,10 +227,10 @@
},
{
"Name": "VrProcessCapacityHigh",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "application/performance"
@@ -241,10 +241,10 @@
{
"Name": "ProcessCapacityLow",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "background"
@@ -254,10 +254,10 @@
},
{
"Name": "ProcessCapacityNormal",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": ""
@@ -267,10 +267,10 @@
},
{
"Name": "ProcessCapacityHigh",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "foreground"
@@ -280,10 +280,10 @@
},
{
"Name": "ProcessCapacityMax",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "top-app"
@@ -294,10 +294,10 @@
{
"Name": "ServiceCapacityLow",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "system-background"
@@ -307,10 +307,10 @@
},
{
"Name": "ServiceCapacityRestricted",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "restricted"
@@ -321,10 +321,10 @@
{
"Name": "CameraServiceCapacity",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "cpuset",
"Path": "camera-daemon"
@@ -335,10 +335,10 @@
{
"Name": "LowIoPriority",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "blkio",
"Path": "background"
@@ -348,10 +348,10 @@
},
{
"Name": "NormalIoPriority",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "blkio",
"Path": ""
@@ -361,10 +361,10 @@
},
{
"Name": "HighIoPriority",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "blkio",
"Path": ""
@@ -374,10 +374,10 @@
},
{
"Name": "MaxIoPriority",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "blkio",
"Path": ""
@@ -388,10 +388,10 @@
{
"Name": "TimerSlackHigh",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetTimerSlack",
- "Params" :
+ "Name": "SetTimerSlack",
+ "Params":
{
"Slack": "40000000"
}
@@ -400,10 +400,10 @@
},
{
"Name": "TimerSlackNormal",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetTimerSlack",
- "Params" :
+ "Name": "SetTimerSlack",
+ "Params":
{
"Slack": "50000"
}
@@ -413,26 +413,26 @@
{
"Name": "PerfBoost",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetClamps",
- "Params" :
+ "Name": "SetClamps",
+ "Params":
{
- "Boost" : "50%",
- "Clamp" : "0"
+ "Boost": "50%",
+ "Clamp": "0"
}
}
]
},
{
"Name": "PerfClamp",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetClamps",
- "Params" :
+ "Name": "SetClamps",
+ "Params":
{
- "Boost" : "0",
- "Clamp" : "30%"
+ "Boost": "0",
+ "Clamp": "30%"
}
}
]
@@ -440,21 +440,21 @@
{
"Name": "LowMemoryUsage",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetAttribute",
- "Params" :
+ "Name": "SetAttribute",
+ "Params":
{
- "Name" : "MemSoftLimit",
- "Value" : "16MB"
+ "Name": "MemSoftLimit",
+ "Value": "16MB"
}
},
{
- "Name" : "SetAttribute",
- "Params" :
+ "Name": "SetAttribute",
+ "Params":
{
- "Name" : "MemSwappiness",
- "Value" : "150"
+ "Name": "MemSwappiness",
+ "Value": "150"
}
}
@@ -462,31 +462,31 @@
},
{
"Name": "HighMemoryUsage",
- "Actions" : [
+ "Actions": [
{
- "Name" : "SetAttribute",
- "Params" :
+ "Name": "SetAttribute",
+ "Params":
{
- "Name" : "MemSoftLimit",
- "Value" : "512MB"
+ "Name": "MemSoftLimit",
+ "Value": "512MB"
}
},
{
- "Name" : "SetAttribute",
- "Params" :
+ "Name": "SetAttribute",
+ "Params":
{
- "Name" : "MemSwappiness",
- "Value" : "100"
+ "Name": "MemSwappiness",
+ "Value": "100"
}
}
]
},
{
"Name": "SystemMemoryProcess",
- "Actions" : [
+ "Actions": [
{
- "Name" : "JoinCgroup",
- "Params" :
+ "Name": "JoinCgroup",
+ "Params":
{
"Controller": "memory",
"Path": "system"
diff --git a/libprocessgroup/task_profiles.cpp b/libprocessgroup/task_profiles.cpp
index 8ec29d3..fded417 100644
--- a/libprocessgroup/task_profiles.cpp
+++ b/libprocessgroup/task_profiles.cpp
@@ -42,6 +42,7 @@
using android::base::WriteStringToFile;
#define TASK_PROFILE_DB_FILE "/etc/task_profiles.json"
+#define TASK_PROFILE_DB_VENDOR_FILE "/vendor/etc/task_profiles.json"
bool ProfileAttribute::GetPathForTask(int tid, std::string* path) const {
std::string subgroup;
@@ -288,16 +289,24 @@
}
TaskProfiles::TaskProfiles() {
- if (!Load(CgroupMap::GetInstance())) {
- LOG(ERROR) << "TaskProfiles::Load for [" << getpid() << "] failed";
+ // load system task profiles
+ if (!Load(CgroupMap::GetInstance(), TASK_PROFILE_DB_FILE)) {
+ LOG(ERROR) << "Loading " << TASK_PROFILE_DB_FILE << " for [" << getpid() << "] failed";
+ }
+
+ // load vendor task profiles if the file exists
+ if (!access(TASK_PROFILE_DB_VENDOR_FILE, F_OK) &&
+ !Load(CgroupMap::GetInstance(), TASK_PROFILE_DB_VENDOR_FILE)) {
+ LOG(ERROR) << "Loading " << TASK_PROFILE_DB_VENDOR_FILE << " for [" << getpid()
+ << "] failed";
}
}
-bool TaskProfiles::Load(const CgroupMap& cg_map) {
+bool TaskProfiles::Load(const CgroupMap& cg_map, const std::string& file_name) {
std::string json_doc;
- if (!android::base::ReadFileToString(TASK_PROFILE_DB_FILE, &json_doc)) {
- LOG(ERROR) << "Failed to read task profiles from " << TASK_PROFILE_DB_FILE;
+ if (!android::base::ReadFileToString(file_name, &json_doc)) {
+ LOG(ERROR) << "Failed to read task profiles from " << file_name;
return false;
}
@@ -308,18 +317,18 @@
return false;
}
- Json::Value attr = root["Attributes"];
+ const Json::Value& attr = root["Attributes"];
for (Json::Value::ArrayIndex i = 0; i < attr.size(); ++i) {
std::string name = attr[i]["Name"].asString();
- std::string ctrlName = attr[i]["Controller"].asString();
- std::string file_name = attr[i]["File"].asString();
+ std::string controller_name = attr[i]["Controller"].asString();
+ std::string file_attr = attr[i]["File"].asString();
if (attributes_.find(name) == attributes_.end()) {
- const CgroupController* controller = cg_map.FindController(ctrlName);
+ const CgroupController* controller = cg_map.FindController(controller_name);
if (controller) {
- attributes_[name] = std::make_unique<ProfileAttribute>(controller, file_name);
+ attributes_[name] = std::make_unique<ProfileAttribute>(controller, file_attr);
} else {
- LOG(WARNING) << "Controller " << ctrlName << " is not found";
+ LOG(WARNING) << "Controller " << controller_name << " is not found";
}
} else {
LOG(WARNING) << "Attribute " << name << " is already defined";
@@ -328,72 +337,72 @@
std::map<std::string, std::string> params;
- Json::Value profilesVal = root["Profiles"];
- for (Json::Value::ArrayIndex i = 0; i < profilesVal.size(); ++i) {
- Json::Value profileVal = profilesVal[i];
+ const Json::Value& profiles_val = root["Profiles"];
+ for (Json::Value::ArrayIndex i = 0; i < profiles_val.size(); ++i) {
+ const Json::Value& profile_val = profiles_val[i];
- std::string profileName = profileVal["Name"].asString();
- Json::Value actions = profileVal["Actions"];
+ std::string profile_name = profile_val["Name"].asString();
+ const Json::Value& actions = profile_val["Actions"];
auto profile = std::make_unique<TaskProfile>();
- for (Json::Value::ArrayIndex actIdx = 0; actIdx < actions.size(); ++actIdx) {
- Json::Value actionVal = actions[actIdx];
- std::string actionName = actionVal["Name"].asString();
- Json::Value paramsVal = actionVal["Params"];
- if (actionName == "JoinCgroup") {
- std::string ctrlName = paramsVal["Controller"].asString();
- std::string path = paramsVal["Path"].asString();
+ for (Json::Value::ArrayIndex act_idx = 0; act_idx < actions.size(); ++act_idx) {
+ const Json::Value& action_val = actions[act_idx];
+ std::string action_name = action_val["Name"].asString();
+ const Json::Value& params_val = action_val["Params"];
+ if (action_name == "JoinCgroup") {
+ std::string controller_name = params_val["Controller"].asString();
+ std::string path = params_val["Path"].asString();
- const CgroupController* controller = cg_map.FindController(ctrlName);
+ const CgroupController* controller = cg_map.FindController(controller_name);
if (controller) {
profile->Add(std::make_unique<SetCgroupAction>(controller, path));
} else {
- LOG(WARNING) << "JoinCgroup: controller " << ctrlName << " is not found";
+ LOG(WARNING) << "JoinCgroup: controller " << controller_name << " is not found";
}
- } else if (actionName == "SetTimerSlack") {
- std::string slackValue = paramsVal["Slack"].asString();
+ } else if (action_name == "SetTimerSlack") {
+ std::string slack_value = params_val["Slack"].asString();
char* end;
unsigned long slack;
- slack = strtoul(slackValue.c_str(), &end, 10);
- if (end > slackValue.c_str()) {
+ slack = strtoul(slack_value.c_str(), &end, 10);
+ if (end > slack_value.c_str()) {
profile->Add(std::make_unique<SetTimerSlackAction>(slack));
} else {
- LOG(WARNING) << "SetTimerSlack: invalid parameter: " << slackValue;
+ LOG(WARNING) << "SetTimerSlack: invalid parameter: " << slack_value;
}
- } else if (actionName == "SetAttribute") {
- std::string attrName = paramsVal["Name"].asString();
- std::string attrValue = paramsVal["Value"].asString();
+ } else if (action_name == "SetAttribute") {
+ std::string attr_name = params_val["Name"].asString();
+ std::string attr_value = params_val["Value"].asString();
- auto iter = attributes_.find(attrName);
+ auto iter = attributes_.find(attr_name);
if (iter != attributes_.end()) {
profile->Add(
- std::make_unique<SetAttributeAction>(iter->second.get(), attrValue));
+ std::make_unique<SetAttributeAction>(iter->second.get(), attr_value));
} else {
- LOG(WARNING) << "SetAttribute: unknown attribute: " << attrName;
+ LOG(WARNING) << "SetAttribute: unknown attribute: " << attr_name;
}
- } else if (actionName == "SetClamps") {
- std::string boostValue = paramsVal["Boost"].asString();
- std::string clampValue = paramsVal["Clamp"].asString();
+ } else if (action_name == "SetClamps") {
+ std::string boost_value = params_val["Boost"].asString();
+ std::string clamp_value = params_val["Clamp"].asString();
char* end;
unsigned long boost;
- boost = strtoul(boostValue.c_str(), &end, 10);
- if (end > boostValue.c_str()) {
- unsigned long clamp = strtoul(clampValue.c_str(), &end, 10);
- if (end > clampValue.c_str()) {
+ boost = strtoul(boost_value.c_str(), &end, 10);
+ if (end > boost_value.c_str()) {
+ unsigned long clamp = strtoul(clamp_value.c_str(), &end, 10);
+ if (end > clamp_value.c_str()) {
profile->Add(std::make_unique<SetClampsAction>(boost, clamp));
} else {
- LOG(WARNING) << "SetClamps: invalid parameter " << clampValue;
+ LOG(WARNING) << "SetClamps: invalid parameter " << clamp_value;
}
} else {
- LOG(WARNING) << "SetClamps: invalid parameter: " << boostValue;
+ LOG(WARNING) << "SetClamps: invalid parameter: " << boost_value;
}
} else {
- LOG(WARNING) << "Unknown profile action: " << actionName;
+ LOG(WARNING) << "Unknown profile action: " << action_name;
}
}
- profiles_[profileName] = std::move(profile);
+ profiles_[profile_name] = std::move(profile);
}
return true;
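
The SetTimerSlack and SetClamps parsing keeps the original strtoul idiom: a value is accepted only if the end pointer advanced past the start of the string, i.e. at least one digit was consumed. A tiny standalone version of that check (ParseUnsigned is a hypothetical name):

    #include <cstdlib>
    #include <iostream>
    #include <string>

    // Returns true and stores the parsed value only if |s| starts with a digit,
    // mirroring the "end > value.c_str()" test used for Slack, Boost and Clamp.
    static bool ParseUnsigned(const std::string& s, unsigned long* out) {
        char* end = nullptr;
        unsigned long value = strtoul(s.c_str(), &end, 10);
        if (end == s.c_str()) return false;  // no digits consumed, e.g. "" or "abc"
        *out = value;
        return true;
    }

    int main() {
        unsigned long v = 0;
        std::cout << ParseUnsigned("40000000", &v) << " " << v << "\n";  // prints: 1 40000000
        std::cout << ParseUnsigned("abc", &v) << "\n";                   // prints: 0
    }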
diff --git a/libprocessgroup/task_profiles.h b/libprocessgroup/task_profiles.h
index b2e39f9..9ee81c1 100644
--- a/libprocessgroup/task_profiles.h
+++ b/libprocessgroup/task_profiles.h
@@ -152,5 +152,5 @@
TaskProfiles();
- bool Load(const CgroupMap& cg_map);
+ bool Load(const CgroupMap& cg_map, const std::string& file_name);
};
diff --git a/lmkd/lmkd.c b/lmkd/lmkd.c
index 98b3aa1..18421e8 100644
--- a/lmkd/lmkd.c
+++ b/lmkd/lmkd.c
@@ -1329,7 +1329,7 @@
static int last_killed_pid = -1;
/* Kill one process specified by procp. Returns the size of the process killed */
-static int kill_one_process(struct proc* procp) {
+static int kill_one_process(struct proc* procp, int min_oom_score) {
int pid = procp->pid;
uid_t uid = procp->uid;
char *taskname;
@@ -1340,6 +1340,9 @@
#ifdef LMKD_LOG_STATS
struct memory_stat mem_st = {};
int memory_stat_parse_result = -1;
+#else
+ /* To prevent unused parameter warning */
+ (void)(min_oom_score);
#endif
taskname = proc_get_name(pid);
@@ -1385,10 +1388,12 @@
if (memory_stat_parse_result == 0) {
stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname,
procp->oomadj, mem_st.pgfault, mem_st.pgmajfault, mem_st.rss_in_bytes,
- mem_st.cache_in_bytes, mem_st.swap_in_bytes, mem_st.process_start_time_ns);
+ mem_st.cache_in_bytes, mem_st.swap_in_bytes, mem_st.process_start_time_ns,
+ min_oom_score);
} else if (enable_stats_log) {
stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname, procp->oomadj,
- -1, -1, tasksize * BYTES_IN_KILOBYTE, -1, -1, -1);
+ -1, -1, tasksize * BYTES_IN_KILOBYTE, -1, -1, -1,
+ min_oom_score);
}
#endif
result = tasksize;
@@ -1425,7 +1430,7 @@
if (!procp)
break;
- killed_size = kill_one_process(procp);
+ killed_size = kill_one_process(procp, min_score_adj);
if (killed_size >= 0) {
#ifdef LMKD_LOG_STATS
if (enable_stats_log && !lmk_state_change_start) {
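
When LMKD_LOG_STATS is off, the new min_oom_score argument is referenced only through a (void) cast so -Wunused-parameter stays quiet without changing the function signature between configurations. lmkd.c is C, but the same guard in the C++ used for the other sketches (names here are illustrative only):

    #include <cstdio>

    #define LOG_STATS 0  // stand-in for LMKD_LOG_STATS

    static int kill_one(int pid, int min_oom_score) {
    #if LOG_STATS
        std::printf("killing %d at min score %d\n", pid, min_oom_score);
    #else
        (void)min_oom_score;  // parameter is only read when stats logging is compiled in
    #endif
        return pid;
    }

    int main() {
        return kill_one(1234, 900) == 1234 ? 0 : 1;
    }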
diff --git a/lmkd/statslog.c b/lmkd/statslog.c
index 689e8ae..0c230ae 100644
--- a/lmkd/statslog.c
+++ b/lmkd/statslog.c
@@ -65,7 +65,8 @@
stats_write_lmk_kill_occurred(android_log_context ctx, int32_t code, int32_t uid,
char const* process_name, int32_t oom_score, int64_t pgfault,
int64_t pgmajfault, int64_t rss_in_bytes, int64_t cache_in_bytes,
- int64_t swap_in_bytes, int64_t process_start_time_ns) {
+ int64_t swap_in_bytes, int64_t process_start_time_ns,
+ int32_t min_oom_score) {
assert(ctx != NULL);
int ret = -EINVAL;
if (!ctx) {
@@ -117,5 +118,9 @@
return ret;
}
+ if ((ret = android_log_write_int32(ctx, min_oom_score)) < 0) {
+ return ret;
+ }
+
return write_to_logger(ctx, LOG_ID_STATS);
}
diff --git a/lmkd/statslog.h b/lmkd/statslog.h
index f3abe11..2edba7a 100644
--- a/lmkd/statslog.h
+++ b/lmkd/statslog.h
@@ -88,7 +88,8 @@
stats_write_lmk_kill_occurred(android_log_context ctx, int32_t code, int32_t uid,
char const* process_name, int32_t oom_score, int64_t pgfault,
int64_t pgmajfault, int64_t rss_in_bytes, int64_t cache_in_bytes,
- int64_t swap_in_bytes, int64_t process_start_time_ns);
+ int64_t swap_in_bytes, int64_t process_start_time_ns,
+ int32_t min_oom_score);
__END_DECLS
diff --git a/rootdir/etc/ld.config.txt b/rootdir/etc/ld.config.txt
index 28703d2..4b2dd07 100644
--- a/rootdir/etc/ld.config.txt
+++ b/rootdir/etc/ld.config.txt
@@ -68,6 +68,9 @@
namespace.default.permitted.paths += /vendor/framework
namespace.default.permitted.paths += /vendor/app
namespace.default.permitted.paths += /vendor/priv-app
+namespace.default.permitted.paths += /system/vendor/framework
+namespace.default.permitted.paths += /system/vendor/app
+namespace.default.permitted.paths += /system/vendor/priv-app
namespace.default.permitted.paths += /odm/framework
namespace.default.permitted.paths += /odm/app
namespace.default.permitted.paths += /odm/priv-app
@@ -100,6 +103,9 @@
namespace.default.asan.permitted.paths += /vendor/framework
namespace.default.asan.permitted.paths += /vendor/app
namespace.default.asan.permitted.paths += /vendor/priv-app
+namespace.default.asan.permitted.paths += /system/vendor/framework
+namespace.default.asan.permitted.paths += /system/vendor/app
+namespace.default.asan.permitted.paths += /system/vendor/priv-app
namespace.default.asan.permitted.paths += /odm/framework
namespace.default.asan.permitted.paths += /odm/app
namespace.default.asan.permitted.paths += /odm/priv-app
@@ -236,6 +242,7 @@
namespace.sphal.permitted.paths = /odm/${LIB}
namespace.sphal.permitted.paths += /vendor/${LIB}
+namespace.sphal.permitted.paths += /system/vendor/${LIB}
namespace.sphal.asan.search.paths = /data/asan/odm/${LIB}
namespace.sphal.asan.search.paths += /odm/${LIB}
@@ -278,6 +285,7 @@
namespace.rs.permitted.paths = /odm/${LIB}
namespace.rs.permitted.paths += /vendor/${LIB}
+namespace.rs.permitted.paths += /system/vendor/${LIB}
namespace.rs.permitted.paths += /data
namespace.rs.asan.search.paths = /data/asan/odm/${LIB}/vndk-sp
@@ -323,6 +331,8 @@
namespace.vndk.permitted.paths += /odm/${LIB}/egl
namespace.vndk.permitted.paths += /vendor/${LIB}/hw
namespace.vndk.permitted.paths += /vendor/${LIB}/egl
+namespace.vndk.permitted.paths += /system/vendor/${LIB}/hw
+namespace.vndk.permitted.paths += /system/vendor/${LIB}/egl
# This is exceptionally required since android.hidl.memory@1.0-impl.so is here
namespace.vndk.permitted.paths += /system/${LIB}/vndk-sp%VNDK_VER%/hw
@@ -390,6 +400,7 @@
namespace.default.permitted.paths = /odm
namespace.default.permitted.paths += /vendor
+namespace.default.permitted.paths += /system/vendor
#VNDK27#namespace.default.search.paths += /vendor/${LIB}/hw
#VNDK27#namespace.default.search.paths += /vendor/${LIB}/egl
diff --git a/rootdir/etc/ld.config.vndk_lite.txt b/rootdir/etc/ld.config.vndk_lite.txt
index a5beb4e..54f4c98 100644
--- a/rootdir/etc/ld.config.vndk_lite.txt
+++ b/rootdir/etc/ld.config.vndk_lite.txt
@@ -184,6 +184,7 @@
namespace.sphal.permitted.paths = /odm/${LIB}
namespace.sphal.permitted.paths += /vendor/${LIB}
+namespace.sphal.permitted.paths += /system/vendor/${LIB}
namespace.sphal.asan.search.paths = /data/asan/odm/${LIB}
namespace.sphal.asan.search.paths += /odm/${LIB}
@@ -226,6 +227,7 @@
namespace.rs.permitted.paths = /odm/${LIB}
namespace.rs.permitted.paths += /vendor/${LIB}
+namespace.rs.permitted.paths += /system/vendor/${LIB}
namespace.rs.permitted.paths += /data
namespace.rs.asan.search.paths = /data/asan/odm/${LIB}/vndk-sp
@@ -271,6 +273,7 @@
namespace.vndk.permitted.paths += /odm/${LIB}/egl
namespace.vndk.permitted.paths += /vendor/${LIB}/hw
namespace.vndk.permitted.paths += /vendor/${LIB}/egl
+namespace.vndk.permitted.paths += /system/vendor/${LIB}/egl
# This is exceptionally required since android.hidl.memory@1.0-impl.so is here
namespace.vndk.permitted.paths += /system/${LIB}/vndk-sp%VNDK_VER%/hw
diff --git a/rootdir/init.rc b/rootdir/init.rc
index ce4b380..b44cc3e 100644
--- a/rootdir/init.rc
+++ b/rootdir/init.rc
@@ -13,12 +13,6 @@
# Cgroups are mounted right before early-init using list from /etc/cgroups.json
on early-init
- # Mount shared so changes propagate into child namespaces
- # Do this before other processes are started from init. Otherwise,
- # processes launched while the propagation type of / is 'private'
- # won't get mount events from others.
- mount rootfs rootfs / shared rec
-
# Set init and its forked children's oom_adj.
write /proc/1/oom_score_adj -1000
@@ -43,6 +37,11 @@
start ueventd
+ # Run apexd-bootstrap so that APEXes that provide critical libraries
+ # become available. Note that this is executed as exec_start to ensure that
+ # the libraries are available to the processes started after this statement.
+ exec_start apexd-bootstrap
+
on init
sysclktz 0
@@ -278,18 +277,9 @@
write /dev/cpu_variant:${ro.bionic.2nd_arch} ${ro.bionic.2nd_cpu_variant}
chmod 0444 /dev/cpu_variant:${ro.bionic.2nd_arch}
- # Setup APEX mount point and its security context
- mount tmpfs tmpfs /apex nodev noexec nosuid
- chmod 0755 /apex
- chown root root /apex
- restorecon /apex
-
# Start logd before any other services run to ensure we capture all of their logs.
start logd
- # Start apexd as soon as we can
- start apexd
-
# Start essential services.
start servicemanager
start hwservicemanager
@@ -425,8 +415,16 @@
mkdir /data/bootchart 0755 shell shell
bootchart start
- # /data/apex is now available. Let apexd to scan and activate APEXes.
- setprop apexd.data.status ready
+ # Make sure that apexd is started in the default namespace
+ enter_default_mount_ns
+
+ # /data/apex is now available. Start apexd to scan and activate APEXes.
+ mkdir /data/apex 0750 root system
+ mkdir /data/apex/active 0750 root system
+ mkdir /data/apex/backup 0700 root system
+ mkdir /data/apex/sessions 0700 root system
+ mkdir /data/pkg_staging 0750 system system
+ start apexd
# Avoid predictable entropy pool. Carry over entropy from previous boot.
copy /data/system/entropy.dat /dev/urandom
@@ -543,12 +541,6 @@
mkdir /data/anr 0775 system system
- mkdir /data/apex 0750 root system
- mkdir /data/apex/active 0750 root system
- mkdir /data/apex/backup 0700 root system
- mkdir /data/apex/sessions 0700 root system
- mkdir /data/pkg_staging 0750 system system
-
# NFC: create data/nfc for nv storage
mkdir /data/nfc 0770 nfc nfc
mkdir /data/nfc/param 0770 nfc nfc
@@ -581,6 +573,12 @@
mkdir /data/cache/backup_stage 0700 system system
mkdir /data/cache/backup 0700 system system
+ # Wait for apexd to finish activating APEXes before starting more processes.
+ wait_for_prop apexd.status ready
+ # TODO(jiyong): remove setup_runtime_bionic
+ setup_runtime_bionic
+ parse_apex_configs
+
init_user0
# Set SELinux security contexts on upgrade or policy update.
@@ -589,14 +587,6 @@
# load fsverity keys
exec -- /system/bin/mini-keyctl -c /product/etc/security/cacerts_fsverity,/vendor/etc/security/cacerts_fsverity -k .fs-verity
- # Wait for apexd to finish activating APEXes before starting more processes.
- # This certainly reduces the parallelism but is required to make as many processes
- # as possible to use the bionic libs from the runtime APEX. This takes less than 50ms
- # so the impact on the booting time is not significant.
- wait_for_prop apexd.status ready
- setup_runtime_bionic
- parse_apex_configs
-
# Check any timezone data in /data is newer than the copy in the runtime module, delete if not.
exec - system system -- /system/bin/tzdatacheck /apex/com.android.runtime/etc/tz /data/misc/zoneinfo
diff --git a/toolbox/Android.bp b/toolbox/Android.bp
index f08cf93..7ad6f1c 100644
--- a/toolbox/Android.bp
+++ b/toolbox/Android.bp
@@ -59,6 +59,7 @@
name: "r",
defaults: ["toolbox_defaults"],
srcs: ["r.c"],
+ vendor_available: true,
}
// We build BSD grep separately (but see http://b/111849261).
diff --git a/trusty/storage/proxy/proxy.c b/trusty/storage/proxy/proxy.c
index 41263e5..9a71ae3 100644
--- a/trusty/storage/proxy/proxy.c
+++ b/trusty/storage/proxy/proxy.c
@@ -17,8 +17,8 @@
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
-#include <string.h>
#include <stdlib.h>
+#include <string.h>
#include <sys/capability.h>
#include <sys/prctl.h>
#include <sys/stat.h>
@@ -34,28 +34,24 @@
#define REQ_BUFFER_SIZE 4096
static uint8_t req_buffer[REQ_BUFFER_SIZE + 1];
-static const char *ss_data_root;
-static const char *trusty_devname;
-static const char *rpmb_devname;
-static const char *ss_srv_name = STORAGE_DISK_PROXY_PORT;
+static const char* ss_data_root;
+static const char* trusty_devname;
+static const char* rpmb_devname;
+static const char* ss_srv_name = STORAGE_DISK_PROXY_PORT;
-static const char *_sopts = "hp:d:r:";
-static const struct option _lopts[] = {
- {"help", no_argument, NULL, 'h'},
- {"trusty_dev", required_argument, NULL, 'd'},
- {"data_path", required_argument, NULL, 'p'},
- {"rpmb_dev", required_argument, NULL, 'r'},
- {0, 0, 0, 0}
-};
+static const char* _sopts = "hp:d:r:";
+static const struct option _lopts[] = {{"help", no_argument, NULL, 'h'},
+ {"trusty_dev", required_argument, NULL, 'd'},
+ {"data_path", required_argument, NULL, 'p'},
+ {"rpmb_dev", required_argument, NULL, 'r'},
+ {0, 0, 0, 0}};
-static void show_usage_and_exit(int code)
-{
+static void show_usage_and_exit(int code) {
ALOGE("usage: storageproxyd -d <trusty_dev> -p <data_path> -r <rpmb_dev>\n");
exit(code);
}
-static int drop_privs(void)
-{
+static int drop_privs(void) {
struct __user_cap_header_struct capheader;
struct __user_cap_data_struct capdata[2];
@@ -95,12 +91,10 @@
return 0;
}
-static int handle_req(struct storage_msg *msg, const void *req, size_t req_len)
-{
+static int handle_req(struct storage_msg* msg, const void* req, size_t req_len) {
int rc;
- if ((msg->flags & STORAGE_MSG_FLAG_POST_COMMIT) &&
- (msg->cmd != STORAGE_RPMB_SEND)) {
+ if ((msg->flags & STORAGE_MSG_FLAG_POST_COMMIT) && (msg->cmd != STORAGE_RPMB_SEND)) {
/*
* handling post commit messages on non rpmb commands are not
* implemented as there is no use case for this yet.
@@ -119,42 +113,42 @@
}
switch (msg->cmd) {
- case STORAGE_FILE_DELETE:
- rc = storage_file_delete(msg, req, req_len);
- break;
+ case STORAGE_FILE_DELETE:
+ rc = storage_file_delete(msg, req, req_len);
+ break;
- case STORAGE_FILE_OPEN:
- rc = storage_file_open(msg, req, req_len);
- break;
+ case STORAGE_FILE_OPEN:
+ rc = storage_file_open(msg, req, req_len);
+ break;
- case STORAGE_FILE_CLOSE:
- rc = storage_file_close(msg, req, req_len);
- break;
+ case STORAGE_FILE_CLOSE:
+ rc = storage_file_close(msg, req, req_len);
+ break;
- case STORAGE_FILE_WRITE:
- rc = storage_file_write(msg, req, req_len);
- break;
+ case STORAGE_FILE_WRITE:
+ rc = storage_file_write(msg, req, req_len);
+ break;
- case STORAGE_FILE_READ:
- rc = storage_file_read(msg, req, req_len);
- break;
+ case STORAGE_FILE_READ:
+ rc = storage_file_read(msg, req, req_len);
+ break;
- case STORAGE_FILE_GET_SIZE:
- rc = storage_file_get_size(msg, req, req_len);
- break;
+ case STORAGE_FILE_GET_SIZE:
+ rc = storage_file_get_size(msg, req, req_len);
+ break;
- case STORAGE_FILE_SET_SIZE:
- rc = storage_file_set_size(msg, req, req_len);
- break;
+ case STORAGE_FILE_SET_SIZE:
+ rc = storage_file_set_size(msg, req, req_len);
+ break;
- case STORAGE_RPMB_SEND:
- rc = rpmb_send(msg, req, req_len);
- break;
+ case STORAGE_RPMB_SEND:
+ rc = rpmb_send(msg, req, req_len);
+ break;
- default:
- ALOGE("unhandled command 0x%x\n", msg->cmd);
- msg->result = STORAGE_ERR_UNIMPLEMENTED;
- rc = 1;
+ default:
+ ALOGE("unhandled command 0x%x\n", msg->cmd);
+ msg->result = STORAGE_ERR_UNIMPLEMENTED;
+ rc = 1;
}
if (rc > 0) {
@@ -164,58 +158,50 @@
return rc;
}
-static int proxy_loop(void)
-{
+static int proxy_loop(void) {
ssize_t rc;
struct storage_msg msg;
/* enter main message handling loop */
while (true) {
-
/* get incoming message */
rc = ipc_get_msg(&msg, req_buffer, REQ_BUFFER_SIZE);
- if (rc < 0)
- return rc;
+ if (rc < 0) return rc;
/* handle request */
req_buffer[rc] = 0; /* force zero termination */
rc = handle_req(&msg, req_buffer, rc);
- if (rc)
- return rc;
+ if (rc) return rc;
}
return 0;
}
-static void parse_args(int argc, char *argv[])
-{
+static void parse_args(int argc, char* argv[]) {
int opt;
int oidx = 0;
while ((opt = getopt_long(argc, argv, _sopts, _lopts, &oidx)) != -1) {
switch (opt) {
+ case 'd':
+ trusty_devname = strdup(optarg);
+ break;
- case 'd':
- trusty_devname = strdup(optarg);
- break;
+ case 'p':
+ ss_data_root = strdup(optarg);
+ break;
- case 'p':
- ss_data_root = strdup(optarg);
- break;
+ case 'r':
+ rpmb_devname = strdup(optarg);
+ break;
- case 'r':
- rpmb_devname = strdup(optarg);
- break;
-
- default:
- ALOGE("unrecognized option (%c):\n", opt);
- show_usage_and_exit(EXIT_FAILURE);
+ default:
+ ALOGE("unrecognized option (%c):\n", opt);
+ show_usage_and_exit(EXIT_FAILURE);
}
}
- if (ss_data_root == NULL ||
- trusty_devname == NULL ||
- rpmb_devname == NULL) {
+ if (ss_data_root == NULL || trusty_devname == NULL || rpmb_devname == NULL) {
ALOGE("missing required argument(s)\n");
show_usage_and_exit(EXIT_FAILURE);
}
@@ -226,31 +212,26 @@
ALOGI("rpmb dev: %s\n", rpmb_devname);
}
-int main(int argc, char *argv[])
-{
+int main(int argc, char* argv[]) {
int rc;
/* drop privileges */
- if (drop_privs() < 0)
- return EXIT_FAILURE;
+ if (drop_privs() < 0) return EXIT_FAILURE;
/* parse arguments */
parse_args(argc, argv);
/* initialize secure storage directory */
rc = storage_init(ss_data_root);
- if (rc < 0)
- return EXIT_FAILURE;
+ if (rc < 0) return EXIT_FAILURE;
/* open rpmb device */
rc = rpmb_open(rpmb_devname);
- if (rc < 0)
- return EXIT_FAILURE;
+ if (rc < 0) return EXIT_FAILURE;
/* connect to Trusty secure storage server */
rc = ipc_connect(trusty_devname, ss_srv_name);
- if (rc < 0)
- return EXIT_FAILURE;
+ if (rc < 0) return EXIT_FAILURE;
/* enter main loop */
rc = proxy_loop();
diff --git a/trusty/storage/proxy/rpmb.c b/trusty/storage/proxy/rpmb.c
index 9c79105..e706d0a 100644
--- a/trusty/storage/proxy/rpmb.c
+++ b/trusty/storage/proxy/rpmb.c
@@ -54,14 +54,12 @@
#ifdef RPMB_DEBUG
-static void print_buf(const char *prefix, const uint8_t *buf, size_t size)
-{
+static void print_buf(const char* prefix, const uint8_t* buf, size_t size) {
size_t i;
printf("%s @%p [%zu]", prefix, buf, size);
for (i = 0; i < size; i++) {
- if (i && i % 32 == 0)
- printf("\n%*s", (int) strlen(prefix), "");
+ if (i && i % 32 == 0) printf("\n%*s", (int)strlen(prefix), "");
printf(" %02x", buf[i]);
}
printf("\n");
@@ -70,34 +68,29 @@
#endif
-
-int rpmb_send(struct storage_msg *msg, const void *r, size_t req_len)
-{
+int rpmb_send(struct storage_msg* msg, const void* r, size_t req_len) {
int rc;
struct {
struct mmc_ioc_multi_cmd multi;
struct mmc_ioc_cmd cmd_buf[3];
} mmc = {};
- struct mmc_ioc_cmd *cmd = mmc.multi.cmds;
- const struct storage_rpmb_send_req *req = r;
+ struct mmc_ioc_cmd* cmd = mmc.multi.cmds;
+ const struct storage_rpmb_send_req* req = r;
if (req_len < sizeof(*req)) {
- ALOGW("malformed rpmb request: invalid length (%zu < %zu)\n",
- req_len, sizeof(*req));
+ ALOGW("malformed rpmb request: invalid length (%zu < %zu)\n", req_len, sizeof(*req));
msg->result = STORAGE_ERR_NOT_VALID;
goto err_response;
}
- size_t expected_len =
- sizeof(*req) + req->reliable_write_size + req->write_size;
+ size_t expected_len = sizeof(*req) + req->reliable_write_size + req->write_size;
if (req_len != expected_len) {
- ALOGW("malformed rpmb request: invalid length (%zu != %zu)\n",
- req_len, expected_len);
+ ALOGW("malformed rpmb request: invalid length (%zu != %zu)\n", req_len, expected_len);
msg->result = STORAGE_ERR_NOT_VALID;
goto err_response;
}
- const uint8_t *write_buf = req->payload;
+ const uint8_t* write_buf = req->payload;
if (req->reliable_write_size) {
if ((req->reliable_write_size % MMC_BLOCK_SIZE) != 0) {
ALOGW("invalid reliable write size %u\n", req->reliable_write_size);
@@ -143,8 +136,7 @@
}
if (req->read_size) {
- if (req->read_size % MMC_BLOCK_SIZE != 0 ||
- req->read_size > sizeof(read_buf)) {
+ if (req->read_size % MMC_BLOCK_SIZE != 0 || req->read_size > sizeof(read_buf)) {
ALOGE("%s: invalid read size %u\n", __func__, req->read_size);
msg->result = STORAGE_ERR_NOT_VALID;
goto err_response;
@@ -152,8 +144,7 @@
cmd->write_flag = MMC_WRITE_FLAG_R;
cmd->opcode = MMC_READ_MULTIPLE_BLOCK;
- cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC,
- cmd->blksz = MMC_BLOCK_SIZE;
+ cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC, cmd->blksz = MMC_BLOCK_SIZE;
cmd->blocks = req->read_size / MMC_BLOCK_SIZE;
mmc_ioc_cmd_set_data((*cmd), read_buf);
#ifdef RPMB_DEBUG
@@ -170,8 +161,7 @@
goto err_response;
}
#ifdef RPMB_DEBUG
- if (req->read_size)
- print_buf("response: ", read_buf, req->read_size);
+ if (req->read_size) print_buf("response: ", read_buf, req->read_size);
#endif
if (msg->flags & STORAGE_MSG_FLAG_POST_COMMIT) {
@@ -188,24 +178,19 @@
return ipc_respond(msg, NULL, 0);
}
-
-int rpmb_open(const char *rpmb_devname)
-{
+int rpmb_open(const char* rpmb_devname) {
int rc;
rc = open(rpmb_devname, O_RDWR, 0);
if (rc < 0) {
- ALOGE("unable (%d) to open rpmb device '%s': %s\n",
- errno, rpmb_devname, strerror(errno));
+ ALOGE("unable (%d) to open rpmb device '%s': %s\n", errno, rpmb_devname, strerror(errno));
return rc;
}
rpmb_fd = rc;
return 0;
}
-void rpmb_close(void)
-{
+void rpmb_close(void) {
close(rpmb_fd);
rpmb_fd = -1;
}
-
diff --git a/trusty/storage/proxy/rpmb.h b/trusty/storage/proxy/rpmb.h
index 85cff44..5107361 100644
--- a/trusty/storage/proxy/rpmb.h
+++ b/trusty/storage/proxy/rpmb.h
@@ -18,6 +18,6 @@
#include <stdint.h>
#include <trusty/interface/storage.h>
-int rpmb_open(const char *rpmb_devname);
-int rpmb_send(struct storage_msg *msg, const void *r, size_t req_len);
+int rpmb_open(const char* rpmb_devname);
+int rpmb_send(struct storage_msg* msg, const void* r, size_t req_len);
void rpmb_close(void);