Merge "libsnapshot: Add a helper for waiting for device paths."
diff --git a/debuggerd/debuggerd_test.cpp b/debuggerd/debuggerd_test.cpp
index 5565e8b..e5af425 100644
--- a/debuggerd/debuggerd_test.cpp
+++ b/debuggerd/debuggerd_test.cpp
@@ -34,7 +34,6 @@
#include <android/set_abort_message.h>
#include <bionic/malloc.h>
#include <bionic/mte.h>
-#include <bionic/mte_kernel.h>
#include <bionic/reserved_signals.h>
#include <android-base/cmsg.h>
@@ -386,16 +385,6 @@
#if defined(__aarch64__) && defined(ANDROID_EXPERIMENTAL_MTE)
static void SetTagCheckingLevelSync() {
- int tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
- if (tagged_addr_ctrl < 0) {
- abort();
- }
-
- tagged_addr_ctrl = (tagged_addr_ctrl & ~PR_MTE_TCF_MASK) | PR_MTE_TCF_SYNC;
- if (prctl(PR_SET_TAGGED_ADDR_CTRL, tagged_addr_ctrl, 0, 0, 0) != 0) {
- abort();
- }
-
HeapTaggingLevel heap_tagging_level = M_HEAP_TAGGING_LEVEL_SYNC;
if (!android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &heap_tagging_level, sizeof(heap_tagging_level))) {
abort();
diff --git a/fs_mgr/libsnapshot/android/snapshot/snapshot.proto b/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
index 0328132..acb75d0 100644
--- a/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
+++ b/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
@@ -34,7 +34,7 @@
MERGE_COMPLETED = 3;
}
-// Next: 9
+// Next: 10
message SnapshotStatus {
// Name of the snapshot. This is usually the name of the snapshotted
// logical partition; for example, "system_b".
@@ -84,6 +84,9 @@
// the merge process.
// This is non-zero when |state| == MERGING or MERGE_COMPLETED.
uint64 metadata_sectors = 8;
+
+ // True if compression is enabled, false otherwise.
+ bool compression_enabled = 9;
}
// Next: 8
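For reference, the new compression_enabled field is written once when the snapshot is created and read back wherever the code previously consulted the global IsCompressionEnabled() setting (see the snapshot.cpp changes below). A minimal sketch of the generated C++ protobuf API involved, assuming the android.snapshot proto package:

    // Sketch only: producing and consuming the per-snapshot flag added above.
    android::snapshot::SnapshotStatus status;
    status.set_compression_enabled(true);   // recorded at snapshot-creation time
    if (status.compression_enabled()) {
        // readers branch on the stored per-snapshot value, not the global setting
    }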
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index cee1de3..e38c821 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -280,7 +280,7 @@
return false;
}
- if (!IsCompressionEnabled() && !EnsureNoOverflowSnapshot(lock.get())) {
+ if (!EnsureNoOverflowSnapshot(lock.get())) {
LOG(ERROR) << "Cannot ensure there are no overflow snapshots.";
return false;
}
@@ -349,6 +349,7 @@
status->set_state(SnapshotState::CREATED);
status->set_sectors_allocated(0);
status->set_metadata_sectors(0);
+ status->set_compression_enabled(IsCompressionEnabled());
if (!WriteSnapshotStatus(lock, *status)) {
PLOG(ERROR) << "Could not write snapshot status: " << status->name();
@@ -1917,13 +1918,13 @@
remaining_time = GetRemainingTime(params.timeout_ms, begin);
if (remaining_time.count() < 0) return false;
- if (context == SnapshotContext::Update && IsCompressionEnabled()) {
+ if (context == SnapshotContext::Update && live_snapshot_status->compression_enabled()) {
// Stop here, we can't run dm-user yet, the COW isn't built.
created_devices.Release();
return true;
}
- if (IsCompressionEnabled()) {
+ if (live_snapshot_status->compression_enabled()) {
auto name = GetDmUserCowName(params.GetPartitionName());
std::string cow_path;
@@ -2760,7 +2761,7 @@
return Return::Error();
}
- if (IsCompressionEnabled()) {
+ if (it->second.compression_enabled()) {
unique_fd fd(open(cow_path.c_str(), O_RDWR | O_CLOEXEC));
if (fd < 0) {
PLOG(ERROR) << "open " << cow_path << " failed for snapshot "
@@ -3179,6 +3180,14 @@
auto& dm = DeviceMapper::Instance();
for (const auto& snapshot : snapshots) {
+ SnapshotStatus status;
+ if (!ReadSnapshotStatus(lock, snapshot, &status)) {
+ return false;
+ }
+ if (status.compression_enabled()) {
+ continue;
+ }
+
std::vector<DeviceMapper::TargetInfo> targets;
if (!dm.GetTableStatus(snapshot, &targets)) {
LOG(ERROR) << "Could not read snapshot device table: " << snapshot;
diff --git a/fs_mgr/libsnapshot/snapuserd.cpp b/fs_mgr/libsnapshot/snapuserd.cpp
index 954699c..3abc457 100644
--- a/fs_mgr/libsnapshot/snapuserd.cpp
+++ b/fs_mgr/libsnapshot/snapuserd.cpp
@@ -28,6 +28,9 @@
using namespace android::dm;
using android::base::unique_fd;
+#define SNAP_LOG(level) LOG(level) << misc_name_ << ": "
+#define SNAP_PLOG(level) PLOG(level) << misc_name_ << ": "
+
static constexpr size_t PAYLOAD_SIZE = (1UL << 16);
static_assert(PAYLOAD_SIZE >= BLOCK_SIZE);
@@ -94,7 +97,7 @@
// it will be de-compressed.
bool Snapuserd::ProcessReplaceOp(const CowOperation* cow_op) {
if (!reader_->ReadData(*cow_op, &bufsink_)) {
- LOG(ERROR) << "ReadData failed for chunk: " << cow_op->new_block;
+ SNAP_LOG(ERROR) << "ReadData failed for chunk: " << cow_op->new_block;
return false;
}
@@ -111,7 +114,7 @@
// if the successive blocks are contiguous.
if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SIZE,
cow_op->source * BLOCK_SIZE)) {
- LOG(ERROR) << "Copy-op failed. Read from backing store at: " << cow_op->source;
+ SNAP_LOG(ERROR) << "Copy-op failed. Read from backing store at: " << cow_op->source;
return false;
}
@@ -180,14 +183,14 @@
}
default: {
- LOG(ERROR) << "Unknown operation-type found: " << cow_op->type;
+ SNAP_LOG(ERROR) << "Unknown operation-type found: " << cow_op->type;
ret = false;
break;
}
}
if (!ret) {
- LOG(ERROR) << "ReadData failed for operation: " << cow_op->type;
+ SNAP_LOG(ERROR) << "ReadData failed for operation: " << cow_op->type;
return false;
}
@@ -322,7 +325,7 @@
CHECK(!(*unmerged_exceptions == exceptions_per_area_));
- LOG(DEBUG) << "Unmerged_Exceptions: " << *unmerged_exceptions << " Offset: " << offset;
+ SNAP_LOG(DEBUG) << "Unmerged_Exceptions: " << *unmerged_exceptions << " Offset: " << offset;
return offset;
}
@@ -355,11 +358,11 @@
CHECK(cow_de->new_chunk == 0);
break;
} else {
- LOG(ERROR) << "Error in merge operation. Found invalid metadata";
- LOG(ERROR) << "merged_de-old-chunk: " << merged_de->old_chunk;
- LOG(ERROR) << "merged_de-new-chunk: " << merged_de->new_chunk;
- LOG(ERROR) << "cow_de-old-chunk: " << cow_de->old_chunk;
- LOG(ERROR) << "cow_de-new-chunk: " << cow_de->new_chunk;
+ SNAP_LOG(ERROR) << "Error in merge operation. Found invalid metadata";
+ SNAP_LOG(ERROR) << "merged_de-old-chunk: " << merged_de->old_chunk;
+ SNAP_LOG(ERROR) << "merged_de-new-chunk: " << merged_de->new_chunk;
+ SNAP_LOG(ERROR) << "cow_de-old-chunk: " << cow_de->old_chunk;
+ SNAP_LOG(ERROR) << "cow_de-new-chunk: " << cow_de->new_chunk;
return -1;
}
}
@@ -384,19 +387,19 @@
if (!(cow_op->type == kCowReplaceOp || cow_op->type == kCowZeroOp ||
cow_op->type == kCowCopyOp)) {
- LOG(ERROR) << "Unknown operation-type found during merge: " << cow_op->type;
+ SNAP_LOG(ERROR) << "Unknown operation-type found during merge: " << cow_op->type;
return false;
}
merged_ops_cur_iter -= 1;
- LOG(DEBUG) << "Merge op found of type " << cow_op->type
- << "Pending-merge-ops: " << merged_ops_cur_iter;
+ SNAP_LOG(DEBUG) << "Merge op found of type " << cow_op->type
+ << "Pending-merge-ops: " << merged_ops_cur_iter;
cowop_iter_->Next();
}
if (cowop_iter_->Done()) {
CHECK(merged_ops_cur_iter == 0);
- LOG(DEBUG) << "All cow operations merged successfully in this cycle";
+ SNAP_LOG(DEBUG) << "All cow operations merged successfully in this cycle";
}
return true;
@@ -407,14 +410,15 @@
CowHeader header;
if (!reader_->GetHeader(&header)) {
- LOG(ERROR) << "Failed to get header";
+ SNAP_LOG(ERROR) << "Failed to get header";
return false;
}
// ChunkID to vector index
lldiv_t divresult = lldiv(chunk, stride);
CHECK(divresult.quot < vec_.size());
- LOG(DEBUG) << "ProcessMergeComplete: chunk: " << chunk << " Metadata-Index: " << divresult.quot;
+ SNAP_LOG(DEBUG) << "ProcessMergeComplete: chunk: " << chunk
+ << " Metadata-Index: " << divresult.quot;
int unmerged_exceptions = 0;
loff_t offset = GetMergeStartOffset(buffer, vec_[divresult.quot].get(), &unmerged_exceptions);
@@ -429,11 +433,11 @@
header.num_merge_ops += merged_ops_cur_iter;
reader_->UpdateMergeProgress(merged_ops_cur_iter);
if (!writer_->CommitMerge(merged_ops_cur_iter)) {
- LOG(ERROR) << "CommitMerge failed...";
+ SNAP_LOG(ERROR) << "CommitMerge failed...";
return false;
}
- LOG(DEBUG) << "Merge success";
+ SNAP_LOG(DEBUG) << "Merge success";
return true;
}
@@ -513,21 +517,21 @@
bool prev_copy_op = false;
bool metadata_found = false;
- LOG(DEBUG) << "ReadMetadata Start...";
+ SNAP_LOG(DEBUG) << "ReadMetadata Start...";
if (!reader_->Parse(cow_fd_)) {
- LOG(ERROR) << "Failed to parse";
+ SNAP_LOG(ERROR) << "Failed to parse";
return false;
}
if (!reader_->GetHeader(&header)) {
- LOG(ERROR) << "Failed to get header";
+ SNAP_LOG(ERROR) << "Failed to get header";
return false;
}
CHECK(header.block_size == BLOCK_SIZE);
- LOG(DEBUG) << "Merge-ops: " << header.num_merge_ops;
+ SNAP_LOG(DEBUG) << "Merge-ops: " << header.num_merge_ops;
writer_ = std::make_unique<CowWriter>(options);
writer_->InitializeMerge(cow_fd_.get(), &header);
@@ -563,7 +567,7 @@
if (!(cow_op->type == kCowReplaceOp || cow_op->type == kCowZeroOp ||
cow_op->type == kCowCopyOp)) {
- LOG(ERROR) << "Unknown operation-type found: " << cow_op->type;
+ SNAP_LOG(ERROR) << "Unknown operation-type found: " << cow_op->type;
return false;
}
@@ -578,7 +582,7 @@
de->old_chunk = cow_op->new_block;
de->new_chunk = next_free;
- LOG(DEBUG) << "Old-chunk: " << de->old_chunk << "New-chunk: " << de->new_chunk;
+ SNAP_LOG(DEBUG) << "Old-chunk: " << de->old_chunk << "New-chunk: " << de->new_chunk;
// Store operation pointer.
chunk_map_[next_free] = cow_op;
@@ -602,7 +606,7 @@
if (cowop_riter_->Done()) {
vec_.push_back(std::move(de_ptr));
- LOG(DEBUG) << "ReadMetadata() completed; Number of Areas: " << vec_.size();
+ SNAP_LOG(DEBUG) << "ReadMetadata() completed; Number of Areas: " << vec_.size();
}
}
@@ -614,12 +618,12 @@
// is aware that merge is completed.
if (num_ops || !metadata_found) {
vec_.push_back(std::move(de_ptr));
- LOG(DEBUG) << "ReadMetadata() completed. Partially filled area num_ops: " << num_ops
- << "Areas : " << vec_.size();
+ SNAP_LOG(DEBUG) << "ReadMetadata() completed. Partially filled area num_ops: " << num_ops
+ << "Areas : " << vec_.size();
}
- LOG(DEBUG) << "ReadMetadata() completed. chunk_id: " << next_free
- << "Num Sector: " << ChunkToSector(next_free);
+ SNAP_LOG(DEBUG) << "ReadMetadata() completed. chunk_id: " << next_free
+ << "Num Sector: " << ChunkToSector(next_free);
// Initialize the iterator for merging
cowop_iter_ = reader_->GetOpIter();
@@ -643,7 +647,7 @@
// us the sector number for which IO is issued by dm-snapshot device
bool Snapuserd::ReadDmUserHeader() {
if (!android::base::ReadFully(ctrl_fd_, bufsink_.GetBufPtr(), sizeof(struct dm_user_header))) {
- PLOG(ERROR) << "ReadDmUserHeader failed";
+ SNAP_PLOG(ERROR) << "Control-read failed";
return false;
}
@@ -654,7 +658,7 @@
bool Snapuserd::WriteDmUserPayload(size_t size) {
if (!android::base::WriteFully(ctrl_fd_, bufsink_.GetBufPtr(),
sizeof(struct dm_user_header) + size)) {
- PLOG(ERROR) << "Write to dm-user failed";
+ SNAP_PLOG(ERROR) << "Write to dm-user failed";
return false;
}
@@ -663,7 +667,7 @@
bool Snapuserd::ReadDmUserPayload(void* buffer, size_t size) {
if (!android::base::ReadFully(ctrl_fd_, buffer, size)) {
- PLOG(ERROR) << "ReadDmUserPayload failed";
+ SNAP_PLOG(ERROR) << "ReadDmUserPayload failed";
return false;
}
@@ -673,7 +677,7 @@
bool Snapuserd::InitCowDevice() {
cow_fd_.reset(open(cow_device_.c_str(), O_RDWR));
if (cow_fd_ < 0) {
- PLOG(ERROR) << "Open Failed: " << cow_device_;
+ SNAP_PLOG(ERROR) << "Open Failed: " << cow_device_;
return false;
}
@@ -690,13 +694,13 @@
bool Snapuserd::InitBackingAndControlDevice() {
backing_store_fd_.reset(open(backing_store_device_.c_str(), O_RDONLY));
if (backing_store_fd_ < 0) {
- PLOG(ERROR) << "Open Failed: " << backing_store_device_;
+ SNAP_PLOG(ERROR) << "Open Failed: " << backing_store_device_;
return false;
}
ctrl_fd_.reset(open(control_device_.c_str(), O_RDWR));
if (ctrl_fd_ < 0) {
- PLOG(ERROR) << "Unable to open " << control_device_;
+ SNAP_PLOG(ERROR) << "Unable to open " << control_device_;
return false;
}
@@ -709,15 +713,15 @@
bufsink_.Clear();
if (!ReadDmUserHeader()) {
- LOG(ERROR) << "ReadDmUserHeader failed";
+ SNAP_LOG(ERROR) << "ReadDmUserHeader failed";
return false;
}
- LOG(DEBUG) << "msg->seq: " << std::hex << header->seq;
- LOG(DEBUG) << "msg->type: " << std::hex << header->type;
- LOG(DEBUG) << "msg->flags: " << std::hex << header->flags;
- LOG(DEBUG) << "msg->sector: " << std::hex << header->sector;
- LOG(DEBUG) << "msg->len: " << std::hex << header->len;
+ SNAP_LOG(DEBUG) << "msg->seq: " << std::hex << header->seq;
+ SNAP_LOG(DEBUG) << "msg->type: " << std::hex << header->type;
+ SNAP_LOG(DEBUG) << "msg->flags: " << std::hex << header->flags;
+ SNAP_LOG(DEBUG) << "msg->sector: " << std::hex << header->sector;
+ SNAP_LOG(DEBUG) << "msg->len: " << std::hex << header->len;
switch (header->type) {
case DM_USER_REQ_MAP_READ: {
@@ -736,7 +740,7 @@
CHECK(metadata_read_done_ == true);
CHECK(read_size == BLOCK_SIZE);
ConstructKernelCowHeader();
- LOG(DEBUG) << "Kernel header constructed";
+ SNAP_LOG(DEBUG) << "Kernel header constructed";
} else {
// Convert the sector number to a chunk ID.
//
@@ -747,22 +751,22 @@
if (chunk_map_.find(chunk) == chunk_map_.end()) {
if (!ReadDiskExceptions(chunk, read_size)) {
- LOG(ERROR) << "ReadDiskExceptions failed for chunk id: " << chunk
- << "Sector: " << header->sector;
+ SNAP_LOG(ERROR) << "ReadDiskExceptions failed for chunk id: " << chunk
+ << "Sector: " << header->sector;
header->type = DM_USER_RESP_ERROR;
} else {
- LOG(DEBUG) << "ReadDiskExceptions success for chunk id: " << chunk
- << "Sector: " << header->sector;
+ SNAP_LOG(DEBUG) << "ReadDiskExceptions success for chunk id: " << chunk
+ << "Sector: " << header->sector;
}
} else {
chunk_t num_chunks_read = (offset >> BLOCK_SHIFT);
if (!ReadData(chunk + num_chunks_read, read_size)) {
- LOG(ERROR) << "ReadData failed for chunk id: " << chunk
- << "Sector: " << header->sector;
+ SNAP_LOG(ERROR) << "ReadData failed for chunk id: " << chunk
+ << "Sector: " << header->sector;
header->type = DM_USER_RESP_ERROR;
} else {
- LOG(DEBUG) << "ReadData success for chunk id: " << chunk
- << "Sector: " << header->sector;
+ SNAP_LOG(DEBUG) << "ReadData success for chunk id: " << chunk
+ << "Sector: " << header->sector;
}
}
}
@@ -817,18 +821,18 @@
header->type = DM_USER_RESP_SUCCESS;
if (!ReadDmUserPayload(buffer, read_size)) {
- LOG(ERROR) << "ReadDmUserPayload failed for chunk id: " << chunk
- << "Sector: " << header->sector;
+ SNAP_LOG(ERROR) << "ReadDmUserPayload failed for chunk id: " << chunk
+ << "Sector: " << header->sector;
header->type = DM_USER_RESP_ERROR;
}
if (header->type == DM_USER_RESP_SUCCESS && !ProcessMergeComplete(chunk, buffer)) {
- LOG(ERROR) << "ProcessMergeComplete failed for chunk id: " << chunk
- << "Sector: " << header->sector;
+ SNAP_LOG(ERROR) << "ProcessMergeComplete failed for chunk id: " << chunk
+ << "Sector: " << header->sector;
header->type = DM_USER_RESP_ERROR;
} else {
- LOG(DEBUG) << "ProcessMergeComplete success for chunk id: " << chunk
- << "Sector: " << header->sector;
+ SNAP_LOG(DEBUG) << "ProcessMergeComplete success for chunk id: " << chunk
+ << "Sector: " << header->sector;
}
if (!WriteDmUserPayload(0)) {
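The remainder of this file's diff is a mechanical conversion of LOG()/PLOG() calls to the new SNAP_LOG()/SNAP_PLOG() macros, which prefix every message with the misc_name_ member (presumably the per-device dm-user name) so output from different snapuserd instances can be told apart. A standalone sketch of the idea, with std::cerr standing in for libbase logging and a hypothetical device name:

    #include <iostream>
    #include <string>

    class Snapuserd {
      public:
        void Demo() { SnapLog() << "ReadData failed for chunk: " << 42 << "\n"; }

      private:
        // Same effect as SNAP_LOG(level): every message starts with "<misc_name_>: ".
        std::ostream& SnapLog() { return std::cerr << misc_name_ << ": "; }
        std::string misc_name_ = "system_b";  // hypothetical value
    };

    int main() {
        Snapuserd().Demo();  // prints: system_b: ReadData failed for chunk: 42
        return 0;
    }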
diff --git a/fs_mgr/tools/Android.bp b/fs_mgr/tools/Android.bp
index 4d4aae4..d6ccc4b 100644
--- a/fs_mgr/tools/Android.bp
+++ b/fs_mgr/tools/Android.bp
@@ -29,3 +29,15 @@
cflags: ["-Werror"],
}
+
+cc_binary {
+ name: "dmuserd",
+ srcs: ["dmuserd.cpp"],
+
+ shared_libs: [
+ "libbase",
+ "liblog",
+ ],
+
+ cflags: ["-Werror"],
+}
diff --git a/fs_mgr/tools/dmuserd.cpp b/fs_mgr/tools/dmuserd.cpp
new file mode 100644
index 0000000..e50a4a2
--- /dev/null
+++ b/fs_mgr/tools/dmuserd.cpp
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: Apache-2.0
+
+#define _LARGEFILE64_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+#include <iostream>
+
+#define SECTOR_SIZE ((__u64)512)
+#define BUFFER_BYTES 4096
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* This should be replaced with linux/dm-user.h. */
+#ifndef _LINUX_DM_USER_H
+#define _LINUX_DM_USER_H
+
+#include <linux/types.h>
+
+#define DM_USER_REQ_MAP_READ 0
+#define DM_USER_REQ_MAP_WRITE 1
+#define DM_USER_REQ_MAP_FLUSH 2
+#define DM_USER_REQ_MAP_DISCARD 3
+#define DM_USER_REQ_MAP_SECURE_ERASE 4
+#define DM_USER_REQ_MAP_WRITE_SAME 5
+#define DM_USER_REQ_MAP_WRITE_ZEROES 6
+#define DM_USER_REQ_MAP_ZONE_OPEN 7
+#define DM_USER_REQ_MAP_ZONE_CLOSE 8
+#define DM_USER_REQ_MAP_ZONE_FINISH 9
+#define DM_USER_REQ_MAP_ZONE_APPEND 10
+#define DM_USER_REQ_MAP_ZONE_RESET 11
+#define DM_USER_REQ_MAP_ZONE_RESET_ALL 12
+
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_DEV 0x00001
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_TRANSPORT 0x00002
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_DRIVER 0x00004
+#define DM_USER_REQ_MAP_FLAG_SYNC 0x00008
+#define DM_USER_REQ_MAP_FLAG_META 0x00010
+#define DM_USER_REQ_MAP_FLAG_PRIO 0x00020
+#define DM_USER_REQ_MAP_FLAG_NOMERGE 0x00040
+#define DM_USER_REQ_MAP_FLAG_IDLE 0x00080
+#define DM_USER_REQ_MAP_FLAG_INTEGRITY 0x00100
+#define DM_USER_REQ_MAP_FLAG_FUA 0x00200
+#define DM_USER_REQ_MAP_FLAG_PREFLUSH 0x00400
+#define DM_USER_REQ_MAP_FLAG_RAHEAD 0x00800
+#define DM_USER_REQ_MAP_FLAG_BACKGROUND 0x01000
+#define DM_USER_REQ_MAP_FLAG_NOWAIT 0x02000
+#define DM_USER_REQ_MAP_FLAG_CGROUP_PUNT 0x04000
+#define DM_USER_REQ_MAP_FLAG_NOUNMAP 0x08000
+#define DM_USER_REQ_MAP_FLAG_HIPRI 0x10000
+#define DM_USER_REQ_MAP_FLAG_DRV 0x20000
+#define DM_USER_REQ_MAP_FLAG_SWAP 0x40000
+
+#define DM_USER_RESP_SUCCESS 0
+#define DM_USER_RESP_ERROR 1
+#define DM_USER_RESP_UNSUPPORTED 2
+
+struct dm_user_message {
+ __u64 seq;
+ __u64 type;
+ __u64 flags;
+ __u64 sector;
+ __u64 len;
+ __u8 buf[];
+};
+
+#endif
+
+static bool verbose = false;
+
+ssize_t write_all(int fd, void* buf, size_t len) {
+ char* buf_c = (char*)buf;
+ ssize_t total = 0;
+ ssize_t once;
+
+ while (total < len) {
+ once = write(fd, buf_c + total, len - total);
+ if (once < 0) return once;
+ if (once == 0) {
+ errno = ENOSPC;
+ return 0;
+ }
+ total += once;
+ }
+
+ return total;
+}
+
+ssize_t read_all(int fd, void* buf, size_t len) {
+ char* buf_c = (char*)buf;
+ ssize_t total = 0;
+ ssize_t once;
+
+ while (total < len) {
+ once = read(fd, buf_c + total, len - total);
+ if (once < 0) return once;
+ if (once == 0) {
+ errno = ENOSPC;
+ return 0;
+ }
+ total += once;
+ }
+
+ return total;
+}
+
+int not_splice(int from, int to, __u64 count) {
+ while (count > 0) {
+ char buf[BUFFER_BYTES];
+ __u64 max = count > BUFFER_BYTES ? BUFFER_BYTES : count;
+
+ if (read_all(from, buf, max) <= 0) {
+ perror("Unable to read");
+ return -EIO;
+ }
+
+ if (write_all(to, buf, max) <= 0) {
+ perror("Unable to write");
+ return -EIO;
+ }
+
+ count -= max;
+ }
+
+ return 0;
+}
+
+int simple_daemon(char* control_path, char* backing_path) {
+ int control_fd = open(control_path, O_RDWR);
+ if (control_fd < 0) {
+ fprintf(stderr, "Unable to open control device %s\n", control_path);
+ return -1;
+ }
+
+ int backing_fd = open(backing_path, O_RDWR);
+ if (backing_fd < 0) {
+ fprintf(stderr, "Unable to open backing device %s\n", backing_path);
+ return -1;
+ }
+
+ while (1) {
+ struct dm_user_message msg;
+ char* base;
+ __u64 type;
+
+ if (verbose) std::cerr << "dmuserd: Waiting for message...\n";
+
+ if (read_all(control_fd, &msg, sizeof(msg)) < 0) {
+ if (errno == ENOTBLK) return 0;
+
+ perror("unable to read msg");
+ return -1;
+ }
+
+ if (verbose) {
+ std::string type;
+ switch (msg.type) {
+ case DM_USER_REQ_MAP_WRITE:
+ type = "write";
+ break;
+ case DM_USER_REQ_MAP_READ:
+ type = "read";
+ break;
+ case DM_USER_REQ_MAP_FLUSH:
+ type = "flush";
+ break;
+ default:
+ /*
+ * FIXME: Can't I do "whatever"s here rather than
+ * std::string("whatever")?
+ */
+ type = std::string("(unknown, id=") + std::to_string(msg.type) + ")";
+ break;
+ }
+
+ std::string flags;
+ if (msg.flags & DM_USER_REQ_MAP_FLAG_SYNC) {
+ if (!flags.empty()) flags += "|";
+ flags += "S";
+ }
+ if (msg.flags & DM_USER_REQ_MAP_FLAG_META) {
+ if (!flags.empty()) flags += "|";
+ flags += "M";
+ }
+ if (msg.flags & DM_USER_REQ_MAP_FLAG_FUA) {
+ if (!flags.empty()) flags += "|";
+ flags += "FUA";
+ }
+ if (msg.flags & DM_USER_REQ_MAP_FLAG_PREFLUSH) {
+ if (!flags.empty()) flags += "|";
+ flags += "F";
+ }
+
+ std::cerr << "dmuserd: Got " << type << " request " << flags << " for sector "
+ << std::to_string(msg.sector) << " with length " << std::to_string(msg.len)
+ << "\n";
+ }
+
+ type = msg.type;
+ switch (type) {
+ case DM_USER_REQ_MAP_READ:
+ msg.type = DM_USER_RESP_SUCCESS;
+ break;
+ case DM_USER_REQ_MAP_WRITE:
+ if (msg.flags & DM_USER_REQ_MAP_FLAG_PREFLUSH ||
+ msg.flags & DM_USER_REQ_MAP_FLAG_FUA) {
+ if (fsync(backing_fd) < 0) {
+ perror("Unable to fsync(), just sync()ing instead");
+ sync();
+ }
+ }
+ msg.type = DM_USER_RESP_SUCCESS;
+ if (lseek64(backing_fd, msg.sector * SECTOR_SIZE, SEEK_SET) < 0) {
+ perror("Unable to seek");
+ return -1;
+ }
+ if (not_splice(control_fd, backing_fd, msg.len) < 0) {
+ if (errno == ENOTBLK) return 0;
+ std::cerr << "unable to handle write data\n";
+ return -1;
+ }
+ if (msg.flags & DM_USER_REQ_MAP_FLAG_FUA) {
+ if (fsync(backing_fd) < 0) {
+ perror("Unable to fsync(), just sync()ing instead");
+ sync();
+ }
+ }
+ break;
+ case DM_USER_REQ_MAP_FLUSH:
+ msg.type = DM_USER_RESP_SUCCESS;
+ if (fsync(backing_fd) < 0) {
+ perror("Unable to fsync(), just sync()ing instead");
+ sync();
+ }
+ break;
+ default:
+ std::cerr << "dmuserd: unsupported op " << std::to_string(msg.type) << "\n";
+ msg.type = DM_USER_RESP_UNSUPPORTED;
+ break;
+ }
+
+ if (verbose) std::cerr << "dmuserd: Responding to message\n";
+
+ if (write_all(control_fd, &msg, sizeof(msg)) < 0) {
+ if (errno == ENOTBLK) return 0;
+ perror("unable to write msg");
+ return -1;
+ }
+
+ switch (type) {
+ case DM_USER_REQ_MAP_READ:
+ if (verbose) std::cerr << "dmuserd: Sending read data\n";
+ if (lseek64(backing_fd, msg.sector * SECTOR_SIZE, SEEK_SET) < 0) {
+ perror("Unable to seek");
+ return -1;
+ }
+ if (not_splice(backing_fd, control_fd, msg.len) < 0) {
+ if (errno == ENOTBLK) return 0;
+ std::cerr << "unable to handle read data\n";
+ return -1;
+ }
+ break;
+ }
+ }
+
+ /* The daemon doesn't actually terminate for this test. */
+ perror("Unable to read from control device");
+ return -1;
+}
+
+void usage(char* prog) {
+ printf("Usage: %s\n", prog);
+ printf(" Handles block requests in userspace, backed by memory\n");
+ printf(" -h Display this help message\n");
+ printf(" -c <control dev> Control device to use for the test\n");
+ printf(" -b <store path> The file to use as a backing store, otherwise memory\n");
+ printf(" -v Enable verbose mode\n");
+}
+
+int main(int argc, char* argv[]) {
+ char* control_path = NULL;
+ char* backing_path = NULL;
+ char* store;
+ int c;
+
+ prctl(PR_SET_IO_FLUSHER, 0, 0, 0, 0);
+
+ while ((c = getopt(argc, argv, "h:c:s:b:v")) != -1) {
+ switch (c) {
+ case 'h':
+ usage(basename(argv[0]));
+ exit(0);
+ case 'c':
+ control_path = strdup(optarg);
+ break;
+ case 'b':
+ backing_path = strdup(optarg);
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ default:
+ usage(basename(argv[0]));
+ exit(1);
+ }
+ }
+
+ int r = simple_daemon(control_path, backing_path);
+ if (r) fprintf(stderr, "simple_daemon() errored out\n");
+ return r;
+}
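In the loop above, write payloads arrive on the control device immediately after the request header, while read data is streamed back only after the response header has been written. A trimmed sketch of the read path, reusing the helpers and constants defined in this file (error handling and verbose logging elided):

    struct dm_user_message msg;
    read_all(control_fd, &msg, sizeof(msg));          // kernel hands us a request header
    if (msg.type == DM_USER_REQ_MAP_READ) {
        msg.type = DM_USER_RESP_SUCCESS;              // the header is reused as the response
        write_all(control_fd, &msg, sizeof(msg));     // acknowledge first...
        lseek64(backing_fd, msg.sector * SECTOR_SIZE, SEEK_SET);
        not_splice(backing_fd, control_fd, msg.len);  // ...then stream msg.len bytes back
    }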
diff --git a/init/Android.mk b/init/Android.mk
index ac31ef1..4c1665b 100644
--- a/init/Android.mk
+++ b/init/Android.mk
@@ -82,6 +82,7 @@
$(TARGET_RAMDISK_OUT)/proc \
$(TARGET_RAMDISK_OUT)/second_stage_resources \
$(TARGET_RAMDISK_OUT)/sys \
+ $(TARGET_RAMDISK_OUT)/metadata \
LOCAL_STATIC_LIBRARIES := \
libc++fs \
diff --git a/init/README.md b/init/README.md
index ab6a885..bcbbfbb 100644
--- a/init/README.md
+++ b/init/README.md
@@ -451,6 +451,10 @@
exist. And it will be truncated if dst file is a normal regular file and
already exists.
+`copy_per_line <src> <dst>`
+> Copies a file line by line. Similar to copy, but useful when dst is a sysfs node
+ that doesn't handle multiple lines of data.
+
`domainname <name>`
> Set the domain name.
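For a concrete use of the new builtin, see the init.rc change later in this patch, which migrates the root cpu controller group with "copy_per_line /dev/cpuctl/tasks /dev/cpuctl/system/tasks"; a plain copy would not work there because the destination tasks node does not handle multiple lines of data per write.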
diff --git a/init/builtins.cpp b/init/builtins.cpp
index d00d1b1..b235d2f 100644
--- a/init/builtins.cpp
+++ b/init/builtins.cpp
@@ -88,6 +88,7 @@
using android::base::Basename;
using android::base::SetProperty;
+using android::base::Split;
using android::base::StartsWith;
using android::base::StringPrintf;
using android::base::unique_fd;
@@ -968,6 +969,23 @@
return {};
}
+static Result<void> do_copy_per_line(const BuiltinArguments& args) {
+ std::string file_contents;
+ if (!android::base::ReadFileToString(args[1], &file_contents, true)) {
+ return Error() << "Could not read input file '" << args[1] << "'";
+ }
+ auto lines = Split(file_contents, "\n");
+ for (const auto& line : lines) {
+ auto result = WriteFile(args[2], line);
+ if (!result.ok()) {
+ LOG(VERBOSE) << "Could not write to output file '" << args[2] << "' with '" << line
+ << "' : " << result.error();
+ }
+ }
+
+ return {};
+}
+
static Result<void> do_chown(const BuiltinArguments& args) {
auto uid = DecodeUid(args[1]);
if (!uid.ok()) {
@@ -1366,6 +1384,7 @@
{"class_start_post_data", {1, 1, {false, do_class_start_post_data}}},
{"class_stop", {1, 1, {false, do_class_stop}}},
{"copy", {2, 2, {true, do_copy}}},
+ {"copy_per_line", {2, 2, {true, do_copy_per_line}}},
{"domainname", {1, 1, {true, do_domainname}}},
{"enable", {1, 1, {false, do_enable}}},
{"exec", {1, kMax, {false, do_exec}}},
diff --git a/init/first_stage_init.cpp b/init/first_stage_init.cpp
index 91aaffd..83a32e7 100644
--- a/init/first_stage_init.cpp
+++ b/init/first_stage_init.cpp
@@ -117,7 +117,7 @@
auto dst_dir = android::base::Dirname(dst);
std::error_code ec;
- if (!fs::create_directories(dst_dir, ec)) {
+ if (!fs::create_directories(dst_dir, ec) && !!ec) {
LOG(FATAL) << "Cannot create " << dst_dir << ": " << ec.message();
}
if (rename(src, dst) != 0) {
@@ -315,7 +315,7 @@
std::string dest = GetRamdiskPropForSecondStage();
std::string dir = android::base::Dirname(dest);
std::error_code ec;
- if (!fs::create_directories(dir, ec)) {
+ if (!fs::create_directories(dir, ec) && !!ec) {
LOG(FATAL) << "Can't mkdir " << dir << ": " << ec.message();
}
if (!fs::copy_file(kBootImageRamdiskProp, dest, ec)) {
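The added "&& !!ec" guard matters because std::filesystem::create_directories() returns false both on failure and when the directory already exists, and only the failure case sets the error_code; with the old check, hitting an already-existing directory would trigger the LOG(FATAL). A minimal standalone illustration (the path is an arbitrary scratch location):

    #include <filesystem>
    #include <iostream>

    int main() {
        namespace fs = std::filesystem;
        std::error_code ec;
        fs::create_directories("/tmp/demo_dir", ec);                 // creates it, returns true
        bool created = fs::create_directories("/tmp/demo_dir", ec);  // returns false, ec stays clear
        if (!created && ec) {
            std::cerr << "real failure: " << ec.message() << "\n";   // only this case is fatal
        } else if (!created) {
            std::cout << "already existed; not an error\n";
        }
        return 0;
    }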
diff --git a/init/host_init_verifier.cpp b/init/host_init_verifier.cpp
index ef9a451..db127d3 100644
--- a/init/host_init_verifier.cpp
+++ b/init/host_init_verifier.cpp
@@ -25,6 +25,8 @@
#include <fstream>
#include <iostream>
#include <iterator>
+#include <map>
+#include <set>
#include <string>
#include <vector>
@@ -51,6 +53,7 @@
using namespace std::literals;
+using android::base::EndsWith;
using android::base::ParseInt;
using android::base::ReadFileToString;
using android::base::Split;
@@ -61,6 +64,10 @@
static std::vector<std::string> passwd_files;
+// NOTE: Keep this in sync with the order used by init.cpp LoadBootScripts()
+static const std::vector<std::string> partition_search_order =
+ std::vector<std::string>({"system", "system_ext", "odm", "vendor", "product"});
+
static std::vector<std::pair<std::string, int>> GetVendorPasswd(const std::string& passwd_file) {
std::string passwd;
if (!ReadFileToString(passwd_file, &passwd)) {
@@ -148,13 +155,24 @@
#include "generated_stub_builtin_function_map.h"
void PrintUsage() {
- std::cout << "usage: host_init_verifier [options] <init rc file>\n"
- "\n"
- "Tests an init script for correctness\n"
- "\n"
- "-p FILE\tSearch this passwd file for users and groups\n"
- "--property_contexts=FILE\t Use this file for property_contexts\n"
- << std::endl;
+ fprintf(stdout, R"(usage: host_init_verifier [options]
+
+Tests init script(s) for correctness.
+
+Generic options:
+ -p FILE Search this passwd file for users and groups.
+ --property_contexts=FILE Use this file for property_contexts.
+
+Single script mode options:
+ [init rc file] Positional argument; test this init script.
+
+Multiple script mode options:
+ --out_system=DIR Path to the output product directory for the system partition.
+ --out_system_ext=DIR Path to the output product directory for the system_ext partition.
+ --out_odm=DIR Path to the output product directory for the odm partition.
+ --out_vendor=DIR Path to the output product directory for the vendor partition.
+ --out_product=DIR Path to the output product directory for the product partition.
+)");
}
Result<InterfaceInheritanceHierarchyMap> ReadInterfaceInheritanceHierarchy() {
@@ -203,12 +221,18 @@
android::base::SetMinimumLogSeverity(android::base::ERROR);
auto property_infos = std::vector<PropertyInfoEntry>();
+ std::map<std::string, std::string> partition_map;
while (true) {
static const char kPropertyContexts[] = "property-contexts=";
static const struct option long_options[] = {
{"help", no_argument, nullptr, 'h'},
{kPropertyContexts, required_argument, nullptr, 0},
+ {"out_system", required_argument, nullptr, 0},
+ {"out_system_ext", required_argument, nullptr, 0},
+ {"out_odm", required_argument, nullptr, 0},
+ {"out_vendor", required_argument, nullptr, 0},
+ {"out_product", required_argument, nullptr, 0},
{nullptr, 0, nullptr, 0},
};
@@ -224,6 +248,16 @@
if (long_options[option_index].name == kPropertyContexts) {
HandlePropertyContexts(optarg, &property_infos);
}
+ for (const auto& p : partition_search_order) {
+ if (long_options[option_index].name == "out_" + p) {
+ if (partition_map.find(p) != partition_map.end()) {
+ PrintUsage();
+ return EXIT_FAILURE;
+ }
+ partition_map[p] =
+ EndsWith(optarg, "/") ? optarg : std::string(optarg) + "/";
+ }
+ }
break;
case 'h':
PrintUsage();
@@ -240,7 +274,9 @@
argc -= optind;
argv += optind;
- if (argc != 1) {
+ // If provided, use the partition map to check multiple init rc files.
+ // Otherwise, check a single init rc file.
+ if ((!partition_map.empty() && argc != 0) || (partition_map.empty() && argc != 1)) {
PrintUsage();
return EXIT_FAILURE;
}
@@ -262,24 +298,42 @@
property_info_area = reinterpret_cast<const PropertyInfoArea*>(serialized_contexts.c_str());
+ if (!partition_map.empty()) {
+ std::vector<std::string> vendor_prefixes;
+ for (const auto& partition : {"vendor", "odm"}) {
+ if (partition_map.find(partition) != partition_map.end()) {
+ vendor_prefixes.push_back(partition_map.at(partition));
+ }
+ }
+ InitializeHostSubcontext(vendor_prefixes);
+ }
+
const BuiltinFunctionMap& function_map = GetBuiltinFunctionMap();
Action::set_function_map(&function_map);
ActionManager& am = ActionManager::GetInstance();
ServiceList& sl = ServiceList::GetInstance();
Parser parser;
- parser.AddSectionParser("service", std::make_unique<ServiceParser>(
- &sl, nullptr, *interface_inheritance_hierarchy_map));
- parser.AddSectionParser("on", std::make_unique<ActionParser>(&am, nullptr));
+ parser.AddSectionParser("service",
+ std::make_unique<ServiceParser>(&sl, GetSubcontext(),
+ *interface_inheritance_hierarchy_map));
+ parser.AddSectionParser("on", std::make_unique<ActionParser>(&am, GetSubcontext()));
parser.AddSectionParser("import", std::make_unique<HostImportParser>());
- if (!parser.ParseConfigFileInsecure(*argv)) {
- LOG(ERROR) << "Failed to open init rc script '" << *argv << "'";
- return EXIT_FAILURE;
+ if (!partition_map.empty()) {
+ for (const auto& p : partition_search_order) {
+ if (partition_map.find(p) != partition_map.end()) {
+ parser.ParseConfig(partition_map.at(p) + "etc/init");
+ }
+ }
+ } else {
+ if (!parser.ParseConfigFileInsecure(*argv)) {
+ LOG(ERROR) << "Failed to open init rc script '" << *argv << "'";
+ return EXIT_FAILURE;
+ }
}
size_t failures = parser.parse_error_count() + am.CheckAllCommands() + sl.CheckAllCommands();
if (failures > 0) {
- LOG(ERROR) << "Failed to parse init script '" << *argv << "' with " << failures
- << " errors";
+ LOG(ERROR) << "Failed to parse init scripts with " << failures << " error(s).";
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
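For reference, the verifier now has two modes: the existing single-script mode (one positional init rc file, as before) and a multiple-script mode in which no positional argument is given and each --out_<partition> directory is scanned for scripts under etc/init, in the same system, system_ext, odm, vendor, product order that init's LoadBootScripts() uses. When vendor or odm output directories are supplied, a host-side vendor subcontext is initialized so those scripts are checked against the correct builtin set.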
diff --git a/init/service.cpp b/init/service.cpp
index 7b98392..766eb5d 100644
--- a/init/service.cpp
+++ b/init/service.cpp
@@ -154,6 +154,7 @@
.priority = 0},
namespaces_{.flags = namespace_flags},
seclabel_(seclabel),
+ subcontext_(subcontext_for_restart_commands),
onrestart_(false, subcontext_for_restart_commands, "<Service '" + name + "' onrestart>", 0,
"onrestart", {}),
oom_score_adjust_(DEFAULT_OOM_SCORE_ADJUST),
diff --git a/init/service.h b/init/service.h
index bc5c90f..aee1e5d 100644
--- a/init/service.h
+++ b/init/service.h
@@ -137,6 +137,7 @@
flags_ &= ~SVC_ONESHOT;
}
}
+ Subcontext* subcontext() const { return subcontext_; }
private:
void NotifyStateChange(const std::string& new_state) const;
@@ -168,6 +169,7 @@
std::vector<FileDescriptor> files_;
std::vector<std::pair<std::string, std::string>> environment_vars_;
+ Subcontext* subcontext_;
Action onrestart_; // Commands to execute on restart.
std::vector<std::string> writepid_files_;
diff --git a/init/service_parser.cpp b/init/service_parser.cpp
index 97621da..57c311a 100644
--- a/init/service_parser.cpp
+++ b/init/service_parser.cpp
@@ -657,6 +657,14 @@
<< "' with a config in APEX";
}
+ std::string context = service_->subcontext() ? service_->subcontext()->context() : "";
+ std::string old_context =
+ old_service->subcontext() ? old_service->subcontext()->context() : "";
+ if (context != old_context) {
+ return Error() << "service '" << service_->name() << "' overrides another service "
+ << "across the treble boundary.";
+ }
+
service_list_->RemoveService(*old_service);
old_service = nullptr;
}
diff --git a/init/subcontext.cpp b/init/subcontext.cpp
index dc2455e..f1fbffe 100644
--- a/init/subcontext.cpp
+++ b/init/subcontext.cpp
@@ -342,6 +342,9 @@
new Subcontext(std::vector<std::string>{"/vendor", "/odm"}, kVendorContext));
}
}
+void InitializeHostSubcontext(std::vector<std::string> vendor_prefixes) {
+ subcontext.reset(new Subcontext(vendor_prefixes, kVendorContext, /*host=*/true));
+}
Subcontext* GetSubcontext() {
return subcontext.get();
diff --git a/init/subcontext.h b/init/subcontext.h
index 788d3be..cb4138e 100644
--- a/init/subcontext.h
+++ b/init/subcontext.h
@@ -36,9 +36,11 @@
class Subcontext {
public:
- Subcontext(std::vector<std::string> path_prefixes, std::string context)
+ Subcontext(std::vector<std::string> path_prefixes, std::string context, bool host = false)
: path_prefixes_(std::move(path_prefixes)), context_(std::move(context)), pid_(0) {
- Fork();
+ if (!host) {
+ Fork();
+ }
}
Result<void> Execute(const std::vector<std::string>& args);
@@ -61,6 +63,7 @@
int SubcontextMain(int argc, char** argv, const BuiltinFunctionMap* function_map);
void InitializeSubcontext();
+void InitializeHostSubcontext(std::vector<std::string> vendor_prefixes);
Subcontext* GetSubcontext();
bool SubcontextChildReap(pid_t pid);
void SubcontextTerminate();
diff --git a/libcutils/fs_config.cpp b/libcutils/fs_config.cpp
index 31e1679..79c3abc 100644
--- a/libcutils/fs_config.cpp
+++ b/libcutils/fs_config.cpp
@@ -203,9 +203,14 @@
CAP_MASK_LONG(CAP_SETGID),
"system/bin/simpleperf_app_runner" },
{ 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/e2fsck" },
- { 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/tune2fs" },
+#ifdef __LP64__
+ { 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/linker64" },
+#else
+ { 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/linker" },
+#endif
{ 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/resize2fs" },
{ 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/snapuserd" },
+ { 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/tune2fs" },
// generic defaults
{ 00755, AID_ROOT, AID_ROOT, 0, "bin/*" },
{ 00640, AID_ROOT, AID_SHELL, 0, "fstab.*" },
diff --git a/libprocessgroup/profiles/task_profiles.json b/libprocessgroup/profiles/task_profiles.json
index ea0064f..b528fa5 100644
--- a/libprocessgroup/profiles/task_profiles.json
+++ b/libprocessgroup/profiles/task_profiles.json
@@ -100,7 +100,7 @@
"Params":
{
"Controller": "cpu",
- "Path": ""
+ "Path": "system"
}
}
]
diff --git a/libsparse/Android.bp b/libsparse/Android.bp
index bf06bbc..860b9ae 100644
--- a/libsparse/Android.bp
+++ b/libsparse/Android.bp
@@ -4,6 +4,7 @@
name: "libsparse",
host_supported: true,
ramdisk_available: true,
+ vendor_ramdisk_available: true,
recovery_available: true,
unique_host_soname: true,
vendor_available: true,
diff --git a/rootdir/Android.mk b/rootdir/Android.mk
index c4c8768..2bceb75 100644
--- a/rootdir/Android.mk
+++ b/rootdir/Android.mk
@@ -69,7 +69,7 @@
EXPORT_GLOBAL_CLANG_COVERAGE_OPTIONS :=
ifeq ($(CLANG_COVERAGE),true)
- EXPORT_GLOBAL_CLANG_COVERAGE_OPTIONS := export LLVM_PROFILE_FILE /data/misc/trace/clang-%20m.profraw
+ EXPORT_GLOBAL_CLANG_COVERAGE_OPTIONS := export LLVM_PROFILE_FILE /data/misc/trace/clang-%p-%m.profraw
endif
# Put it here instead of in init.rc module definition,
diff --git a/rootdir/init.rc b/rootdir/init.rc
index fbb48e8..0e3e3a7 100644
--- a/rootdir/init.rc
+++ b/rootdir/init.rc
@@ -153,21 +153,56 @@
mkdir /dev/cpuctl/background
mkdir /dev/cpuctl/top-app
mkdir /dev/cpuctl/rt
+ mkdir /dev/cpuctl/system
chown system system /dev/cpuctl
chown system system /dev/cpuctl/foreground
chown system system /dev/cpuctl/background
chown system system /dev/cpuctl/top-app
chown system system /dev/cpuctl/rt
+ chown system system /dev/cpuctl/system
chown system system /dev/cpuctl/tasks
chown system system /dev/cpuctl/foreground/tasks
chown system system /dev/cpuctl/background/tasks
chown system system /dev/cpuctl/top-app/tasks
chown system system /dev/cpuctl/rt/tasks
+ chown system system /dev/cpuctl/system/tasks
chmod 0664 /dev/cpuctl/tasks
chmod 0664 /dev/cpuctl/foreground/tasks
chmod 0664 /dev/cpuctl/background/tasks
chmod 0664 /dev/cpuctl/top-app/tasks
chmod 0664 /dev/cpuctl/rt/tasks
+ chmod 0664 /dev/cpuctl/system/tasks
+
+ # Create a cpu group for NNAPI HAL processes
+ mkdir /dev/cpuctl/nnapi-hal
+ chown system system /dev/cpuctl/nnapi-hal
+ chown system system /dev/cpuctl/nnapi-hal/tasks
+ chmod 0664 /dev/cpuctl/nnapi-hal/tasks
+ write /dev/cpuctl/nnapi-hal/cpu.uclamp.min 1
+ write /dev/cpuctl/nnapi-hal/cpu.uclamp.latency_sensitive 1
+
+ # Android only uses global RT throttling and doesn't use CONFIG_RT_GROUP_SCHED
+ # for RT group throttling. These values here are just to make sure RT threads
+ # can be migrated to those groups. These settings can be removed once we migrate
+ # to the GKI kernel.
+ write /dev/cpuctl/cpu.rt_period_us 1000000
+ write /dev/cpuctl/cpu.rt_runtime_us 950000
+ # Surfaceflinger is in the FG group, so give it a bit more
+ write /dev/cpuctl/foreground/cpu.rt_runtime_us 450000
+ write /dev/cpuctl/foreground/cpu.rt_period_us 1000000
+ write /dev/cpuctl/background/cpu.rt_runtime_us 100000
+ write /dev/cpuctl/background/cpu.rt_period_us 1000000
+ write /dev/cpuctl/top-app/cpu.rt_runtime_us 100000
+ write /dev/cpuctl/top-app/cpu.rt_period_us 1000000
+ write /dev/cpuctl/rt/cpu.rt_runtime_us 100000
+ write /dev/cpuctl/rt/cpu.rt_period_us 1000000
+ write /dev/cpuctl/system/cpu.rt_runtime_us 100000
+ write /dev/cpuctl/system/cpu.rt_period_us 1000000
+ write /dev/cpuctl/nnapi-hal/cpu.rt_runtime_us 100000
+ write /dev/cpuctl/nnapi-hal/cpu.rt_period_us 1000000
+
+ # Migrate root group to system subgroup
+ copy_per_line /dev/cpuctl/tasks /dev/cpuctl/system/tasks
# Create an stune group for NNAPI HAL processes
mkdir /dev/stune/nnapi-hal
@@ -177,14 +212,6 @@
write /dev/stune/nnapi-hal/schedtune.boost 1
write /dev/stune/nnapi-hal/schedtune.prefer_idle 1
- # cpuctl hierarchy for devices using utilclamp
- mkdir /dev/cpuctl/nnapi-hal
- chown system system /dev/cpuctl/nnapi-hal
- chown system system /dev/cpuctl/nnapi-hal/tasks
- chmod 0664 /dev/cpuctl/nnapi-hal/tasks
- write /dev/cpuctl/nnapi-hal/cpu.uclamp.min 1
- write /dev/cpuctl/nnapi-hal/cpu.uclamp.latency_sensitive 1
-
# Create blkio group and apply initial settings.
# This feature needs kernel to support it, and the
# device's init.rc must actually set the correct values.
@@ -302,8 +329,6 @@
chown system system /dev/cpuctl
chown system system /dev/cpuctl/tasks
chmod 0666 /dev/cpuctl/tasks
- write /dev/cpuctl/cpu.rt_period_us 1000000
- write /dev/cpuctl/cpu.rt_runtime_us 950000
# sets up initial cpusets for ActivityManager
# this ensures that the cpusets are present and usable, but the device's
@@ -808,6 +833,10 @@
wait_for_prop apexd.status activated
perform_apex_config
+ # After apexes are mounted, tell keymaster early boot has ended, so it will
+ # stop allowing use of early-boot keys
+ exec - system system -- /system/bin/vdc keymaster early-boot-ended
+
# Special-case /data/media/obb per b/64566063
mkdir /data/media 0770 media_rw media_rw encryption=None
exec - media_rw media_rw -- /system/bin/chattr +F /data/media
@@ -1147,3 +1176,7 @@
on property:sys.boot_completed=1 && property:sys.init.userspace_reboot.in_progress=1
setprop sys.init.userspace_reboot.in_progress ""
+
+# Migrate tasks again in case kernel threads are created during boot
+on property:sys.boot_completed=1
+ copy_per_line /dev/cpuctl/tasks /dev/cpuctl/system/tasks
diff --git a/trusty/libtrusty/trusty.c b/trusty/libtrusty/trusty.c
index ad4d8cd..f44f8b4 100644
--- a/trusty/libtrusty/trusty.c
+++ b/trusty/libtrusty/trusty.c
@@ -29,30 +29,27 @@
#include <trusty/ipc.h>
-int tipc_connect(const char *dev_name, const char *srv_name)
-{
- int fd;
- int rc;
+int tipc_connect(const char* dev_name, const char* srv_name) {
+ int fd;
+ int rc;
- fd = open(dev_name, O_RDWR);
- if (fd < 0) {
- rc = -errno;
- ALOGE("%s: cannot open tipc device \"%s\": %s\n",
- __func__, dev_name, strerror(errno));
- return rc < 0 ? rc : -1;
- }
+ fd = TEMP_FAILURE_RETRY(open(dev_name, O_RDWR));
+ if (fd < 0) {
+ rc = -errno;
+ ALOGE("%s: cannot open tipc device \"%s\": %s\n", __func__, dev_name, strerror(errno));
+ return rc < 0 ? rc : -1;
+ }
- rc = ioctl(fd, TIPC_IOC_CONNECT, srv_name);
- if (rc < 0) {
- rc = -errno;
- ALOGE("%s: can't connect to tipc service \"%s\" (err=%d)\n",
- __func__, srv_name, errno);
- close(fd);
- return rc < 0 ? rc : -1;
- }
+ rc = TEMP_FAILURE_RETRY(ioctl(fd, TIPC_IOC_CONNECT, srv_name));
+ if (rc < 0) {
+ rc = -errno;
+ ALOGE("%s: can't connect to tipc service \"%s\" (err=%d)\n", __func__, srv_name, errno);
+ close(fd);
+ return rc < 0 ? rc : -1;
+ }
- ALOGV("%s: connected to \"%s\" fd %d\n", __func__, srv_name, fd);
- return fd;
+ ALOGV("%s: connected to \"%s\" fd %d\n", __func__, srv_name, fd);
+ return fd;
}
ssize_t tipc_send(int fd, const struct iovec* iov, int iovcnt, struct trusty_shm* shms,
@@ -63,7 +60,7 @@
req.shm = (__u64)shms;
req.shm_cnt = (__u64)shmcnt;
- int rc = ioctl(fd, TIPC_IOC_SEND_MSG, &req);
+ int rc = TEMP_FAILURE_RETRY(ioctl(fd, TIPC_IOC_SEND_MSG, &req));
if (rc < 0) {
ALOGE("%s: failed to send message (err=%d)\n", __func__, rc);
}
@@ -71,7 +68,6 @@
return rc;
}
-void tipc_close(int fd)
-{
- close(fd);
+void tipc_close(int fd) {
+ close(fd);
}
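For context, TEMP_FAILURE_RETRY (provided by bionic's unistd.h) simply retries a call that fails with EINTR, so tipc_connect() and tipc_send() no longer fail spuriously when a signal interrupts open() or ioctl(). Roughly equivalent, expressed as a hypothetical C++ helper:

    #include <cerrno>

    // Keep retrying a syscall-style call while it reports EINTR; otherwise return its result.
    template <typename Call>
    auto RetryOnEintr(Call call) -> decltype(call()) {
        decltype(call()) rc;
        do {
            rc = call();
        } while (rc == -1 && errno == EINTR);
        return rc;
    }

    // e.g. fd = RetryOnEintr([&] { return open(dev_name, O_RDWR); });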
diff --git a/trusty/trusty-test.mk b/trusty/trusty-test.mk
index dc4c962..74106ec 100644
--- a/trusty/trusty-test.mk
+++ b/trusty/trusty-test.mk
@@ -15,4 +15,5 @@
PRODUCT_PACKAGES += \
spiproxyd \
trusty_keymaster_set_attestation_key \
- keymaster_soft_attestation_keys.xml \
\ No newline at end of file
+ keymaster_soft_attestation_keys.xml \
+