//
// Copyright (C) 2017 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/squashfs_filesystem.h"

#include <fcntl.h>

#include <algorithm>
#include <string>

#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
#include <brillo/streams/file_stream.h>

#include "update_engine/common/subprocess.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/extent_utils.h"
#include "update_engine/update_metadata.pb.h"

using std::string;
using std::unique_ptr;
using std::vector;

namespace chromeos_update_engine {

namespace {

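// Returns an extent of |block_size|-byte blocks that covers the byte range
// [start_bytes, start_bytes + size_bytes).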
Extent ExtentForBytes(uint64_t block_size,
                      uint64_t start_bytes,
                      uint64_t size_bytes) {
  uint64_t start_block = start_bytes / block_size;
  uint64_t end_block = (start_bytes + size_bytes + block_size - 1) / block_size;
  return ExtentForRange(start_block, end_block - start_block);
}

// The size of the squashfs super block.
constexpr size_t kSquashfsSuperBlockSize = 96;
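// In the squashfs format, a data block's size field has this bit set when the
// block is stored uncompressed; Init() masks it off to recover the block's
// size in bytes.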
constexpr uint64_t kSquashfsCompressedBit = 1 << 24;

bool ReadSquashfsHeader(const brillo::Blob blob,
                        SquashfsFilesystem::SquashfsHeader* header) {
  if (blob.size() < kSquashfsSuperBlockSize) {
    return false;
  }

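  // Copy the fields used here from their fixed offsets in the squashfs v4
  // super block: magic at byte 0, block_size at 12, compression type at 20,
  // and major version at 28.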
  memcpy(&header->magic, blob.data(), 4);
  memcpy(&header->block_size, blob.data() + 12, 4);
  memcpy(&header->compression_type, blob.data() + 20, 2);
  memcpy(&header->major_version, blob.data() + 28, 2);
  return true;
}

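// A valid image starts with the little-endian squashfs magic 0x73717368
// ("hsqs"); only major version 4 is supported here.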
bool CheckHeader(const SquashfsFilesystem::SquashfsHeader& header) {
  return header.magic == 0x73717368 && header.major_version == 4;
}

bool GetFileMapContent(const string& sqfs_path, string* map) {
  // Create a tmp file
  string map_file;
  TEST_AND_RETURN_FALSE(
      utils::MakeTempFile("squashfs_file_map.XXXXXX", &map_file, nullptr));
  ScopedPathUnlinker map_unlinker(map_file);

  // Run unsquashfs to get the system file map.
  // unsquashfs -m <map-file> <squashfs-file>
  vector<string> cmd = {"unsquashfs", "-m", map_file, sqfs_path};
  string stdout;
  int exit_code;
  if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout) ||
      exit_code != 0) {
    LOG(ERROR) << "Failed to run unsquashfs -m. The stdout content was: "
               << stdout;
    return false;
  }
  TEST_AND_RETURN_FALSE(utils::ReadFile(map_file, map));
  return true;
}

}  // namespace

bool SquashfsFilesystem::Init(const string& map,
                              size_t size,
                              const SquashfsHeader& header) {
  size_ = size;
  // Read the file map. For its format, see the comments for
  // |CreateFromFileMap()|.
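  // Each line is expected to have the form:
  //   <file path> <byte offset of the file's data> <size of block 1> ...
  // where the block sizes may have kSquashfsCompressedBit set.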
  auto lines = base::SplitStringPiece(map,
                                      "\n",
                                      base::WhitespaceHandling::KEEP_WHITESPACE,
                                      base::SplitResult::SPLIT_WANT_NONEMPTY);
  for (const auto& line : lines) {
    auto splits =
        base::SplitStringPiece(line,
                               " \t",
                               base::WhitespaceHandling::TRIM_WHITESPACE,
                               base::SplitResult::SPLIT_WANT_NONEMPTY);
    // A line with only a file name (and no start offset) is invalid.
    TEST_AND_RETURN_FALSE(splits.size() > 1);
    uint64_t start;
    TEST_AND_RETURN_FALSE(base::StringToUint64(splits[1], &start));
    uint64_t cur_offset = start;
    for (size_t i = 2; i < splits.size(); ++i) {
      uint64_t blk_size;
      TEST_AND_RETURN_FALSE(base::StringToUint64(splits[i], &blk_size));
      // TODO(ahassani): For puffin push it into a proper list if uncompressed.
      auto new_blk_size = blk_size & ~kSquashfsCompressedBit;
      TEST_AND_RETURN_FALSE(new_blk_size <= header.block_size);
      cur_offset += new_blk_size;
    }

    // If the size is zero, do not add the file.
    if (cur_offset - start > 0) {
      File file;
      file.name = splits[0].as_string();
      file.extents = {ExtentForBytes(kBlockSize, start, cur_offset - start)};
      files_.emplace_back(file);
    }
  }

  // Sort all files by their offset in the squashfs.
  std::sort(files_.begin(), files_.end(), [](const File& a, const File& b) {
    return a.extents[0].start_block() < b.extents[0].start_block();
  });
  // If two consecutive extents overlap, resolve the collision by dropping or
  // truncating one of the two files. Here we are assuming all files have
  // exactly one extent. If this assumption changes, this implementation needs
  // to change too.
  for (auto first = files_.begin(), second = first + 1;
       first != files_.end() && second != files_.end();
       second = first + 1) {
    auto first_begin = first->extents[0].start_block();
    auto first_end = first_begin + first->extents[0].num_blocks();
    auto second_begin = second->extents[0].start_block();
    auto second_end = second_begin + second->extents[0].num_blocks();
    // Remove the first file if its size is zero.
    if (first_end == first_begin) {
      first = files_.erase(first);
    } else if (first_end > second_begin) {  // We found a collision.
      if (second_end <= first_end) {
        // The second file is inside the first file; remove the second file.
        second = files_.erase(second);
      } else if (first_begin == second_begin) {
        // The first file is inside the second file; remove the first file.
        first = files_.erase(first);
      } else {
        // Remove the overlapping extents from the first file.
        first->extents[0].set_num_blocks(second_begin - first_begin);
        ++first;
      }
    } else {
      ++first;
    }
  }

  // Find all the metadata, including the superblock, and add them to the list
  // of files.
  ExtentRanges file_extents;
  for (const auto& file : files_) {
    file_extents.AddExtents(file.extents);
  }
  vector<Extent> full = {
      ExtentForRange(0, (size_ + kBlockSize - 1) / kBlockSize)};
  auto metadata_extents = FilterExtentRanges(full, file_extents);
  // For now there should be at most two extents: one for the superblock and
  // one for the metadata at the end. Just create an appropriately named
  // <metadata-i> file for each. We could add all these extents as a single
  // metadata file, but that would violate the contiguous-write optimization.
  for (size_t i = 0; i < metadata_extents.size(); i++) {
    File file;
    file.name = "<metadata-" + std::to_string(i) + ">";
    file.extents = {metadata_extents[i]};
    files_.emplace_back(file);
  }

  // Do one last sort before returning.
  std::sort(files_.begin(), files_.end(), [](const File& a, const File& b) {
    return a.extents[0].start_block() < b.extents[0].start_block();
  });
  return true;
}

unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFile(
    const string& sqfs_path) {
  if (sqfs_path.empty())
    return nullptr;

  brillo::StreamPtr sqfs_file =
      brillo::FileStream::Open(base::FilePath(sqfs_path),
                               brillo::Stream::AccessMode::READ,
                               brillo::FileStream::Disposition::OPEN_EXISTING,
                               nullptr);
  if (!sqfs_file) {
    LOG(ERROR) << "Unable to open " << sqfs_path << " for reading.";
    return nullptr;
  }

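  // Read just the super block to verify that this is a squashfs image and to
  // extract the fields |Init()| needs.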
  SquashfsHeader header;
  brillo::Blob blob(kSquashfsSuperBlockSize);
  if (!sqfs_file->ReadAllBlocking(blob.data(), blob.size(), nullptr)) {
    LOG(ERROR) << "Unable to read from file: " << sqfs_path;
    return nullptr;
  }
  if (!ReadSquashfsHeader(blob, &header) || !CheckHeader(header)) {
    // This is not necessarily an error.
    return nullptr;
  }

  // Read the map file.
  string filemap;
  if (!GetFileMapContent(sqfs_path, &filemap)) {
    LOG(ERROR) << "Failed to produce squashfs map file: " << sqfs_path;
    return nullptr;
  }

  unique_ptr<SquashfsFilesystem> sqfs(new SquashfsFilesystem());
  if (!sqfs->Init(filemap, sqfs_file->GetSize(), header)) {
    LOG(ERROR) << "Failed to initialize the Squashfs file system";
    return nullptr;
  }
  // TODO(ahassani): Add a function that initializes the puffin related extents.
  return sqfs;
}

unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFileMap(
    const string& filemap, size_t size, const SquashfsHeader& header) {
  if (!CheckHeader(header)) {
    LOG(ERROR) << "Invalid Squashfs super block!";
    return nullptr;
  }

  unique_ptr<SquashfsFilesystem> sqfs(new SquashfsFilesystem());
  if (!sqfs->Init(filemap, size, header)) {
    LOG(ERROR) << "Failed to initialize the Squashfs file system using filemap";
    return nullptr;
  }
  // TODO(ahassani): Add a function that initializes the puffin related extents.
  return sqfs;
}

size_t SquashfsFilesystem::GetBlockSize() const {
  return kBlockSize;
}

size_t SquashfsFilesystem::GetBlockCount() const {
  return size_ / kBlockSize;
}

bool SquashfsFilesystem::GetFiles(vector<File>* files) const {
  files->insert(files->end(), files_.begin(), files_.end());
  return true;
}

bool SquashfsFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
  // Settings not supported in squashfs.
  LOG(ERROR) << "squashfs doesn't support LoadSettings().";
  return false;
}

bool SquashfsFilesystem::IsSquashfsImage(const brillo::Blob& blob) {
  SquashfsHeader header;
  return ReadSquashfsHeader(blob, &header) && CheckHeader(header);
}

}  // namespace chromeos_update_engine