/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorIon"
#include <utils/Log.h>

#include <list>

#include <ion/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup

#include <C2AllocatorIon.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>

namespace android {

namespace {
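    // Upper bound on cached (usage, capacity) -> (align, heapMask, flags)
    // entries kept by C2AllocatorIon::mapUsage() below.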
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
}

/* size_t <=> int(lo), int(hi) conversions */
constexpr inline int size2intLo(size_t s) {
    return int(s & 0xFFFFFFFF);
}

constexpr inline int size2intHi(size_t s) {
    // cast to uint64_t as size_t may be 32 bits wide
    return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
}

constexpr inline size_t ints2size(int intLo, int intHi) {
    // convert in 2 stages to 64 bits as intHi may be negative
    return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
}
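
// Worked example: for s = 0x123456789, size2intLo(s) == 0x23456789 and
// size2intHi(s) == 0x1; ints2size(0x23456789, 0x1) reassembles 0x123456789.
// The unsigned casts keep the round trip exact even when a half has its
// sign bit set.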

/* ========================================= ION HANDLE ======================================== */
/**
 * ION handle
 *
 * There can be only one ion client per process; this is captured in the ion fd that is passed
 * to the constructor, but it should be managed by the ion buffer allocator/mapper.
 *
 * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
 * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
 * a refcount.
 *
 * This handle will not capture mapped fds as updating that would require a global mutex.
 */

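// Layout note (as used below): C2HandleIon, declared in C2HandleIonInternal.h,
// carries the shared buffer fd as its single fd, plus ints holding a magic
// value and the capacity split via size2intLo/size2intHi.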
const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};

// static
bool C2HandleIon::isValid(const C2Handle * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
    return other->mInts.mMagic == kMagic;
}

// TODO: is the dup of an ion fd identical to ion_share?

/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
            size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
            void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

protected:
    class Impl;
    class ImplV2;
    Impl *mImpl;

    // TODO: we could make this encapsulate shared_ptr and be copyable
    C2_DO_NOT_COPY(C2AllocationIon);
};

class C2AllocationIon::Impl {
protected:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd    ion client (ownership transferred to created object)
     * \param capacity size of allocation
     * \param bufferFd buffer handle (ownership transferred to created object). Must be
     *                 invalid if err is not 0.
     * \param buffer   ion buffer user handle (ownership transferred to created object). Must be
     *                 invalid if err is not 0.
     * \param err      errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd    ion client (ownership transferred to created object)
     * \param capacity size of allocation
     * \param bufferFd buffer handle (ownership transferred to created object)
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd    ion client (ownership transferred to created object)
     * \param size     size of allocation
     * \param align    desired alignment of allocation
     * \param heapMask mask of heaps considered
     * \param flags    ion allocation flags
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);

    c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
        (void)fence; // TODO: wait for fence
        *addr = nullptr;
        if (!mMappings.empty()) {
            ALOGV("multiple map");
            // TODO: technically we should return DUPLICATE here, but our block views don't
            // actually unmap, so we end up remapping an ion buffer multiple times.
            //
            // return C2_DUPLICATE;
        }
        if (size == 0) {
            return C2_BAD_VALUE;
        }

        int prot = PROT_NONE;
        int flags = MAP_SHARED;
        if (usage.expected & C2MemoryUsage::CPU_READ) {
            prot |= PROT_READ;
        }
        if (usage.expected & C2MemoryUsage::CPU_WRITE) {
            prot |= PROT_WRITE;
        }

        size_t alignmentBytes = offset % PAGE_SIZE;
        size_t mapOffset = offset - alignmentBytes;
        size_t mapSize = size + alignmentBytes;
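        // Example: with 4096-byte pages, offset = 5000 yields alignmentBytes =
        // 904, so we map from mapOffset = 4096 and hand the caller an address
        // 904 bytes into the mapping.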
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
        if (map.addr) {
            std::lock_guard<std::mutex> guard(mMutexMappings);
            mMappings.push_back(map);
        }
        return err;
    }

    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        if (mMappings.empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
        { // Scope for the lock_guard of mMutexMappings.
            std::lock_guard<std::mutex> guard(mMutexMappings);
            for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
                if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                        size + it->alignmentBytes != it->size) {
                    continue;
                }
                int err = munmap(it->addr, it->size);
                if (err != 0) {
                    ALOGD("munmap failed");
                    return c2_map_errno<EINVAL>(errno);
                }
                if (fence) {
                    *fence = C2Fence(); // not using fences
                }
                (void)mMappings.erase(it);
                ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size,
                      mHandle.bufferFd());
                return C2_OK;
            }
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }

    virtual ~Impl() {
        if (!mMappings.empty()) {
            ALOGD("Dangling mappings!");
            std::lock_guard<std::mutex> guard(mMutexMappings);
            for (const Mapping &map : mMappings) {
                (void)munmap(map.addr, map.size);
            }
        }
        if (mMapFd >= 0) {
            close(mMapFd);
            mMapFd = -1;
        }
        if (mInit == C2_OK) {
            if (mBuffer >= 0) {
                (void)ion_free(mIonFd, mBuffer);
            }
            native_handle_close(&mHandle);
        }
        if (mIonFd >= 0) {
            close(mIonFd);
        }
    }

    c2_status_t status() const {
        return mInit;
    }

    const C2Handle *handle() const {
        return &mHandle;
    }

    C2Allocator::id_t getAllocatorId() const {
        return mId;
    }

    virtual ion_user_handle_t ionHandle() const {
        return mBuffer;
    }

protected:
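    /**
     * Maps the buffer via the legacy ion API. The first map goes through
     * ion_map(), which also returns a map fd that is cached in mMapFd; later
     * maps mmap() that cached fd directly. *base receives the page-aligned
     * mapping and *addr the caller-visible address offset by alignmentBytes.
     */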
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        if (mMapFd == -1) {
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)base, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        } else {
            *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
            ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
                  "returned (%d)",
                  mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (*base == MAP_FAILED) {
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        }
        return err;
    }

    int mIonFd;
    C2HandleIon mHandle;
    ion_user_handle_t mBuffer;
    C2Allocator::id_t mId;
    c2_status_t mInit;
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;
        size_t alignmentBytes;
        size_t size;
    };
    std::list<Mapping> mMappings;
    std::mutex mMutexMappings;
};

class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
public:
    /**
     * Constructs an ion allocation for platforms with the new (ion_4.12.h) API.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd    ion client (ownership transferred to created object)
     * \param capacity size of allocation
     * \param bufferFd buffer handle (ownership transferred to created object). Must be
     *                 invalid if err is not 0.
     * \param err      errno during buffer allocation or import
     */
    ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
        : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
    }

    virtual ~ImplV2() = default;

    virtual ion_user_handle_t ionHandle() const {
        return mHandle.bufferFd();
    }

protected:
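    /**
     * With the new ion API there is no ion_user_handle_t and no separate
     * map fd; the shared buffer fd itself is mmap()ed on every map call.
     */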
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t *)*base + alignmentBytes;
        }
        return err;
    }

};

C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
        C2Allocator::id_t id) {
    int ret = 0;
    if (ion_is_legacy(ionFd)) {
        ion_user_handle_t buffer = -1;
        ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    } else {
        return new ImplV2(ionFd, capacity, bufferFd, id, ret);
    }
}

C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
    int bufferFd = -1;
    ion_user_handle_t buffer = -1;
    size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
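    // e.g. size = 5000 with align = 4096 rounds up to alignedSize = 8192;
    // the mask arithmetic assumes align is a power of two.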
    int ret;

    if (ion_is_legacy(ionFd)) {
        ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
        ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, heapMask = %u, flags = %u) "
              "returned (%d) ; buffer = %d",
              ionFd, alignedSize, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);

    } else {
        ret = ion_alloc_fd(ionFd, alignedSize, align, heapMask, flags, &bufferFd);
        ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, heapMask = %u, flags = %u) "
              "returned (%d) ; bufferFd = %d",
              ionFd, alignedSize, align, heapMask, flags, ret, bufferFd);

        return new ImplV2(ionFd, alignedSize, bufferFd, id, ret);
    }
}

c2_status_t C2AllocationIon::map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}

c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}

c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}

C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}

bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
    return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
}

const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}

C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }

/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        switch (errno) {
        case ENOENT: mInit = C2_OMITTED; break;
        default: mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}

C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}

C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}

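// Combines the usage bits and aligned capacity of a cache key by XOR-ing
// their hashes; collisions are acceptable as the unordered_map still
// compares keys for equality.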
std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

c2_status_t C2AllocatorIon::mapUsage(
        C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
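    // e.g. with a 4096-byte block size, capacity = 100 is rounded up to 4096,
    // so all sub-block requests with the same usage share one cache entry.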
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, align, heapMask, flags);
        } else {
            *align = 0; // TODO make this 1
            *heapMask = ~0; // default mask
            if (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)) {
                *flags = ION_FLAG_CACHED; // cache CPU accessed buffers
            } else {
                *flags = 0; // default flags
            }
            res = C2_NO_INIT;
        }
        // add usage to cache
        MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // remove the least-recently used entry at the back of the list
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue &value = entry->second->second;
        std::tie(*align, *heapMask, *flags, res) = value;
    }
    return res;
}

c2_status_t C2AllocatorIon::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    size_t align = 0;
    unsigned heapMask = ~0;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}
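
// Typical use, as a hypothetical caller sketch: allocate a CPU-accessible
// block and map it for reading/writing.
//
//     std::shared_ptr<C2LinearAllocation> alloc;
//     C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
//     if (allocator->newLinearAllocation(4096, usage, &alloc) == C2_OK) {
//         void *addr = nullptr;
//         if (alloc->map(0 /* offset */, 4096, usage, nullptr /* fence */, &addr) == C2_OK) {
//             // ... access the buffer through addr ...
//             alloc->unmap(addr, 4096, nullptr);
//         }
//     }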

c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::isValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

bool C2AllocatorIon::isValid(const C2Handle* const o) {
    return C2HandleIon::isValid(o);
}

} // namespace android