/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "BasicHashtable"

#include <math.h>

#include <utils/Log.h>
#include <utils/BasicHashtable.h>
#include <utils/misc.h>

#include "SharedBuffer.h"

namespace android {

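// Computes the initial bucket count and capacity from the requested minimum
// capacity and load factor.  Bucket storage itself is allocated lazily on the
// first add().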
BasicHashtableImpl::BasicHashtableImpl(size_t entrySize, bool hasTrivialDestructor,
        size_t minimumInitialCapacity, float loadFactor) :
        mBucketSize(entrySize + sizeof(Bucket)), mHasTrivialDestructor(hasTrivialDestructor),
        mLoadFactor(loadFactor), mSize(0),
        mFilledBuckets(0), mBuckets(NULL) {
    determineCapacity(minimumInitialCapacity, mLoadFactor, &mBucketCount, &mCapacity);
}

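// Copy constructor: shares the other table's bucket storage by acquiring a
// reference on its SharedBuffer.  A private copy is made later, in edit(),
// only if this table is mutated.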
BasicHashtableImpl::BasicHashtableImpl(const BasicHashtableImpl& other) :
        mBucketSize(other.mBucketSize), mHasTrivialDestructor(other.mHasTrivialDestructor),
        mCapacity(other.mCapacity), mLoadFactor(other.mLoadFactor),
        mSize(other.mSize), mFilledBuckets(other.mFilledBuckets),
        mBucketCount(other.mBucketCount), mBuckets(other.mBuckets) {
    if (mBuckets) {
        SharedBuffer::bufferFromData(mBuckets)->acquire();
    }
}

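// The destructor is intentionally empty; bucket storage is released through
// dispose() instead, which runs while the virtual entry hooks (such as
// destroyBucketEntry) are still available to the caller.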
BasicHashtableImpl::~BasicHashtableImpl()
{
}

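// Copy-on-write hook: if the bucket storage is shared with another table,
// replace it with a private clone before mutating.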
void BasicHashtableImpl::edit() {
    if (mBuckets && !SharedBuffer::bufferFromData(mBuckets)->onlyOwner()) {
        clone();
    }
}

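// Releases this table's reference on the bucket storage, destroying the
// entries if this was the last owner.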
void BasicHashtableImpl::dispose() {
    if (mBuckets) {
        releaseBuckets(mBuckets, mBucketCount);
    }
}

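// Replaces the (possibly shared) bucket storage with a private copy of the
// same size.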
void BasicHashtableImpl::clone() {
    if (mBuckets) {
        void* newBuckets = allocateBuckets(mBucketCount);
        copyBuckets(mBuckets, newBuckets, mBucketCount);
        releaseBuckets(mBuckets, mBucketCount);
        mBuckets = newBuckets;
    }
}

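// Assignment helper: drops the current bucket storage and shares the other
// table's storage, mirroring the copy constructor.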
void BasicHashtableImpl::setTo(const BasicHashtableImpl& other) {
    if (mBuckets) {
        releaseBuckets(mBuckets, mBucketCount);
    }

    mCapacity = other.mCapacity;
    mLoadFactor = other.mLoadFactor;
    mSize = other.mSize;
    mFilledBuckets = other.mFilledBuckets;
    mBucketCount = other.mBucketCount;
    mBuckets = other.mBuckets;

    if (mBuckets) {
        SharedBuffer::bufferFromData(mBuckets)->acquire();
    }
}

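// Removes all entries.  Privately owned storage is reset in place; shared
// storage is released and will be reallocated lazily on the next add().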
void BasicHashtableImpl::clear() {
    if (mBuckets) {
        if (mFilledBuckets) {
            SharedBuffer* sb = SharedBuffer::bufferFromData(mBuckets);
            if (sb->onlyOwner()) {
                destroyBuckets(mBuckets, mBucketCount);
                for (size_t i = 0; i < mBucketCount; i++) {
                    Bucket& bucket = bucketAt(mBuckets, i);
                    bucket.cookie = 0;
                }
            } else {
                releaseBuckets(mBuckets, mBucketCount);
                mBuckets = NULL;
            }
            mFilledBuckets = 0;
        }
        mSize = 0;
    }
}

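// Returns the index of the first occupied bucket after 'index', or -1 when
// there are no further entries.  Pass -1 to begin iteration.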
ssize_t BasicHashtableImpl::next(ssize_t index) const {
    if (mSize) {
        while (size_t(++index) < mBucketCount) {
            const Bucket& bucket = bucketAt(mBuckets, index);
            if (bucket.cookie & Bucket::PRESENT) {
                return index;
            }
        }
    }
    return -1;
}

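// Searches for an entry with the given hash and key.  Pass index = -1 to start
// a new search from the head of the probe chain, or a previous result to find
// the next match.  Probing uses double hashing and stops at the first bucket
// whose COLLISION bit is clear.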
ssize_t BasicHashtableImpl::find(ssize_t index, hash_t hash,
        const void* __restrict__ key) const {
    if (!mSize) {
        return -1;
    }

    hash = trimHash(hash);
    if (index < 0) {
        index = chainStart(hash, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            if (compareBucketKey(bucket, key)) {
                return index;
            }
        } else {
            if (!(bucket.cookie & Bucket::COLLISION)) {
                return -1;
            }
        }
    }

    size_t inc = chainIncrement(hash, mBucketCount);
    for (;;) {
        index = chainSeek(index, inc, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            if ((bucket.cookie & Bucket::HASH_MASK) == hash
                    && compareBucketKey(bucket, key)) {
                return index;
            }
        }
        if (!(bucket.cookie & Bucket::COLLISION)) {
            return -1;
        }
    }
}

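// Inserts an entry with the given hash, growing the table (rehash) whenever
// filling a previously empty bucket would exceed the capacity.  Returns the
// index of the bucket that received the entry.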
size_t BasicHashtableImpl::add(hash_t hash, const void* entry) {
    if (!mBuckets) {
        mBuckets = allocateBuckets(mBucketCount);
    } else {
        edit();
    }

    hash = trimHash(hash);
    for (;;) {
        size_t index = chainStart(hash, mBucketCount);
        Bucket* bucket = &bucketAt(mBuckets, size_t(index));
        if (bucket->cookie & Bucket::PRESENT) {
            size_t inc = chainIncrement(hash, mBucketCount);
            do {
                bucket->cookie |= Bucket::COLLISION;
                index = chainSeek(index, inc, mBucketCount);
                bucket = &bucketAt(mBuckets, size_t(index));
            } while (bucket->cookie & Bucket::PRESENT);
        }

        uint32_t collision = bucket->cookie & Bucket::COLLISION;
        if (!collision) {
            if (mFilledBuckets >= mCapacity) {
                rehash(mCapacity * 2, mLoadFactor);
                continue;
            }
            mFilledBuckets += 1;
        }

        bucket->cookie = collision | Bucket::PRESENT | hash;
        mSize += 1;
        initializeBucketEntry(*bucket, entry);
        return index;
    }
}

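// Removes the entry at the given bucket index.  The bucket's COLLISION bit is
// preserved so probe chains that pass through it remain valid.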
void BasicHashtableImpl::removeAt(size_t index) {
    edit();

    Bucket& bucket = bucketAt(mBuckets, index);
    bucket.cookie &= ~Bucket::PRESENT;
    if (!(bucket.cookie & Bucket::COLLISION)) {
        mFilledBuckets -= 1;
    }
    mSize -= 1;
    if (!mHasTrivialDestructor) {
        destroyBucketEntry(bucket);
    }
}

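// Resizes the table so it can hold at least minimumCapacity entries at the
// given load factor, re-inserting every present entry into a freshly
// allocated bucket array.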
void BasicHashtableImpl::rehash(size_t minimumCapacity, float loadFactor) {
    if (minimumCapacity < mSize) {
        minimumCapacity = mSize;
    }
    size_t newBucketCount, newCapacity;
    determineCapacity(minimumCapacity, loadFactor, &newBucketCount, &newCapacity);

    if (newBucketCount != mBucketCount || newCapacity != mCapacity) {
        if (mBuckets) {
            void* newBuckets;
            if (mSize) {
                newBuckets = allocateBuckets(newBucketCount);
                for (size_t i = 0; i < mBucketCount; i++) {
                    const Bucket& fromBucket = bucketAt(mBuckets, i);
                    if (fromBucket.cookie & Bucket::PRESENT) {
                        hash_t hash = fromBucket.cookie & Bucket::HASH_MASK;
                        size_t index = chainStart(hash, newBucketCount);
                        Bucket* toBucket = &bucketAt(newBuckets, size_t(index));
                        if (toBucket->cookie & Bucket::PRESENT) {
                            size_t inc = chainIncrement(hash, newBucketCount);
                            do {
                                toBucket->cookie |= Bucket::COLLISION;
                                index = chainSeek(index, inc, newBucketCount);
                                toBucket = &bucketAt(newBuckets, size_t(index));
                            } while (toBucket->cookie & Bucket::PRESENT);
                        }
                        toBucket->cookie = Bucket::PRESENT | hash;
                        initializeBucketEntry(*toBucket, fromBucket.entry);
                    }
                }
            } else {
                newBuckets = NULL;
            }
            releaseBuckets(mBuckets, mBucketCount);
            mBuckets = newBuckets;
            mFilledBuckets = mSize;
        }
        mBucketCount = newBucketCount;
        mCapacity = newCapacity;
    }
    mLoadFactor = loadFactor;
}

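// Allocates a SharedBuffer large enough for 'count' buckets and marks every
// bucket as empty.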
void* BasicHashtableImpl::allocateBuckets(size_t count) const {
    size_t bytes = count * mBucketSize;
    SharedBuffer* sb = SharedBuffer::alloc(bytes);
    LOG_ALWAYS_FATAL_IF(!sb, "Could not allocate %u bytes for hashtable with %u buckets.",
            uint32_t(bytes), uint32_t(count));
    void* buckets = sb->data();
    for (size_t i = 0; i < count; i++) {
        Bucket& bucket = bucketAt(buckets, i);
        bucket.cookie = 0;
    }
    return buckets;
}

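// Drops one reference on the bucket storage; if this was the last reference,
// the entries are destroyed and the buffer is deallocated.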
void BasicHashtableImpl::releaseBuckets(void* __restrict__ buckets, size_t count) const {
    SharedBuffer* sb = SharedBuffer::bufferFromData(buckets);
    if (sb->release(SharedBuffer::eKeepStorage) == 1) {
        destroyBuckets(buckets, count);
        SharedBuffer::dealloc(sb);
    }
}

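// Runs the entry destructor for every present bucket; skipped entirely when
// the entry type is trivially destructible.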
void BasicHashtableImpl::destroyBuckets(void* __restrict__ buckets, size_t count) const {
    if (!mHasTrivialDestructor) {
        for (size_t i = 0; i < count; i++) {
            Bucket& bucket = bucketAt(buckets, i);
            if (bucket.cookie & Bucket::PRESENT) {
                destroyBucketEntry(bucket);
            }
        }
    }
}

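// Copies each bucket's cookie and, for present buckets, copy-constructs the
// entry into the destination storage.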
void BasicHashtableImpl::copyBuckets(const void* __restrict__ fromBuckets,
        void* __restrict__ toBuckets, size_t count) const {
    for (size_t i = 0; i < count; i++) {
        const Bucket& fromBucket = bucketAt(fromBuckets, i);
        Bucket& toBucket = bucketAt(toBuckets, i);
        toBucket.cookie = fromBucket.cookie;
        if (fromBucket.cookie & Bucket::PRESENT) {
            initializeBucketEntry(toBucket, fromBucket.entry);
        }
    }
}

// Table of 31-bit primes where each prime is no less than twice as large
// as the previous one.  Generated by "primes.py".  The trailing 0 is a
// sentinel marking the end of the table.
static size_t PRIMES[] = {
    5,
    11,
    23,
    47,
    97,
    197,
    397,
    797,
    1597,
    3203,
    6421,
    12853,
    25717,
    51437,
    102877,
    205759,
    411527,
    823117,
    1646237,
    3292489,
    6584983,
    13169977,
    26339969,
    52679969,
    105359939,
    210719881,
    421439783,
    842879579,
    1685759167,
    0,
};

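// Chooses a prime bucket count large enough that 'minimumCapacity' entries fit
// under the requested load factor, and reports the resulting bucket count and
// capacity through the out parameters.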
void BasicHashtableImpl::determineCapacity(size_t minimumCapacity, float loadFactor,
        size_t* __restrict__ outBucketCount, size_t* __restrict__ outCapacity) {
    LOG_ALWAYS_FATAL_IF(loadFactor <= 0.0f || loadFactor > 1.0f,
            "Invalid load factor %0.3f. Must be in the range (0, 1].", loadFactor);

    size_t count = ceilf(minimumCapacity / loadFactor) + 1;
    size_t i = 0;
    // Check the index before reading PRIMES[i] and stop at the final 0 sentinel
    // so that an oversized request falls through to the fatal check below
    // instead of reading past the end of the table.
    while (i < NELEM(PRIMES) - 1 && count > PRIMES[i]) {
        i++;
    }
    count = PRIMES[i];
    LOG_ALWAYS_FATAL_IF(!count, "Could not determine required number of buckets for "
            "hashtable with minimum capacity %u and load factor %0.3f.",
            uint32_t(minimumCapacity), loadFactor);
    *outBucketCount = count;
    *outCapacity = ceilf((count - 1) * loadFactor);
}

} // namespace android