/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <utils/StrongPointer.h>
#include <utils/RefBase.h>

#include <thread>
#include <atomic>
#include <sched.h>
#include <errno.h>

// Enhanced version of StrongPointer_test, but using RefBase underneath.

using namespace android;

static constexpr int NITERS = 1000000;

static constexpr int INITIAL_STRONG_VALUE = 1 << 28;  // Mirroring RefBase definition.

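// RefBase initializes the strong count to this sentinel; the first incStrong()
// subtracts it out, so a freshly constructed object reports
// INITIAL_STRONG_VALUE until the first sp<> attaches (see StrongMoves below).
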
class Foo : public RefBase {
public:
    Foo(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~Foo() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
};

// A version of Foo that ensures that all objects are allocated at the same
// address. No more than one can be allocated at a time. Thread-hostile.
class FooFixedAlloc : public RefBase {
public:
    static void* operator new(size_t size) {
        if (mAllocCount != 0) {
            abort();
        }
        mAllocCount = 1;
        if (theMemory == nullptr) {
            theMemory = malloc(size);
        }
        return theMemory;
    }

    static void operator delete(void *p) {
        if (mAllocCount != 1 || p != theMemory) {
            abort();
        }
        mAllocCount = 0;
    }

    FooFixedAlloc(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~FooFixedAlloc() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
    static int mAllocCount;
    static void* theMemory;
};

int FooFixedAlloc::mAllocCount(0);
void* FooFixedAlloc::theMemory(nullptr);

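// A dead wp<> must not compare equal to a wp<> for a different object that
// happens to occupy the same address: wp<> identity evidently tracks the
// weak-reference control block as well as the raw pointer. FooFixedAlloc
// makes that scenario reproducible (see ReplacedComparison below).
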
TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    {
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count isn't increasing; double-check that the moved-from
        // pointer is properly reset and doesn't delete early.
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "Deleted too early! Still has a reference!";
    {
        // Now double-check that it deletes on time.
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    ASSERT_TRUE(wp1.promote().get() == nullptr);
}

TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Deleted too early! Still has a reference!";
    wp1 = nullptr;
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}

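// RefBase used to delete an object that lost its last weak reference without
// ever having had a strong one; it now logs a warning and leaves the object
// alone instead, hence the assertion above (and the deliberately rare path in
// RacingPromotions below, which exercises the same case).
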
TEST(RefBase, Comparisons) {
    bool isDeleted, isDeleted2;
    Foo* foo = new Foo(&isDeleted);
    Foo* foo2 = new Foo(&isDeleted2);
    sp<Foo> sp1(foo);
    sp<Foo> sp2(foo2);
    wp<Foo> wp1(sp1);
    wp<Foo> wp2(sp1);
    wp<Foo> wp3(sp2);
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_TRUE(wp1 == sp1);
    ASSERT_TRUE(wp3 == sp2);
    ASSERT_TRUE(wp1 != sp2);
    ASSERT_TRUE(wp1 <= wp2);
    ASSERT_TRUE(wp1 >= wp2);
    ASSERT_FALSE(wp1 != wp2);
    ASSERT_FALSE(wp1 > wp2);
    ASSERT_FALSE(wp1 < wp2);
    ASSERT_FALSE(sp1 == sp2);
    ASSERT_TRUE(sp1 != sp2);
    bool sp1_smaller = sp1 < sp2;
    wp<Foo> wp_smaller = sp1_smaller ? wp1 : wp3;
    wp<Foo> wp_larger = sp1_smaller ? wp3 : wp1;
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    sp2 = nullptr;
    ASSERT_TRUE(isDeleted2);
    ASSERT_FALSE(isDeleted);
    ASSERT_FALSE(wp3 == sp2);
    // Comparison results on weak pointers should not be affected.
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    wp2 = nullptr;
    ASSERT_FALSE(wp1 == wp2);
    ASSERT_TRUE(wp1 != wp2);
    wp1.clear();
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
    wp3.clear();
    ASSERT_TRUE(wp1 == wp3);
    ASSERT_FALSE(wp1 != wp3);
    ASSERT_FALSE(isDeleted);
    sp1.clear();
    ASSERT_TRUE(isDeleted);
    ASSERT_TRUE(sp1 == sp2);
}

// Check whether comparison against a dead wp works, even if the object
// referenced by the new wp happens to be at the same address.
TEST(RefBase, ReplacedComparison) {
    bool isDeleted, isDeleted2;
    FooFixedAlloc* foo = new FooFixedAlloc(&isDeleted);
    sp<FooFixedAlloc> sp1(foo);
    wp<FooFixedAlloc> wp1(sp1);
    ASSERT_TRUE(wp1 == sp1);
    sp1.clear();  // Deallocates the object.
    ASSERT_TRUE(isDeleted);
    FooFixedAlloc* foo2 = new FooFixedAlloc(&isDeleted2);
    ASSERT_FALSE(isDeleted2);
    ASSERT_EQ(foo, foo2);  // Not technically a legal comparison, but ...
    sp<FooFixedAlloc> sp2(foo2);
    wp<FooFixedAlloc> wp2(sp2);
    ASSERT_TRUE(sp2 == wp2);
    ASSERT_FALSE(sp2 != wp2);
    ASSERT_TRUE(sp2 != wp1);
    ASSERT_FALSE(sp2 == wp1);
    ASSERT_FALSE(sp2 == sp1);  // sp1 is null.
    ASSERT_FALSE(wp1 == wp2);  // wp1 refers to the old object.
    ASSERT_TRUE(wp1 != wp2);
    ASSERT_TRUE(wp1 > wp2 || wp1 < wp2);
    ASSERT_TRUE(wp1 >= wp2 || wp1 <= wp2);
    ASSERT_FALSE(wp1 >= wp2 && wp1 <= wp2);
    ASSERT_FALSE(wp1 == nullptr);
    wp1 = sp2;
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
}

// Set up a situation in which we race with visit2AndRemove() to delete
// 2 strong references. The Bar destructor checks that there are no early
// deletions and that prior updates are visible to the destructor.
class Bar : public RefBase {
public:
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }
    bool mVisited1;
    bool mVisited2;
private:
    std::atomic<int>* mDeleteCount;
};

static sp<Bar> buffer;
static std::atomic<bool> bufferFull(false);

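// The Bar is handed between threads through buffer; bufferFull is the
// handshake flag. Its std::atomic accesses are sequentially consistent, so
// everything a thread writes before flipping the flag is visible to the
// thread that observes the flip.
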
// Wait until bufferFull has value val.
static inline void waitFor(bool val) {
    while (bufferFull != val) {}
}

cpu_set_t otherCpus;

// Divide the cpus we're allowed to run on into myCpus and otherCpus.
// Set origCpus to the processors we were originally allowed to run on.
// Return false if origCpus doesn't include at least processors 0 and 1.
static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
        cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
        return false;
    }
    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
        return false;
    }
    CPU_ZERO(myCpus);
    CPU_ZERO(otherCpus);
    CPU_OR(myCpus, myCpus, origCpus);
    CPU_OR(otherCpus, otherCpus, origCpus);
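    // CPU_OR into a freshly zeroed set serves as a copy here; <sched.h>
    // appears to offer no dedicated copy macro for cpu_set_t.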
    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
        // I get the even cores, the other thread gets the odd ones.
        if (i & 1) {
            CPU_CLR(i, myCpus);
        } else {
            CPU_CLR(i, otherCpus);
        }
    }
    return true;
}

static void visit2AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);
        buffer->mVisited2 = true;
        buffer = nullptr;
        bufferFull = false;
    }
}

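// Protocol for the test below: the main thread publishes a fresh Bar through
// buffer and raises bufferFull; visit2AndRemove() marks it visited and drops
// its reference; whichever thread releases the last strong reference runs
// ~Bar(), which checks that both mVisited flags are visible.
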
TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;
            bufferFull = true;
            ASSERT_TRUE(bar->getStrongCount() >= 1);
            // Weak count includes strong count.
            ASSERT_TRUE(bar->getWeakRefs()->getWeakCount() >= 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}

static wp<Bar> wpBuffer;
static std::atomic<bool> wpBufferFull(false);

// Wait until wpBufferFull has value val.
static inline void wpWaitFor(bool val) {
    while (wpBufferFull != val) {}
}

static void visit3AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // We implicitly check that sp1 != nullptr by dereferencing it.
            sp1->mVisited2 = true;
        }
        wpBuffer = nullptr;
        wpBufferFull = false;
    }
}

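// wp<>::promote() tries to atomically acquire a strong reference, and returns
// nullptr once that is no longer possible. The test below races a promotion
// in this thread against the promotion and release in visit3AndRemove(), and
// checks that a promotion after all strong references are gone fails.
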
TEST(RefBase, RacingPromotions) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit3AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            Bar* bar = new Bar(&deleteCount);
            wp<Bar> wp1(bar);
            bar->mVisited1 = true;
            if (i % (NITERS / 10) == 0) {
                // Do this rarely, since it generates a log message.
                wp1 = nullptr;  // No longer destroys the object.
                wp1 = bar;
            }
            wpBuffer = wp1;
            ASSERT_EQ(bar->getWeakRefs()->getWeakCount(), 2);
            wpBufferFull = true;
            // Promotion races with that in visit3AndRemove.
            // This may or may not succeed, but it shouldn't interfere with
            // the concurrent one.
            sp<Bar> sp1 = wp1.promote();
            wpWaitFor(false);  // Waits for other thread to drop strong pointer.
            sp1 = nullptr;
            // No strong pointers here.
            sp1 = wp1.promote();
            ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}