Merge "Add logic to persist jobs in separate files."
diff --git a/apex/jobscheduler/service/java/com/android/server/job/JobSchedulerService.java b/apex/jobscheduler/service/java/com/android/server/job/JobSchedulerService.java
index d9fb318..358f009 100644
--- a/apex/jobscheduler/service/java/com/android/server/job/JobSchedulerService.java
+++ b/apex/jobscheduler/service/java/com/android/server/job/JobSchedulerService.java
@@ -177,7 +177,7 @@
@EnabledAfter(targetSdkVersion = Build.VERSION_CODES.TIRAMISU)
private static final long REQUIRE_NETWORK_CONSTRAINT_FOR_NETWORK_JOB_WORK_ITEMS = 241104082L;
- @VisibleForTesting
+ @VisibleForTesting(visibility = VisibleForTesting.Visibility.PACKAGE)
public static Clock sSystemClock = Clock.systemUTC();
private abstract static class MySimpleClock extends Clock {
@@ -454,6 +454,10 @@
runtimeUpdated = true;
}
break;
+ case Constants.KEY_PERSIST_IN_SPLIT_FILES:
+ mConstants.updatePersistingConstantsLocked();
+ mJobs.setUseSplitFiles(mConstants.PERSIST_IN_SPLIT_FILES);
+ break;
default:
if (name.startsWith(JobConcurrencyManager.CONFIG_KEY_PREFIX_CONCURRENCY)
&& !concurrencyUpdated) {
@@ -537,6 +541,8 @@
private static final String KEY_RUNTIME_MIN_HIGH_PRIORITY_GUARANTEE_MS =
"runtime_min_high_priority_guarantee_ms";
+ private static final String KEY_PERSIST_IN_SPLIT_FILES = "persist_in_split_files";
+
private static final int DEFAULT_MIN_READY_NON_ACTIVE_JOBS_COUNT = 5;
private static final long DEFAULT_MAX_NON_ACTIVE_JOB_BATCH_DELAY_MS = 31 * MINUTE_IN_MILLIS;
private static final float DEFAULT_HEAVY_USE_FACTOR = .9f;
@@ -563,6 +569,7 @@
public static final long DEFAULT_RUNTIME_MIN_EJ_GUARANTEE_MS = 3 * MINUTE_IN_MILLIS;
@VisibleForTesting
static final long DEFAULT_RUNTIME_MIN_HIGH_PRIORITY_GUARANTEE_MS = 5 * MINUTE_IN_MILLIS;
+ static final boolean DEFAULT_PERSIST_IN_SPLIT_FILES = false;
private static final boolean DEFAULT_USE_TARE_POLICY = false;
/**
@@ -678,6 +685,12 @@
DEFAULT_RUNTIME_MIN_HIGH_PRIORITY_GUARANTEE_MS;
/**
+ * Whether to persist jobs in split files (by UID). If false, all persisted jobs will be
+ * saved in a single file.
+ */
+ public boolean PERSIST_IN_SPLIT_FILES = DEFAULT_PERSIST_IN_SPLIT_FILES;
+
+ /**
* If true, use TARE policy for job limiting. If false, use quotas.
*/
public boolean USE_TARE_POLICY = DEFAULT_USE_TARE_POLICY;
@@ -735,6 +748,11 @@
DEFAULT_CONN_LOW_SIGNAL_STRENGTH_RELAX_FRAC);
}
+ private void updatePersistingConstantsLocked() {
+ PERSIST_IN_SPLIT_FILES = DeviceConfig.getBoolean(DeviceConfig.NAMESPACE_JOB_SCHEDULER,
+ KEY_PERSIST_IN_SPLIT_FILES, DEFAULT_PERSIST_IN_SPLIT_FILES);
+ }
+
private void updatePrefetchConstantsLocked() {
PREFETCH_FORCE_BATCH_RELAX_THRESHOLD_MS = DeviceConfig.getLong(
DeviceConfig.NAMESPACE_JOB_SCHEDULER,
@@ -835,6 +853,8 @@
pw.print(KEY_RUNTIME_FREE_QUOTA_MAX_LIMIT_MS, RUNTIME_FREE_QUOTA_MAX_LIMIT_MS)
.println();
+ pw.print(KEY_PERSIST_IN_SPLIT_FILES, PERSIST_IN_SPLIT_FILES).println();
+
pw.print(Settings.Global.ENABLE_TARE, USE_TARE_POLICY).println();
pw.decreaseIndent();
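For readers tracing the new flag end to end: the JobSchedulerService side above is standard DeviceConfig plumbing — a key, a default, an `updatePersistingConstantsLocked()` refresher, and a case in the property-change switch that pushes the value into the store. A minimal self-contained sketch of that flow follows; only the DeviceConfig namespace, key, and default mirror the patch, the class and method names are illustrative:

```java
import android.provider.DeviceConfig;

// Hypothetical condensation of the plumbing added above.
final class PersistFlagPlumbingSketch {
    static final String KEY_PERSIST_IN_SPLIT_FILES = "persist_in_split_files";
    static final boolean DEFAULT_PERSIST_IN_SPLIT_FILES = false;

    private boolean mPersistInSplitFiles = DEFAULT_PERSIST_IN_SPLIT_FILES;

    // Invoked for each changed property name in the jobscheduler namespace.
    void onPropertyChanged(String name, Runnable pushToJobStore) {
        if (KEY_PERSIST_IN_SPLIT_FILES.equals(name)) {
            mPersistInSplitFiles = DeviceConfig.getBoolean(
                    DeviceConfig.NAMESPACE_JOB_SCHEDULER,
                    KEY_PERSIST_IN_SPLIT_FILES, DEFAULT_PERSIST_IN_SPLIT_FILES);
            pushToJobStore.run(); // mirrors mJobs.setUseSplitFiles(...)
        }
    }
}
```

On debuggable builds the flag should be flippable the usual way, e.g. `adb shell device_config put jobscheduler persist_in_split_files true`, after which JobStore migrates the on-disk files (see below).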
diff --git a/apex/jobscheduler/service/java/com/android/server/job/JobStore.java b/apex/jobscheduler/service/java/com/android/server/job/JobStore.java
index 68cb049..2f94705f 100644
--- a/apex/jobscheduler/service/java/com/android/server/job/JobStore.java
+++ b/apex/jobscheduler/service/java/com/android/server/job/JobStore.java
@@ -40,6 +40,7 @@
import android.util.Pair;
import android.util.Slog;
import android.util.SparseArray;
+import android.util.SparseBooleanArray;
import android.util.SystemConfigFileCommitEventLogger;
import android.util.Xml;
@@ -89,6 +90,8 @@
/** Threshold to adjust how often we want to write to the db. */
private static final long JOB_PERSIST_DELAY = 2000L;
+ private static final String JOB_FILE_SPLIT_PREFIX = "jobs_";
+ private static final int ALL_UIDS = -1;
final Object mLock;
final Object mWriteScheduleLock; // used solely for invariants around write scheduling
@@ -105,13 +108,20 @@
@GuardedBy("mWriteScheduleLock")
private boolean mWriteInProgress;
+ @GuardedBy("mWriteScheduleLock")
+ private boolean mSplitFileMigrationNeeded;
+
private static final Object sSingletonLock = new Object();
private final SystemConfigFileCommitEventLogger mEventLogger;
private final AtomicFile mJobsFile;
+ private final File mJobFileDirectory;
+ private final SparseBooleanArray mPendingJobWriteUids = new SparseBooleanArray();
/** Handler backed by IoThread for writing to disk. */
private final Handler mIoHandler = IoThread.getHandler();
private static JobStore sSingleton;
+ private boolean mUseSplitFiles = JobSchedulerService.Constants.DEFAULT_PERSIST_IN_SPLIT_FILES;
+
private JobStorePersistStats mPersistInfo = new JobStorePersistStats();
/** Used by the {@link JobSchedulerService} to instantiate the JobStore. */
@@ -144,10 +154,10 @@
mContext = context;
File systemDir = new File(dataDir, "system");
- File jobDir = new File(systemDir, "job");
- jobDir.mkdirs();
+ mJobFileDirectory = new File(systemDir, "job");
+ mJobFileDirectory.mkdirs();
mEventLogger = new SystemConfigFileCommitEventLogger("jobs");
- mJobsFile = new AtomicFile(new File(jobDir, "jobs.xml"), mEventLogger);
+ mJobsFile = createJobFile(new File(mJobFileDirectory, "jobs.xml"));
mJobSet = new JobSet();
@@ -162,12 +172,21 @@
// an incorrect historical timestamp. That's fine; at worst we'll reboot with
// a *correct* timestamp, see a bunch of overdue jobs, and run them; then
// settle into normal operation.
- mXmlTimestamp = mJobsFile.getLastModifiedTime();
+ mXmlTimestamp = mJobsFile.exists()
+ ? mJobsFile.getLastModifiedTime() : mJobFileDirectory.lastModified();
mRtcGood = (sSystemClock.millis() > mXmlTimestamp);
readJobMapFromDisk(mJobSet, mRtcGood);
}
+ private AtomicFile createJobFile(String baseName) {
+ return createJobFile(new File(mJobFileDirectory, baseName + ".xml"));
+ }
+
+ private AtomicFile createJobFile(File file) {
+ return new AtomicFile(file, mEventLogger);
+ }
+
public boolean jobTimesInflatedValid() {
return mRtcGood;
}
@@ -211,6 +230,7 @@
public void add(JobStatus jobStatus) {
mJobSet.add(jobStatus);
if (jobStatus.isPersisted()) {
+ mPendingJobWriteUids.put(jobStatus.getUid(), true);
maybeWriteStatusToDiskAsync();
}
if (DEBUG) {
@@ -224,6 +244,9 @@
@VisibleForTesting
public void addForTesting(JobStatus jobStatus) {
mJobSet.add(jobStatus);
+ if (jobStatus.isPersisted()) {
+ mPendingJobWriteUids.put(jobStatus.getUid(), true);
+ }
}
boolean containsJob(JobStatus jobStatus) {
@@ -257,12 +280,24 @@
return false;
}
if (removeFromPersisted && jobStatus.isPersisted()) {
+ mPendingJobWriteUids.put(jobStatus.getUid(), true);
maybeWriteStatusToDiskAsync();
}
return removed;
}
/**
+ * Like {@link #remove(JobStatus, boolean)}, but doesn't schedule a disk write.
+ */
+ @VisibleForTesting
+ public void removeForTesting(JobStatus jobStatus) {
+ mJobSet.remove(jobStatus);
+ if (jobStatus.isPersisted()) {
+ mPendingJobWriteUids.put(jobStatus.getUid(), true);
+ }
+ }
+
+ /**
* Remove the jobs of users not specified in the keepUserIds.
* @param keepUserIds Array of User IDs whose jobs should be kept and not removed.
*/
@@ -273,6 +308,7 @@
@VisibleForTesting
public void clear() {
mJobSet.clear();
+ mPendingJobWriteUids.put(ALL_UIDS, true);
maybeWriteStatusToDiskAsync();
}
@@ -282,6 +318,36 @@
@VisibleForTesting
public void clearForTesting() {
mJobSet.clear();
+ mPendingJobWriteUids.put(ALL_UIDS, true);
+ }
+
+ void setUseSplitFiles(boolean useSplitFiles) {
+ synchronized (mLock) {
+ if (mUseSplitFiles != useSplitFiles) {
+ mUseSplitFiles = useSplitFiles;
+ migrateJobFilesAsync();
+ }
+ }
+ }
+
+ /**
+     * Same as {@link #setUseSplitFiles(boolean)}, but doesn't schedule a disk write,
+     * which keeps perf benchmark timings stable.
+ */
+ @VisibleForTesting
+ public void setUseSplitFilesForTesting(boolean useSplitFiles) {
+ final boolean changed;
+ synchronized (mLock) {
+ changed = mUseSplitFiles != useSplitFiles;
+ if (changed) {
+ mUseSplitFiles = useSplitFiles;
+ mPendingJobWriteUids.put(ALL_UIDS, true);
+ }
+ }
+ if (changed) {
+ synchronized (mWriteScheduleLock) {
+ mSplitFileMigrationNeeded = true;
+ }
+ }
}
/**
@@ -352,6 +418,16 @@
private static final String XML_TAG_ONEOFF = "one-off";
private static final String XML_TAG_EXTRAS = "extras";
+ private void migrateJobFilesAsync() {
+ synchronized (mLock) {
+ mPendingJobWriteUids.put(ALL_UIDS, true);
+ }
+ synchronized (mWriteScheduleLock) {
+ mSplitFileMigrationNeeded = true;
+ maybeWriteStatusToDiskAsync();
+ }
+ }
+
/**
* Every time the state changes we write all the jobs in one swath, instead of trying to
* track incremental changes.
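`migrateJobFilesAsync()` is the pivot of the change: flipping the layout marks every UID dirty via the ALL_UIDS sentinel and raises `mSplitFileMigrationNeeded`, so the next write pass rewrites everything in the new layout and deletes files from the old one. A compressed sketch of that toggle-and-migrate pattern — the store's two locks are collapsed into one here for brevity, and `scheduleWrite()` stands in for the async write scheduling:

```java
import android.util.SparseBooleanArray;

// Hypothetical condensation of setUseSplitFiles() + migrateJobFilesAsync().
final class SplitModeToggleSketch {
    static final int ALL_UIDS = -1;

    private final Object mLock = new Object();
    private final SparseBooleanArray mPendingWriteUids = new SparseBooleanArray();
    private boolean mUseSplitFiles;
    private boolean mMigrationNeeded;

    void setUseSplitFiles(boolean useSplitFiles) {
        synchronized (mLock) {
            if (mUseSplitFiles == useSplitFiles) {
                return; // No-op toggles must not trigger a full rewrite.
            }
            mUseSplitFiles = useSplitFiles;
            mPendingWriteUids.put(ALL_UIDS, true); // Everything is dirty now.
            mMigrationNeeded = true; // Next write pass also deletes stale files.
            scheduleWrite();
        }
    }

    private void scheduleWrite() { /* post the write runnable, as in the patch */ }
}
```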
@@ -449,10 +525,38 @@
* NOTE: This Runnable locks on mLock
*/
private final Runnable mWriteRunnable = new Runnable() {
+ private final SparseArray<AtomicFile> mJobFiles = new SparseArray<>();
+ private final CopyConsumer mPersistedJobCopier = new CopyConsumer();
+
+ class CopyConsumer implements Consumer<JobStatus> {
+ private final SparseArray<List<JobStatus>> mJobStoreCopy = new SparseArray<>();
+ private boolean mCopyAllJobs;
+
+ private void prepare() {
+ mCopyAllJobs = !mUseSplitFiles || mPendingJobWriteUids.get(ALL_UIDS);
+ }
+
+ @Override
+ public void accept(JobStatus jobStatus) {
+ final int uid = mUseSplitFiles ? jobStatus.getUid() : ALL_UIDS;
+ if (jobStatus.isPersisted() && (mCopyAllJobs || mPendingJobWriteUids.get(uid))) {
+ List<JobStatus> uidJobList = mJobStoreCopy.get(uid);
+ if (uidJobList == null) {
+ uidJobList = new ArrayList<>();
+ mJobStoreCopy.put(uid, uidJobList);
+ }
+ uidJobList.add(new JobStatus(jobStatus));
+ }
+ }
+
+ private void reset() {
+ mJobStoreCopy.clear();
+ }
+ }
+
@Override
public void run() {
final long startElapsed = sElapsedRealtimeClock.millis();
- final List<JobStatus> storeCopy = new ArrayList<JobStatus>();
// Intentionally allow new scheduling of a write operation *before* we clone
// the job set. If we reset it to false after cloning, there's a window in
// which no new write will be scheduled but mLock is not held, i.e. a new
@@ -469,31 +573,73 @@
}
mWriteInProgress = true;
}
+ final boolean useSplitFiles;
synchronized (mLock) {
// Clone the jobs so we can release the lock before writing.
- mJobSet.forEachJob(null, (job) -> {
- if (job.isPersisted()) {
- storeCopy.add(new JobStatus(job));
- }
- });
+ useSplitFiles = mUseSplitFiles;
+ mPersistedJobCopier.prepare();
+ mJobSet.forEachJob(null, mPersistedJobCopier);
+ mPendingJobWriteUids.clear();
}
- writeJobsMapImpl(storeCopy);
+ mPersistInfo.countAllJobsSaved = 0;
+ mPersistInfo.countSystemServerJobsSaved = 0;
+ mPersistInfo.countSystemSyncManagerJobsSaved = 0;
+ for (int i = mPersistedJobCopier.mJobStoreCopy.size() - 1; i >= 0; --i) {
+ AtomicFile file;
+ if (useSplitFiles) {
+ final int uid = mPersistedJobCopier.mJobStoreCopy.keyAt(i);
+ file = mJobFiles.get(uid);
+ if (file == null) {
+ file = createJobFile(JOB_FILE_SPLIT_PREFIX + uid);
+ mJobFiles.put(uid, file);
+ }
+ } else {
+ file = mJobsFile;
+ }
+ if (DEBUG) {
+ Slog.d(TAG, "Writing for " + mPersistedJobCopier.mJobStoreCopy.keyAt(i)
+ + " to " + file.getBaseFile().getName() + ": "
+ + mPersistedJobCopier.mJobStoreCopy.valueAt(i).size() + " jobs");
+ }
+ writeJobsMapImpl(file, mPersistedJobCopier.mJobStoreCopy.valueAt(i));
+ }
if (DEBUG) {
Slog.v(TAG, "Finished writing, took " + (sElapsedRealtimeClock.millis()
- startElapsed) + "ms");
}
+ mPersistedJobCopier.reset();
+ if (!useSplitFiles) {
+ mJobFiles.clear();
+ }
+ // Update the last modified time of the directory to aid in RTC time verification
+ // (see the JobStore constructor).
+ mJobFileDirectory.setLastModified(sSystemClock.millis());
synchronized (mWriteScheduleLock) {
+ if (mSplitFileMigrationNeeded) {
+                    final File[] files = mJobFileDirectory.listFiles();
+                    // listFiles() can return null on I/O error; skip cleanup then.
+                    for (File file : files != null ? files : new File[0]) {
+ if (useSplitFiles) {
+ if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
+ // Delete the now unused file so there's no confusion in the future.
+ file.delete();
+ }
+ } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
+ // Delete the now unused file so there's no confusion in the future.
+ file.delete();
+ }
+ }
+ }
mWriteInProgress = false;
mWriteScheduleLock.notifyAll();
}
}
- private void writeJobsMapImpl(List<JobStatus> jobList) {
+ private void writeJobsMapImpl(@NonNull AtomicFile file, @NonNull List<JobStatus> jobList) {
int numJobs = 0;
int numSystemJobs = 0;
int numSyncJobs = 0;
mEventLogger.setStartTime(SystemClock.uptimeMillis());
- try (FileOutputStream fos = mJobsFile.startWrite()) {
+ try (FileOutputStream fos = file.startWrite()) {
TypedXmlSerializer out = Xml.resolveSerializer(fos);
out.startDocument(null, true);
out.setFeature("http://xmlpull.org/v1/doc/features.html#indent-output", true);
@@ -523,7 +669,7 @@
out.endTag(null, "job-info");
out.endDocument();
- mJobsFile.finishWrite(fos);
+ file.finishWrite(fos);
} catch (IOException e) {
if (DEBUG) {
Slog.v(TAG, "Error writing out job data.", e);
@@ -533,9 +679,9 @@
Slog.d(TAG, "Error persisting bundle.", e);
}
} finally {
- mPersistInfo.countAllJobsSaved = numJobs;
- mPersistInfo.countSystemServerJobsSaved = numSystemJobs;
- mPersistInfo.countSystemSyncManagerJobsSaved = numSyncJobs;
+ mPersistInfo.countAllJobsSaved += numJobs;
+ mPersistInfo.countSystemServerJobsSaved += numSystemJobs;
+ mPersistInfo.countSystemSyncManagerJobsSaved += numSyncJobs;
}
}
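The write path above is the delta-write core: `CopyConsumer` buckets persisted jobs by UID (or into the single ALL_UIDS bucket in legacy mode), copies only buckets whose UID is marked in `mPendingJobWriteUids`, and `writeJobsMapImpl()` then runs once per dirty bucket with its own AtomicFile. The counter change from `=` to `+=` follows directly: totals are now accumulated across several files, so they are zeroed once before the loop. A self-contained sketch of the bucketing step, with assumed names and simplified from the patch:

```java
import android.util.SparseArray;
import android.util.SparseBooleanArray;

import java.util.ArrayList;
import java.util.List;

// Snapshot only the buckets that changed since the last flush.
final class DeltaSnapshotSketch<J> {
    static final int ALL_UIDS = -1;

    final SparseBooleanArray mPendingWriteUids = new SparseBooleanArray();

    SparseArray<List<J>> snapshotDirty(SparseArray<List<J>> jobsByUid,
            boolean useSplitFiles) {
        final SparseArray<List<J>> toWrite = new SparseArray<>();
        // Legacy mode always rewrites the single file; ALL_UIDS forces a full
        // rewrite in split mode (e.g. right after a layout migration).
        final boolean copyAll = !useSplitFiles || mPendingWriteUids.get(ALL_UIDS);
        for (int i = 0; i < jobsByUid.size(); ++i) {
            final int uid = jobsByUid.keyAt(i);
            if (copyAll || mPendingWriteUids.get(uid)) {
                toWrite.put(uid, new ArrayList<>(jobsByUid.valueAt(i)));
            }
        }
        mPendingWriteUids.clear(); // All dirty state is now snapshotted.
        return toWrite;
    }
}
```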
@@ -720,49 +866,82 @@
@Override
public void run() {
+ if (!mJobFileDirectory.isDirectory()) {
+ Slog.wtf(TAG, "jobs directory isn't a directory O.O");
+ mJobFileDirectory.mkdirs();
+ return;
+ }
+
int numJobs = 0;
int numSystemJobs = 0;
int numSyncJobs = 0;
List<JobStatus> jobs;
- try (FileInputStream fis = mJobsFile.openRead()) {
- synchronized (mLock) {
- jobs = readJobMapImpl(fis, rtcGood);
- if (jobs != null) {
- long now = sElapsedRealtimeClock.millis();
- for (int i=0; i<jobs.size(); i++) {
- JobStatus js = jobs.get(i);
- js.prepareLocked();
- js.enqueueTime = now;
- this.jobSet.add(js);
+ final File[] files;
+ try {
+ files = mJobFileDirectory.listFiles();
+ } catch (SecurityException e) {
+ Slog.wtf(TAG, "Not allowed to read job file directory", e);
+ return;
+ }
+ if (files == null) {
+ Slog.wtfStack(TAG, "Couldn't get job file list");
+ return;
+ }
+ boolean needFileMigration = false;
+ long now = sElapsedRealtimeClock.millis();
+ for (File file : files) {
+ final AtomicFile aFile = createJobFile(file);
+ try (FileInputStream fis = aFile.openRead()) {
+ synchronized (mLock) {
+                    jobs = readJobMapImpl(fis, rtcGood);
+ if (jobs != null) {
+ for (int i = 0; i < jobs.size(); i++) {
+ JobStatus js = jobs.get(i);
+ js.prepareLocked();
+ js.enqueueTime = now;
+ this.jobSet.add(js);
- numJobs++;
- if (js.getUid() == Process.SYSTEM_UID) {
- numSystemJobs++;
- if (isSyncJob(js)) {
- numSyncJobs++;
+ numJobs++;
+ if (js.getUid() == Process.SYSTEM_UID) {
+ numSystemJobs++;
+ if (isSyncJob(js)) {
+ numSyncJobs++;
+ }
+                            }
+                        }
+                    }
+                }
+ } catch (FileNotFoundException e) {
+                // mJobFileDirectory.listFiles() returned this file, but it has since disappeared.
+ Slog.e(TAG, "Could not find jobs file: " + file.getName());
+ } catch (XmlPullParserException | IOException e) {
+ Slog.wtf(TAG, "Error in " + file.getName(), e);
+ } catch (Exception e) {
+ // Crashing at this point would result in a boot loop, so live with a general
+ // Exception for system stability's sake.
+ Slog.wtf(TAG, "Unexpected exception", e);
}
- } catch (FileNotFoundException e) {
- if (DEBUG) {
- Slog.d(TAG, "Could not find jobs file, probably there was nothing to load.");
- }
- } catch (XmlPullParserException | IOException e) {
- Slog.wtf(TAG, "Error jobstore xml.", e);
- } catch (Exception e) {
- // Crashing at this point would result in a boot loop, so live with a general
- // Exception for system stability's sake.
- Slog.wtf(TAG, "Unexpected exception", e);
- } finally {
- if (mPersistInfo.countAllJobsLoaded < 0) { // Only set them once.
- mPersistInfo.countAllJobsLoaded = numJobs;
- mPersistInfo.countSystemServerJobsLoaded = numSystemJobs;
- mPersistInfo.countSystemSyncManagerJobsLoaded = numSyncJobs;
+ if (mUseSplitFiles) {
+ if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
+ // We're supposed to be using the split file architecture, but we still have
+ // the old job file around. Fully migrate and remove the old file.
+ needFileMigration = true;
+ }
+ } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
+ // We're supposed to be using the legacy single file architecture, but we still
+ // have some job split files around. Fully migrate and remove the split files.
+ needFileMigration = true;
}
}
+ if (mPersistInfo.countAllJobsLoaded < 0) { // Only set them once.
+ mPersistInfo.countAllJobsLoaded = numJobs;
+ mPersistInfo.countSystemServerJobsLoaded = numSystemJobs;
+ mPersistInfo.countSystemSyncManagerJobsLoaded = numSyncJobs;
+ }
Slog.i(TAG, "Read " + numJobs + " jobs");
+ if (needFileMigration) {
+ migrateJobFilesAsync();
+ }
}
private List<JobStatus> readJobMapImpl(InputStream fis, boolean rtcIsGood)
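On the read side, the loop above replaces the single `mJobsFile.openRead()` with a scan of every file in the job directory, parsing each under `mLock` and tolerating per-file failures so one corrupt file can't boot-loop the device. It also notices layout mismatches — a legacy `jobs.xml` while split mode is on, or `jobs_<uid>.xml` leftovers while it's off — and queues a migration. A minimal sketch of that mismatch check, with assumed names:

```java
import java.io.File;

final class LayoutMismatchSketch {
    static final String SPLIT_PREFIX = "jobs_";

    // True if any on-disk file belongs to the layout we are NOT using,
    // meaning the next write pass should migrate and clean up.
    static boolean needsMigration(File jobDir, boolean useSplitFiles) {
        final File[] files = jobDir.listFiles();
        if (files == null) {
            return false; // Nothing readable; nothing to migrate.
        }
        for (File file : files) {
            final boolean isSplitFile = file.getName().startsWith(SPLIT_PREFIX);
            if (isSplitFile != useSplitFiles) {
                return true;
            }
        }
        return false;
    }
}
```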
diff --git a/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java b/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java
index f138311..808130a 100644
--- a/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java
+++ b/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java
@@ -155,7 +155,18 @@
}
@Test
- public void testWritingTwoFilesToDisk() throws Exception {
+ public void testWritingTwoJobsToDisk_singleFile() throws Exception {
+ mTaskStoreUnderTest.setUseSplitFiles(false);
+ runWritingTwoJobsToDisk();
+ }
+
+ @Test
+ public void testWritingTwoJobsToDisk_splitFiles() throws Exception {
+ mTaskStoreUnderTest.setUseSplitFiles(true);
+ runWritingTwoJobsToDisk();
+ }
+
+ private void runWritingTwoJobsToDisk() throws Exception {
final JobInfo task1 = new Builder(8, mComponent)
.setRequiresDeviceIdle(true)
.setPeriodic(10000L)
@@ -169,8 +180,10 @@
.setRequiredNetworkType(JobInfo.NETWORK_TYPE_UNMETERED)
.setPersisted(true)
.build();
- final JobStatus taskStatus1 = JobStatus.createFromJobInfo(task1, SOME_UID, null, -1, null);
- final JobStatus taskStatus2 = JobStatus.createFromJobInfo(task2, SOME_UID, null, -1, null);
+ final int uid1 = SOME_UID;
+ final int uid2 = uid1 + 1;
+ final JobStatus taskStatus1 = JobStatus.createFromJobInfo(task1, uid1, null, -1, null);
+ final JobStatus taskStatus2 = JobStatus.createFromJobInfo(task2, uid2, null, -1, null);
mTaskStoreUnderTest.add(taskStatus1);
mTaskStoreUnderTest.add(taskStatus2);
waitForPendingIo();
diff --git a/tests/JobSchedulerPerfTests/src/com/android/frameworks/perftests/job/JobStorePerfTests.java b/tests/JobSchedulerPerfTests/src/com/android/frameworks/perftests/job/JobStorePerfTests.java
index dd9b294..afaeca1 100644
--- a/tests/JobSchedulerPerfTests/src/com/android/frameworks/perftests/job/JobStorePerfTests.java
+++ b/tests/JobSchedulerPerfTests/src/com/android/frameworks/perftests/job/JobStorePerfTests.java
@@ -15,7 +15,6 @@
*/
package com.android.frameworks.perftests.job;
-
import android.app.job.JobInfo;
import android.content.ComponentName;
import android.content.Context;
@@ -46,7 +45,8 @@
public class JobStorePerfTests {
private static final String SOURCE_PACKAGE = "com.android.frameworks.perftests.job";
private static final int SOURCE_USER_ID = 0;
- private static final int CALLING_UID = 10079;
+ private static final int BASE_CALLING_UID = 10079;
+ private static final int MAX_UID_COUNT = 10;
private static Context sContext;
private static File sTestDir;
@@ -65,10 +65,10 @@
sJobStore = JobStore.initAndGetForTesting(sContext, sTestDir);
for (int i = 0; i < 50; i++) {
- sFewJobs.add(createJobStatus("fewJobs", i));
+ sFewJobs.add(createJobStatus("fewJobs", i, BASE_CALLING_UID + (i % MAX_UID_COUNT)));
}
for (int i = 0; i < 500; i++) {
- sManyJobs.add(createJobStatus("manyJobs", i));
+ sManyJobs.add(createJobStatus("manyJobs", i, BASE_CALLING_UID + (i % MAX_UID_COUNT)));
}
}
@@ -104,6 +104,64 @@
runPersistedJobWriting(sManyJobs);
}
+ private void runPersistedJobWriting_delta(List<JobStatus> jobList,
+ List<JobStatus> jobAdditions, List<JobStatus> jobRemovals) {
+ final ManualBenchmarkState benchmarkState = mPerfManualStatusReporter.getBenchmarkState();
+
+ long elapsedTimeNs = 0;
+ while (benchmarkState.keepRunning(elapsedTimeNs)) {
+ sJobStore.clearForTesting();
+ for (JobStatus job : jobList) {
+ sJobStore.addForTesting(job);
+ }
+ sJobStore.writeStatusToDiskForTesting();
+
+ for (JobStatus job : jobAdditions) {
+ sJobStore.addForTesting(job);
+ }
+ for (JobStatus job : jobRemovals) {
+ sJobStore.removeForTesting(job);
+ }
+
+ final long startTime = SystemClock.elapsedRealtimeNanos();
+ sJobStore.writeStatusToDiskForTesting();
+ final long endTime = SystemClock.elapsedRealtimeNanos();
+ elapsedTimeNs = endTime - startTime;
+ }
+ }
+
+ @Test
+ public void testPersistedJobWriting_delta_fewJobs() {
+ List<JobStatus> additions = new ArrayList<>();
+ List<JobStatus> removals = new ArrayList<>();
+ final int numModifiedUids = MAX_UID_COUNT / 2;
+ for (int i = 0; i < sFewJobs.size() / 3; ++i) {
+ JobStatus job = createJobStatus("fewJobs", i, BASE_CALLING_UID + (i % numModifiedUids));
+ if (i % 2 == 0) {
+ additions.add(job);
+ } else {
+ removals.add(job);
+ }
+ }
+ runPersistedJobWriting_delta(sFewJobs, additions, removals);
+ }
+
+ @Test
+ public void testPersistedJobWriting_delta_manyJobs() {
+ List<JobStatus> additions = new ArrayList<>();
+ List<JobStatus> removals = new ArrayList<>();
+ final int numModifiedUids = MAX_UID_COUNT / 2;
+ for (int i = 0; i < sManyJobs.size() / 3; ++i) {
+ JobStatus job = createJobStatus("fewJobs", i, BASE_CALLING_UID + (i % numModifiedUids));
+ if (i % 2 == 0) {
+ additions.add(job);
+ } else {
+ removals.add(job);
+ }
+ }
+ runPersistedJobWriting_delta(sManyJobs, additions, removals);
+ }
+
private void runPersistedJobReading(List<JobStatus> jobList, boolean rtcIsGood) {
final ManualBenchmarkState benchmarkState = mPerfManualStatusReporter.getBenchmarkState();
@@ -144,12 +202,12 @@
runPersistedJobReading(sManyJobs, false);
}
- private static JobStatus createJobStatus(String testTag, int jobId) {
+ private static JobStatus createJobStatus(String testTag, int jobId, int callingUid) {
JobInfo jobInfo = new JobInfo.Builder(jobId,
new ComponentName(sContext, "JobStorePerfTestJobService"))
.setPersisted(true)
.build();
return JobStatus.createFromJobInfo(
- jobInfo, CALLING_UID, SOURCE_PACKAGE, SOURCE_USER_ID, testTag);
+ jobInfo, callingUid, SOURCE_PACKAGE, SOURCE_USER_ID, testTag);
}
}
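The delta benchmarks are the payoff measurement for this change: the baseline pass persists the full job set, the timed pass persists only the additions/removals, and those deltas touch just half of the UIDs. Rough arithmetic for the expected effect in the many-jobs case — illustrative sizing only, not measured results:

```java
// Back-of-the-envelope sizing of testPersistedJobWriting_delta_manyJobs:
// 500 jobs spread round-robin over 10 UIDs, with deltas confined to the
// first 5 UIDs. Split mode should rewrite ~5 small files instead of one
// 500-job jobs.xml.
final class DeltaSizingSketch {
    public static void main(String[] args) {
        final int totalJobs = 500;
        final int uidCount = 10;               // MAX_UID_COUNT
        final int modifiedUids = uidCount / 2; // numModifiedUids

        System.out.println("jobs per split file:     " + totalJobs / uidCount);
        System.out.println("split files rewritten:   " + modifiedUids);
        System.out.println("jobs rewritten (split):  "
                + modifiedUids * (totalJobs / uidCount));
        System.out.println("jobs rewritten (single): " + totalJobs);
    }
}
```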