Merge "MIPS: Lower LibartImgDeviceBaseAddress() to fix ART tests."
diff --git a/android/paths.go b/android/paths.go
index a23dd74..26b72d1 100644
--- a/android/paths.go
+++ b/android/paths.go
@@ -584,10 +584,10 @@
 	var vndkOrNdkDir string
 	var ext string
 	if isSourceDump {
-		ext = ".lsdump"
+		ext = ".lsdump.gz"
 		sourceOrBinaryDir = "source-based"
 	} else {
-		ext = ".bdump"
+		ext = ".bdump.gz"
 		sourceOrBinaryDir = "binary-based"
 	}
 	if vndkOrNdk {
diff --git a/android/variable.go b/android/variable.go
index 666d729..05f50b5 100644
--- a/android/variable.go
+++ b/android/variable.go
@@ -138,6 +138,7 @@
 	EnableCFI                  *bool `json:",omitempty"`
 	Device_uses_hwc2           *bool `json:",omitempty"`
 	Treble                     *bool `json:",omitempty"`
+	Pdk                        *bool `json:",omitempty"`
 
 	VendorPath *string `json:",omitempty"`
 
diff --git a/cc/androidmk.go b/cc/androidmk.go
index cba8782..2a3b344 100644
--- a/cc/androidmk.go
+++ b/cc/androidmk.go
@@ -149,6 +149,7 @@
 			fmt.Fprintln(w, "LOCAL_ADDITIONAL_DEPENDENCIES += ", library.sAbiOutputFile.String())
 			if library.sAbiDiff.Valid() && !library.static() {
 				fmt.Fprintln(w, "LOCAL_ADDITIONAL_DEPENDENCIES += ", library.sAbiDiff.String())
+				fmt.Fprintln(w, "HEADER_ABI_DIFFS += ", library.sAbiDiff.String())
 			}
 		}
 
@@ -203,6 +204,7 @@
 
 func (benchmark *benchmarkDecorator) AndroidMk(ctx AndroidMkContext, ret *android.AndroidMkData) {
 	ctx.subAndroidMk(ret, benchmark.binaryDecorator)
+	ret.Class = "NATIVE_TESTS"
 	ret.Extra = append(ret.Extra, func(w io.Writer, outputFile android.Path) error {
 		if len(benchmark.Properties.Test_suites) > 0 {
 			fmt.Fprintln(w, "LOCAL_COMPATIBILITY_SUITE :=",
diff --git a/cc/builder.go b/cc/builder.go
index a4fda5b..51c4ce9 100644
--- a/cc/builder.go
+++ b/cc/builder.go
@@ -158,9 +158,10 @@
 
 	_ = pctx.SourcePathVariable("sAbiDumper", "prebuilts/build-tools/${config.HostPrebuiltTag}/bin/header-abi-dumper")
 
+	// -w is passed since header-abi-dumper does not need to produce any diagnostic information.
 	sAbiDump = pctx.AndroidStaticRule("sAbiDump",
 		blueprint.RuleParams{
-			Command:     "rm -f $out && $sAbiDumper -o ${out} $in $exportDirs -- $cFlags -Wno-packed -Qunused-arguments -isystem ${config.RSIncludePath}",
+			Command:     "rm -f $out && $sAbiDumper -o ${out} $in $exportDirs -- $cFlags -w -isystem ${config.RSIncludePath}",
 			CommandDeps: []string{"$sAbiDumper"},
 		},
 		"cFlags", "exportDirs")
@@ -177,13 +178,19 @@
 		"symbolFile", "arch", "api", "exportedHeaderFlags")
 
 	_ = pctx.SourcePathVariable("sAbiDiffer", "prebuilts/build-tools/${config.HostPrebuiltTag}/bin/header-abi-diff")
+
 	// Abidiff check turned on in advice-only mode. Builds will not fail on abi incompatibilities / extensions.
 	sAbiDiff = pctx.AndroidStaticRule("sAbiDiff",
 		blueprint.RuleParams{
-			Command:     "$sAbiDiffer -advice-only -o ${out} -new $in -old $referenceDump",
+			Command:     "$sAbiDiffer -lib $libName -arch $arch -advice-only -o ${out} -new $in -old $referenceDump",
 			CommandDeps: []string{"$sAbiDiffer"},
 		},
-		"referenceDump")
+		"referenceDump", "libName", "arch")
+
+	unzipRefSAbiDump = pctx.AndroidStaticRule("unzipRefSAbiDump",
+		blueprint.RuleParams{
+			Command: "gunzip -c $in > $out",
+		})
 )
 
 func init() {
@@ -631,6 +638,17 @@
 	return android.OptionalPathForPath(outputFile)
 }
 
+func UnzipRefDump(ctx android.ModuleContext, zippedRefDump android.Path, baseName string) android.Path {
+	outputFile := android.PathForModuleOut(ctx, baseName+"_ref.lsdump")
+	ctx.ModuleBuild(pctx, android.ModuleBuildParams{
+		Rule:        unzipRefSAbiDump,
+		Description: "gunzip " + outputFile.Base(),
+		Output:      outputFile,
+		Input:       zippedRefDump,
+	})
+	return outputFile
+}
+
 func SourceAbiDiff(ctx android.ModuleContext, inputDump android.Path, referenceDump android.Path,
 	baseName string) android.OptionalPath {
 	outputFile := android.PathForModuleOut(ctx, baseName+".abidiff")
@@ -642,6 +660,8 @@
 		Implicit:    referenceDump,
 		Args: map[string]string{
 			"referenceDump": referenceDump.String(),
+			"libName":       baseName,
+			"arch":          ctx.Arch().ArchType.Name,
 		},
 	})
 	return android.OptionalPathForPath(outputFile)
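
For context on the new unzipRefSAbiDump rule above: it simply streams the gzip-compressed reference dump back to plain text before the diff step. A minimal standalone Go sketch of that same step (the file names here are invented for illustration and are not part of this change):

package main

import (
	"compress/gzip"
	"io"
	"os"
)

// gunzipFile decompresses src (a .lsdump.gz reference dump) into dst,
// mirroring what the "gunzip -c $in > $out" rule does.
func gunzipFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	zr, err := gzip.NewReader(in)
	if err != nil {
		return err
	}
	defer zr.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, zr)
	return err
}

func main() {
	// Hypothetical paths; the real inputs come from PathForVndkRefAbiDump.
	if err := gunzipFile("libfoo.so.lsdump.gz", "libfoo.so_ref.lsdump"); err != nil {
		os.Exit(1)
	}
}
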
diff --git a/cc/cc.go b/cc/cc.go
index 8069a90..43825ca 100644
--- a/cc/cc.go
+++ b/cc/cc.go
@@ -429,8 +429,7 @@
 
 // Create source abi dumps if the module belongs to the list of VndkLibraries.
 func (ctx *moduleContextImpl) createVndkSourceAbiDump() bool {
-	return ctx.ctx.Device() && (inList(ctx.baseModuleName(), config.LLndkLibraries())) ||
-		(inList(ctx.baseModuleName(), config.VndkLibraries()))
+	return ctx.ctx.Device() && ((Bool(ctx.mod.Properties.Vendor_available)) || (inList(ctx.baseModuleName(), config.LLndkLibraries())))
 }
 
 func (ctx *moduleContextImpl) selectedStl() string {
@@ -920,6 +919,9 @@
 						depPaths.ReexportedFlags = append(depPaths.ReexportedFlags, flags)
 						depPaths.ReexportedFlagsDeps = append(depPaths.ReexportedFlagsDeps,
 							genRule.GeneratedSourceFiles()...)
+						// Add these re-exported flags to help header-abi-dumper infer the abi exported by a library.
+						c.sabi.Properties.ReexportedIncludeFlags = append(c.sabi.Properties.ReexportedIncludeFlags, flags)
+
 					}
 				} else {
 					ctx.ModuleErrorf("module %q is not a genrule", name)
@@ -969,6 +971,12 @@
 				if t.reexportFlags {
 					depPaths.ReexportedFlags = append(depPaths.ReexportedFlags, flags...)
 					depPaths.ReexportedFlagsDeps = append(depPaths.ReexportedFlagsDeps, deps...)
+					// Add these re-exported flags to help header-abi-dumper infer the abi exported by a library.
+					// Re-exported flags from shared library dependencies are not included as those shared libraries
+					// will be included in the vndk set.
+					if tag == staticExportDepTag || tag == headerExportDepTag {
+						c.sabi.Properties.ReexportedIncludeFlags = append(c.sabi.Properties.ReexportedIncludeFlags, flags...)
+					}
 				}
 			}
 
diff --git a/cc/config/global.go b/cc/config/global.go
index 997256e..195b482 100644
--- a/cc/config/global.go
+++ b/cc/config/global.go
@@ -175,6 +175,7 @@
 		"-isystem bionic/libc/include",
 		"-isystem bionic/libc/kernel/uapi",
 		"-isystem bionic/libc/kernel/uapi/asm-" + kernelArch,
+		"-isystem bionic/libc/kernel/android/scsi",
 		"-isystem bionic/libc/kernel/android/uapi",
 	}, " ")
 }
diff --git a/cc/config/mips_device.go b/cc/config/mips_device.go
index 9e27f37..ec8f133 100644
--- a/cc/config/mips_device.go
+++ b/cc/config/mips_device.go
@@ -92,7 +92,6 @@
 			"-mfp32",
 			"-modd-spreg",
 			"-mno-fused-madd",
-			"-Wa,-mmxu",
 			"-mno-synci",
 		},
 		"mips32r2dsp-fp": []string{
diff --git a/cc/library.go b/cc/library.go
index d6a85e9..997344c 100644
--- a/cc/library.go
+++ b/cc/library.go
@@ -317,6 +317,27 @@
 	return library.baseCompiler.compilerFlags(ctx, flags)
 }
 
+func extractExportIncludesFromFlags(flags []string) []string {
+	// This function is used in the generation of rules which produce
+	// abi-dumps for source files. Exported headers are needed to infer the
+	// abi exported by a library and filter out the rest of the abi dumped
+	// from a source. We extract the include flags exported by a library.
+	// This includes the flags exported which are re-exported from static
+	// library dependencies, exported header library dependencies and
+	// generated header dependencies. Re-exported shared library include
+	// flags are not in this set since shared library dependencies will
+	// themselves be included in the vndk. -isystem headers are not included
+	// since for bionic libraries, abi-filtering is taken care of by version
+	// scripts.
+	var exportedIncludes []string
+	for _, flag := range flags {
+		if strings.HasPrefix(flag, "-I") {
+			exportedIncludes = append(exportedIncludes, flag)
+		}
+	}
+	return exportedIncludes
+}
+
 func (library *libraryDecorator) compile(ctx ModuleContext, flags Flags, deps PathDeps) Objects {
 	if !library.buildShared() && !library.buildStatic() {
 		if len(library.baseCompiler.Properties.Srcs) > 0 {
@@ -330,13 +351,15 @@
 		}
 		return Objects{}
 	}
-	if ctx.createVndkSourceAbiDump() || (library.sabi.Properties.CreateSAbiDumps && ctx.Device()) {
+	if (ctx.createVndkSourceAbiDump() || (library.sabi.Properties.CreateSAbiDumps && ctx.Device())) && !ctx.Vendor() {
 		exportIncludeDirs := android.PathsForModuleSrc(ctx, library.flagExporter.Properties.Export_include_dirs)
 		var SourceAbiFlags []string
 		for _, dir := range exportIncludeDirs.Strings() {
-			SourceAbiFlags = append(SourceAbiFlags, "-I "+dir)
+			SourceAbiFlags = append(SourceAbiFlags, "-I"+dir)
 		}
-
+		for _, reexportedInclude := range extractExportIncludesFromFlags(library.sabi.Properties.ReexportedIncludeFlags) {
+			SourceAbiFlags = append(SourceAbiFlags, reexportedInclude)
+		}
 		flags.SAbiFlags = SourceAbiFlags
 		total_length := len(library.baseCompiler.Properties.Srcs) + len(deps.GeneratedSources) + len(library.Properties.Shared.Srcs) +
 			len(library.Properties.Static.Srcs)
@@ -573,7 +596,7 @@
 
 func (library *libraryDecorator) linkSAbiDumpFiles(ctx ModuleContext, objs Objects, fileName string) {
 	// Also take into account object re-use.
-	if len(objs.sAbiDumpFiles) > 0 && ctx.createVndkSourceAbiDump() {
+	if len(objs.sAbiDumpFiles) > 0 && ctx.createVndkSourceAbiDump() && !ctx.Vendor() {
 		refSourceDumpFile := android.PathForVndkRefAbiDump(ctx, "current", fileName, vndkVsNdk(ctx), true)
 		versionScript := android.OptionalPathForModuleSrc(ctx, library.Properties.Version_script)
 		var symbolFile android.OptionalPath
@@ -583,12 +606,16 @@
 		exportIncludeDirs := android.PathsForModuleSrc(ctx, library.flagExporter.Properties.Export_include_dirs)
 		var SourceAbiFlags []string
 		for _, dir := range exportIncludeDirs.Strings() {
-			SourceAbiFlags = append(SourceAbiFlags, "-I "+dir)
+			SourceAbiFlags = append(SourceAbiFlags, "-I"+dir)
+		}
+		for _, reexportedInclude := range extractExportIncludesFromFlags(library.sabi.Properties.ReexportedIncludeFlags) {
+			SourceAbiFlags = append(SourceAbiFlags, reexportedInclude)
 		}
 		exportedHeaderFlags := strings.Join(SourceAbiFlags, " ")
 		library.sAbiOutputFile = TransformDumpToLinkedDump(ctx, objs.sAbiDumpFiles, symbolFile, "current", fileName, exportedHeaderFlags)
 		if refSourceDumpFile.Valid() {
-			library.sAbiDiff = SourceAbiDiff(ctx, library.sAbiOutputFile.Path(), refSourceDumpFile.Path(), fileName)
+			unzippedRefDump := UnzipRefDump(ctx, refSourceDumpFile.Path(), fileName)
+			library.sAbiDiff = SourceAbiDiff(ctx, library.sAbiOutputFile.Path(), unzippedRefDump, fileName)
 		}
 	}
 }
diff --git a/cc/makevars.go b/cc/makevars.go
index 8bf034a..a1e97a5 100644
--- a/cc/makevars.go
+++ b/cc/makevars.go
@@ -42,6 +42,7 @@
 	ctx.Strict("RS_LLVM_PREBUILTS_VERSION", "${config.RSClangVersion}")
 	ctx.Strict("RS_LLVM_PREBUILTS_BASE", "${config.RSClangBase}")
 	ctx.Strict("RS_LLVM_PREBUILTS_PATH", "${config.RSLLVMPrebuiltsPath}")
+	ctx.Strict("RS_LLVM_INCLUDES", "${config.RSIncludePath}")
 	ctx.Strict("RS_CLANG", "${config.RSLLVMPrebuiltsPath}/clang")
 	ctx.Strict("RS_LLVM_AS", "${config.RSLLVMPrebuiltsPath}/llvm-as")
 	ctx.Strict("RS_LLVM_LINK", "${config.RSLLVMPrebuiltsPath}/llvm-link")
diff --git a/cc/sabi.go b/cc/sabi.go
index 7ae31c9..01ef737 100644
--- a/cc/sabi.go
+++ b/cc/sabi.go
@@ -22,7 +22,8 @@
 )
 
 type SAbiProperties struct {
-	CreateSAbiDumps bool `blueprint:"mutated"`
+	CreateSAbiDumps        bool `blueprint:"mutated"`
+	ReexportedIncludeFlags []string
 }
 
 type sabi struct {
@@ -45,7 +46,7 @@
 
 func sabiDepsMutator(mctx android.TopDownMutatorContext) {
 	if c, ok := mctx.Module().(*Module); ok &&
-		((inList(c.Name(), config.VndkLibraries())) || (inList(c.Name(), config.LLndkLibraries())) ||
+		(Bool(c.Properties.Vendor_available) || (inList(c.Name(), config.LLndkLibraries())) ||
 			(c.sabi != nil && c.sabi.Properties.CreateSAbiDumps)) {
 		mctx.VisitDirectDeps(func(m blueprint.Module) {
 			tag := mctx.OtherModuleDependencyTag(m)
diff --git a/cmd/multiproduct_kati/main.go b/cmd/multiproduct_kati/main.go
index b12628e..fb1c890 100644
--- a/cmd/multiproduct_kati/main.go
+++ b/cmd/multiproduct_kati/main.go
@@ -19,6 +19,7 @@
 	"context"
 	"flag"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -47,15 +48,20 @@
 var keep = flag.Bool("keep", false, "keep successful output files")
 
 var outDir = flag.String("out", "", "path to store output directories (defaults to tmpdir under $OUT when empty)")
+var alternateResultDir = flag.Bool("dist", false, "write select results to $DIST_DIR (or <out>/dist when empty)")
 
 var onlyConfig = flag.Bool("only-config", false, "Only run product config (not Soong or Kati)")
 var onlySoong = flag.Bool("only-soong", false, "Only run product config and Soong (not Kati)")
 
 var buildVariant = flag.String("variant", "eng", "build variant to use")
 
+const errorLeadingLines = 20
+const errorTrailingLines = 20
+
 type Product struct {
-	ctx    build.Context
-	config build.Config
+	ctx     build.Context
+	config  build.Config
+	logFile string
 }
 
 type Status struct {
@@ -82,7 +88,7 @@
 	s.total = total
 }
 
-func (s *Status) Fail(product string, err error) {
+func (s *Status) Fail(product string, err error, logFile string) {
 	s.Finish(product)
 
 	s.lock.Lock()
@@ -96,7 +102,26 @@
 	s.failed++
 	fmt.Fprintln(s.ctx.Stderr(), "FAILED:", product)
 	s.ctx.Verboseln("FAILED:", product)
-	s.ctx.Println(err)
+
+	if logFile != "" {
+		data, err := ioutil.ReadFile(logFile)
+		if err == nil {
+			lines := strings.Split(strings.TrimSpace(string(data)), "\n")
+			if len(lines) > errorLeadingLines+errorTrailingLines+1 {
+				lines[errorLeadingLines] = fmt.Sprintf("... skipping %d lines ...",
+					len(lines)-errorLeadingLines-errorTrailingLines)
+
+				lines = append(lines[:errorLeadingLines+1],
+					lines[len(lines)-errorTrailingLines:]...)
+			}
+			for _, line := range lines {
+				fmt.Fprintln(s.ctx.Stderr(), "> ", line)
+				s.ctx.Verboseln(line)
+			}
+		}
+	}
+
+	s.ctx.Print(err)
 }
 
 func (s *Status) Finish(product string) {
@@ -163,6 +188,13 @@
 
 		*outDir = filepath.Join(config.OutDir(), name)
 
+		// Ensure that the empty files exist in the output directory
+		// containing our output directory too. This is mostly for
+		// safety, but it also triggers the ninja_build file so that our
+		// build servers know that they can parse the output as if it
+		// were ninja output.
+		build.SetupOutDir(buildCtx, config)
+
 		if err := os.MkdirAll(*outDir, 0777); err != nil {
 			log.Fatalf("Failed to create tempdir: %v", err)
 		}
@@ -179,8 +211,15 @@
 	log.Println("Output directory:", *outDir)
 
 	build.SetupOutDir(buildCtx, config)
-	log.SetOutput(filepath.Join(config.OutDir(), "soong.log"))
-	trace.SetOutput(filepath.Join(config.OutDir(), "build.trace"))
+	if *alternateResultDir {
+		logsDir := filepath.Join(config.DistDir(), "logs")
+		os.MkdirAll(logsDir, 0777)
+		log.SetOutput(filepath.Join(logsDir, "soong.log"))
+		trace.SetOutput(filepath.Join(logsDir, "build.trace"))
+	} else {
+		log.SetOutput(filepath.Join(config.OutDir(), "soong.log"))
+		trace.SetOutput(filepath.Join(config.OutDir(), "build.trace"))
+	}
 
 	vars, err := build.DumpMakeVars(buildCtx, config, nil, nil, []string{"all_named_products"})
 	if err != nil {
@@ -198,24 +237,34 @@
 	for _, product := range products {
 		wg.Add(1)
 		go func(product string) {
+			var stdLog string
+
 			defer wg.Done()
 			defer logger.Recover(func(err error) {
-				status.Fail(product, err)
+				status.Fail(product, err, stdLog)
 			})
 
 			productOutDir := filepath.Join(config.OutDir(), product)
+			productLogDir := productOutDir
+			if *alternateResultDir {
+				productLogDir = filepath.Join(config.DistDir(), product)
+				if err := os.MkdirAll(productLogDir, 0777); err != nil {
+					log.Fatalf("Error creating log directory: %v", err)
+				}
+			}
 
 			if err := os.MkdirAll(productOutDir, 0777); err != nil {
 				log.Fatalf("Error creating out directory: %v", err)
 			}
 
-			f, err := os.Create(filepath.Join(productOutDir, "std.log"))
+			stdLog = filepath.Join(productLogDir, "std.log")
+			f, err := os.Create(stdLog)
 			if err != nil {
 				log.Fatalf("Error creating std.log: %v", err)
 			}
 
 			productLog := logger.New(&bytes.Buffer{})
-			productLog.SetOutput(filepath.Join(productOutDir, "soong.log"))
+			productLog.SetOutput(filepath.Join(productLogDir, "soong.log"))
 
 			productCtx := build.Context{&build.ContextImpl{
 				Context:        ctx,
@@ -230,7 +279,7 @@
 			productConfig.Lunch(productCtx, product, *buildVariant)
 
 			build.Build(productCtx, productConfig, build.BuildProductConfig)
-			productConfigs <- Product{productCtx, productConfig}
+			productConfigs <- Product{productCtx, productConfig, stdLog}
 		}(product)
 	}
 	go func() {
@@ -247,7 +296,7 @@
 			for product := range productConfigs {
 				func() {
 					defer logger.Recover(func(err error) {
-						status.Fail(product.config.TargetProduct(), err)
+						status.Fail(product.config.TargetProduct(), err, product.logFile)
 					})
 
 					buildWhat := 0
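
A note on the new failure output in Status.Fail above: when a product build fails, only the first errorLeadingLines and the last errorTrailingLines lines of its std.log are echoed, with a skip marker in between. A small self-contained sketch that mirrors the truncation logic, run against fabricated log contents:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

const errorLeadingLines = 20
const errorTrailingLines = 20

// truncateLog keeps the first errorLeadingLines and last errorTrailingLines
// lines and replaces the middle with a "... skipping N lines ..." marker,
// matching the logic added to Status.Fail.
func truncateLog(data string) []string {
	lines := strings.Split(strings.TrimSpace(data), "\n")
	if len(lines) > errorLeadingLines+errorTrailingLines+1 {
		lines[errorLeadingLines] = fmt.Sprintf("... skipping %d lines ...",
			len(lines)-errorLeadingLines-errorTrailingLines)
		lines = append(lines[:errorLeadingLines+1],
			lines[len(lines)-errorTrailingLines:]...)
	}
	return lines
}

func main() {
	// Fabricated log: 100 numbered lines, so the middle 60 get skipped.
	var buf bytes.Buffer
	for i := 1; i <= 100; i++ {
		fmt.Fprintf(&buf, "line %d\n", i)
	}
	for _, line := range truncateLog(buf.String()) {
		fmt.Println(">", line)
	}
}
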
diff --git a/root.bp b/root.bp
index ee7c239..08f2ff8 100644
--- a/root.bp
+++ b/root.bp
@@ -25,7 +25,7 @@
     "hardware/*",
     "libcore",
     "libnativehelper",
-    "packages/apps/HTMLViewer",
+    "packages/apps/*",
     "prebuilts/clang/host/linux-x86",
     "prebuilts/ndk",
     "prebuilts/sdk",
diff --git a/ui/build/Android.bp b/ui/build/Android.bp
index 7a83684..25520da 100644
--- a/ui/build/Android.bp
+++ b/ui/build/Android.bp
@@ -21,6 +21,7 @@
     ],
     srcs: [
         "build.go",
+        "cleanbuild.go",
         "config.go",
         "context.go",
         "environment.go",
@@ -29,6 +30,7 @@
         "kati.go",
         "make.go",
         "ninja.go",
+        "proc_sync.go",
         "signal.go",
         "soong.go",
         "util.go",
@@ -36,6 +38,7 @@
     testSrcs: [
         "environment_test.go",
         "util_test.go",
+        "proc_sync_test.go",
     ],
     darwin: {
         srcs: [
diff --git a/ui/build/build.go b/ui/build/build.go
index 598e342..83dbcb6 100644
--- a/ui/build/build.go
+++ b/ui/build/build.go
@@ -18,7 +18,6 @@
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"strings"
 	"text/template"
 )
 
@@ -92,60 +91,13 @@
 	}
 }
 
-// Since products and build variants (unfortunately) shared the same
-// PRODUCT_OUT staging directory, things can get out of sync if different
-// build configurations are built in the same tree. This function will
-// notice when the configuration has changed and call installclean to
-// remove the files necessary to keep things consistent.
-func installcleanIfNecessary(ctx Context, config Config) {
-	if inList("installclean", config.Arguments()) {
-		return
-	}
-
-	configFile := config.DevicePreviousProductConfig()
-	prefix := "PREVIOUS_BUILD_CONFIG := "
-	suffix := "\n"
-	currentProduct := prefix + config.TargetProduct() + "-" + config.TargetBuildVariant() + suffix
-
-	writeConfig := func() {
-		err := ioutil.WriteFile(configFile, []byte(currentProduct), 0777)
-		if err != nil {
-			ctx.Fatalln("Failed to write product config:", err)
-		}
-	}
-
-	prev, err := ioutil.ReadFile(configFile)
-	if err != nil {
-		if os.IsNotExist(err) {
-			writeConfig()
-			return
-		} else {
-			ctx.Fatalln("Failed to read previous product config:", err)
-		}
-	} else if string(prev) == currentProduct {
-		return
-	}
-
-	if disable, _ := config.Environment().Get("DISABLE_AUTO_INSTALLCLEAN"); disable == "true" {
-		ctx.Println("DISABLE_AUTO_INSTALLCLEAN is set; skipping auto-clean. Your tree may be in an inconsistent state.")
-		return
-	}
-
-	ctx.BeginTrace("installclean")
-	defer ctx.EndTrace()
-
-	prevConfig := strings.TrimPrefix(strings.TrimSuffix(string(prev), suffix), prefix)
-	currentConfig := strings.TrimPrefix(strings.TrimSuffix(currentProduct, suffix), prefix)
-
-	ctx.Printf("Build configuration changed: %q -> %q, forcing installclean\n", prevConfig, currentConfig)
-
-	cleanConfig := CopyConfig(ctx, config, "installclean")
-	cleanConfig.SetKatiArgs([]string{"installclean"})
-	cleanConfig.SetNinjaArgs([]string{"installclean"})
-
-	Build(ctx, cleanConfig, BuildKati|BuildNinja)
-
-	writeConfig()
+func help(ctx Context, config Config, what int) {
+	cmd := Command(ctx, config, "make",
+		"make", "-f", "build/core/help.mk")
+	cmd.Sandbox = makeSandbox
+	cmd.Stdout = ctx.Stdout()
+	cmd.Stderr = ctx.Stderr()
+	cmd.RunOrFatal()
 }
 
 // Build the tree. The 'what' argument can be used to choose which components of
@@ -155,29 +107,20 @@
 	ctx.Verboseln("Environment:", config.Environment().Environ())
 
 	if inList("help", config.Arguments()) {
-		cmd := Command(ctx, config, "make",
-			"make", "-f", "build/core/help.mk")
-		cmd.Sandbox = makeSandbox
-		cmd.Stdout = ctx.Stdout()
-		cmd.Stderr = ctx.Stderr()
-		cmd.RunOrFatal()
+		help(ctx, config, what)
 		return
 	} else if inList("clean", config.Arguments()) || inList("clobber", config.Arguments()) {
-		// We can't use os.RemoveAll, since we don't want to remove the
-		// output directory itself, in case it's a symlink. So just do
-		// exactly what make used to do.
-		cmd := Command(ctx, config, "rm -rf $OUT_DIR/*",
-			"/bin/bash", "-c", "rm -rf "+config.OutDir()+"/*")
-		cmd.Stdout = ctx.Stdout()
-		cmd.Stderr = ctx.Stderr()
-		cmd.RunOrFatal()
-		ctx.Println("Entire build directory removed.")
+		clean(ctx, config, what)
 		return
 	}
 
 	// Start getting java version as early as possible
 	getJavaVersions(ctx, config)
 
+	// Make sure that no other Soong process is running with the same output directory
+	buildLock := BecomeSingletonOrFail(ctx, config)
+	defer buildLock.Unlock()
+
 	SetupOutDir(ctx, config)
 
 	checkCaseSensitivity(ctx, config)
@@ -187,6 +130,16 @@
 		runMakeProductConfig(ctx, config)
 	}
 
+	if inList("installclean", config.Arguments()) {
+		installClean(ctx, config, what)
+		ctx.Println("Deleted images and staging directories.")
+		return
+	} else if inList("dataclean", config.Arguments()) {
+		dataClean(ctx, config, what)
+		ctx.Println("Deleted data files.")
+		return
+	}
+
 	if what&BuildSoong != 0 {
 		// Run Soong
 		runSoongBootstrap(ctx, config)
@@ -202,7 +155,7 @@
 	}
 
 	if what&BuildNinja != 0 {
-		installcleanIfNecessary(ctx, config)
+		installCleanIfNecessary(ctx, config)
 
 		// Write combined ninja file
 		createCombinedBuildNinjaFile(ctx, config)
diff --git a/ui/build/cleanbuild.go b/ui/build/cleanbuild.go
new file mode 100644
index 0000000..27b6d14
--- /dev/null
+++ b/ui/build/cleanbuild.go
@@ -0,0 +1,169 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+func removeGlobs(ctx Context, globs ...string) {
+	for _, glob := range globs {
+		files, err := filepath.Glob(glob)
+		if err != nil {
+			// Only possible error is ErrBadPattern
+			panic(fmt.Errorf("%q: %s", glob, err))
+		}
+
+		for _, file := range files {
+			err = os.RemoveAll(file)
+			if err != nil {
+				ctx.Fatalf("Failed to remove file %q: %v", file, err)
+			}
+		}
+	}
+}
+
+// Remove everything under the out directory. Don't remove the out directory
+// itself in case it's a symlink.
+func clean(ctx Context, config Config, what int) {
+	removeGlobs(ctx, filepath.Join(config.OutDir(), "*"))
+	ctx.Println("Entire build directory removed.")
+}
+
+func dataClean(ctx Context, config Config, what int) {
+	removeGlobs(ctx, filepath.Join(config.ProductOut(), "data", "*"))
+}
+
+// installClean deletes all of the installed files. The intent is to remove
+// files that may no longer be installed by the current build, either because
+// the user previously installed them, or because they were previously
+// installed by default but no longer are.
+//
+// This is faster than a full clean, since we're not deleting the
+// intermediates.  Instead of recompiling, we can just copy the results.
+func installClean(ctx Context, config Config, what int) {
+	dataClean(ctx, config, what)
+
+	if hostCrossOutPath := config.hostCrossOut(); hostCrossOutPath != "" {
+		hostCrossOut := func(path string) string {
+			return filepath.Join(hostCrossOutPath, path)
+		}
+		removeGlobs(ctx,
+			hostCrossOut("bin"),
+			hostCrossOut("coverage"),
+			hostCrossOut("lib*"),
+			hostCrossOut("nativetest*"))
+	}
+
+	hostOutPath := config.HostOut()
+	hostOut := func(path string) string {
+		return filepath.Join(hostOutPath, path)
+	}
+
+	productOutPath := config.ProductOut()
+	productOut := func(path string) string {
+		return filepath.Join(productOutPath, path)
+	}
+
+	// Host bin, frameworks, and lib* are intentionally omitted, since
+	// otherwise we'd have to rebuild any generated files created with
+	// those tools.
+	removeGlobs(ctx,
+		hostOut("obj/NOTICE_FILES"),
+		hostOut("obj/PACKAGING"),
+		hostOut("coverage"),
+		hostOut("cts"),
+		hostOut("nativetest*"),
+		hostOut("sdk"),
+		hostOut("sdk_addon"),
+		hostOut("testcases"),
+		hostOut("vts"),
+		productOut("*.img"),
+		productOut("*.ini"),
+		productOut("*.txt"),
+		productOut("*.xlb"),
+		productOut("*.zip"),
+		productOut("kernel"),
+		productOut("data"),
+		productOut("skin"),
+		productOut("obj/NOTICE_FILES"),
+		productOut("obj/PACKAGING"),
+		productOut("recovery"),
+		productOut("root"),
+		productOut("system"),
+		productOut("system_other"),
+		productOut("vendor"),
+		productOut("oem"),
+		productOut("obj/FAKE"),
+		productOut("breakpad"),
+		productOut("cache"),
+		productOut("coverage"),
+		productOut("installer"),
+		productOut("odm"),
+		productOut("sysloader"),
+		productOut("testcases"))
+}
+
+// Since products and build variants (unfortunately) share the same
+// PRODUCT_OUT staging directory, things can get out of sync if different
+// build configurations are built in the same tree. This function will
+// notice when the configuration has changed and call installclean to
+// remove the files necessary to keep things consistent.
+func installCleanIfNecessary(ctx Context, config Config) {
+	configFile := config.DevicePreviousProductConfig()
+	prefix := "PREVIOUS_BUILD_CONFIG := "
+	suffix := "\n"
+	currentProduct := prefix + config.TargetProduct() + "-" + config.TargetBuildVariant() + suffix
+
+	writeConfig := func() {
+		err := ioutil.WriteFile(configFile, []byte(currentProduct), 0666)
+		if err != nil {
+			ctx.Fatalln("Failed to write product config:", err)
+		}
+	}
+
+	prev, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			writeConfig()
+			return
+		} else {
+			ctx.Fatalln("Failed to read previous product config:", err)
+		}
+	} else if string(prev) == currentProduct {
+		return
+	}
+
+	if disable, _ := config.Environment().Get("DISABLE_AUTO_INSTALLCLEAN"); disable == "true" {
+		ctx.Println("DISABLE_AUTO_INSTALLCLEAN is set; skipping auto-clean. Your tree may be in an inconsistent state.")
+		return
+	}
+
+	ctx.BeginTrace("installclean")
+	defer ctx.EndTrace()
+
+	prevConfig := strings.TrimPrefix(strings.TrimSuffix(string(prev), suffix), prefix)
+	currentConfig := strings.TrimPrefix(strings.TrimSuffix(currentProduct, suffix), prefix)
+
+	ctx.Printf("Build configuration changed: %q -> %q, forcing installclean\n", prevConfig, currentConfig)
+
+	installClean(ctx, config, 0)
+
+	writeConfig()
+}
diff --git a/ui/build/config.go b/ui/build/config.go
index 51cff50..7e8091b 100644
--- a/ui/build/config.go
+++ b/ui/build/config.go
@@ -167,22 +167,6 @@
 	return Config{ret}
 }
 
-// CopyConfig copies the configuration from an existing configuration, but replaces
-// the Arguments() list with a new set. Useful if you need to run a different build
-// with the same state as an existing build config.
-func CopyConfig(ctx Context, config Config, args ...string) Config {
-	return Config{&configImpl{
-		arguments: args,
-		goma:      config.goma,
-		environ:   config.environ.Copy(),
-
-		parallel:  config.parallel,
-		keepGoing: config.keepGoing,
-		verbose:   config.verbose,
-		dist:      config.dist,
-	}}
-}
-
 // Lunch configures the environment for a specific product similarly to the
 // `lunch` bash function.
 func (c *configImpl) Lunch(ctx Context, product, variant string) {
@@ -369,8 +353,37 @@
 	return filepath.Join(c.SoongOutDir(), "make_vars-"+c.TargetProduct()+".mk")
 }
 
+func (c *configImpl) ProductOut() string {
+	if buildType, ok := c.environ.Get("TARGET_BUILD_TYPE"); ok && buildType == "debug" {
+		return filepath.Join(c.OutDir(), "debug", "target", "product", c.TargetDevice())
+	} else {
+		return filepath.Join(c.OutDir(), "target", "product", c.TargetDevice())
+	}
+}
+
 func (c *configImpl) DevicePreviousProductConfig() string {
-	return filepath.Join(c.OutDir(), "target", "product", c.TargetDevice(), "previous_build_config.mk")
+	return filepath.Join(c.ProductOut(), "previous_build_config.mk")
+}
+
+func (c *configImpl) hostOutRoot() string {
+	if buildType, ok := c.environ.Get("HOST_BUILD_TYPE"); ok && buildType == "debug" {
+		return filepath.Join(c.OutDir(), "debug", "host")
+	} else {
+		return filepath.Join(c.OutDir(), "host")
+	}
+}
+
+func (c *configImpl) HostOut() string {
+	return filepath.Join(c.hostOutRoot(), c.HostPrebuiltTag())
+}
+
+// This probably needs to be multi-valued, so not exporting it for now
+func (c *configImpl) hostCrossOut() string {
+	if runtime.GOOS == "linux" {
+		return filepath.Join(c.hostOutRoot(), "windows-x86")
+	} else {
+		return ""
+	}
 }
 
 func (c *configImpl) HostPrebuiltTag() string {
diff --git a/ui/build/java.go b/ui/build/java.go
index 5a09b1a..473af01 100644
--- a/ui/build/java.go
+++ b/ui/build/java.go
@@ -66,10 +66,14 @@
 	var required_java_version string
 	var java_version_regexp *regexp.Regexp
 	var javac_version_regexp *regexp.Regexp
-	if legacy, _ := config.Environment().Get("LEGACY_USE_JAVA7"); legacy != "" {
-		required_java_version = "1.7"
-		java_version_regexp = regexp.MustCompile(`^java .*[ "]1\.7[\. "$]`)
-		javac_version_regexp = regexp.MustCompile(`[ "]1\.7[\. "$]`)
+
+	oj9_env, _ := config.Environment().Get("EXPERIMENTAL_USE_OPENJDK9")
+	experimental_use_openjdk9 := oj9_env != ""
+
+	if experimental_use_openjdk9 {
+		required_java_version = "9"
+		java_version_regexp = regexp.MustCompile(`^java .* "9.*"`)
+		javac_version_regexp = regexp.MustCompile(`^javac 9`)
 	} else {
 		required_java_version = "1.8"
 		java_version_regexp = regexp.MustCompile(`[ "]1\.8[\. "$]`)
@@ -101,7 +105,10 @@
 	}
 
 	if runtime.GOOS == "linux" {
-		if !strings.Contains(java_version, "openjdk") {
+		// Early access builds of OpenJDK 9 do not contain the string "openjdk" in the
+		// version name. TODO(tobiast): Reconsider once the OpenJDK 9 toolchain is stable.
+		// http://b/62123342
+		if !strings.Contains(java_version, "openjdk") && !experimental_use_openjdk9 {
 			ctx.Println("*******************************************************")
 			ctx.Println("You are attempting to build with an unsupported JDK.")
 			ctx.Println()
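
The OpenJDK 9 switch above keys off the version strings reported by the java and javac binaries. A quick sketch of how the new patterns are intended to match; the sample version strings below are representative guesses, not captured output:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	javaVersionRegexp := regexp.MustCompile(`^java .* "9.*"`)
	javacVersionRegexp := regexp.MustCompile(`^javac 9`)

	// Plausible first lines for an OpenJDK 9 early-access toolchain.
	fmt.Println(javaVersionRegexp.MatchString(`java version "9-ea"`)) // true
	fmt.Println(javacVersionRegexp.MatchString(`javac 9-ea`))         // true

	// A 1.8 toolchain should not match the 9 patterns.
	fmt.Println(javaVersionRegexp.MatchString(`java version "1.8.0_45"`)) // false
}
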
diff --git a/ui/build/make.go b/ui/build/make.go
index 2b39926..edf6d96 100644
--- a/ui/build/make.go
+++ b/ui/build/make.go
@@ -81,9 +81,10 @@
 	// Variables to export into the environment of Kati/Ninja
 	exportEnvVars := []string{
 		// So that we can use the correct TARGET_PRODUCT if it's been
-		// modified by PRODUCT-* arguments
+		// modified by PRODUCT-*/APP-* arguments
 		"TARGET_PRODUCT",
 		"TARGET_BUILD_VARIANT",
+		"TARGET_BUILD_APPS",
 
 		// compiler wrappers set up by make
 		"CC_WRAPPER",
diff --git a/ui/build/proc_sync.go b/ui/build/proc_sync.go
new file mode 100644
index 0000000..857786d
--- /dev/null
+++ b/ui/build/proc_sync.go
@@ -0,0 +1,143 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"android/soong/ui/logger"
+)
+
+// This file provides cross-process synchronization methods,
+// i.e. making sure only one Soong process is running for a given output directory.
+
+func BecomeSingletonOrFail(ctx Context, config Config) (lock *fileLock) {
+	lockingInfo, err := newLock(config.OutDir())
+	if err != nil {
+		ctx.Logger.Fatal(err)
+	}
+	err = lockSynchronous(*lockingInfo, newSleepWaiter(lockfilePollDuration, lockfileTimeout), ctx.Logger)
+	if err != nil {
+		ctx.Logger.Fatal(err)
+	}
+	return lockingInfo
+}
+
+var lockfileTimeout = time.Second * 10
+var lockfilePollDuration = time.Second
+
+type lockable interface {
+	tryLock() error
+	Unlock() error
+	description() string
+}
+
+var _ lockable = (*fileLock)(nil)
+
+type fileLock struct {
+	File *os.File
+}
+
+func (l fileLock) description() (path string) {
+	return l.File.Name()
+}
+func (l fileLock) tryLock() (err error) {
+	return syscall.Flock(int(l.File.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+}
+func (l fileLock) Unlock() (err error) {
+	return l.File.Close()
+}
+
+func lockSynchronous(lock lockable, waiter waiter, logger logger.Logger) (err error) {
+
+	waited := false
+
+	for {
+		err = lock.tryLock()
+		if err == nil {
+			if waited {
+				// If we had to wait at all, then when the wait is done, we inform the user
+				logger.Printf("Acquired lock on %v; previous Soong process must have completed. Continuing...\n", lock.description())
+			}
+			return nil
+		}
+
+		waited = true
+
+		done, description := waiter.checkDeadline()
+
+		if done {
+			return fmt.Errorf("Tried to lock %s, but timed out %s. Make sure no other Soong process is using it",
+				lock.description(), waiter.summarize())
+		} else {
+			logger.Printf("Waiting up to %s to lock %v to ensure no other Soong process is running in the same output directory\n", description, lock.description())
+			waiter.wait()
+		}
+	}
+}
+
+func newLock(basedir string) (lock *fileLock, err error) {
+	lockPath := filepath.Join(basedir, ".lock")
+
+	os.MkdirAll(basedir, 0777)
+	lockfileDescriptor, err := os.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0666)
+	if err != nil {
+		return nil, errors.New("failed to open " + lockPath)
+	}
+	lockingInfo := &fileLock{File: lockfileDescriptor}
+
+	return lockingInfo, nil
+}
+
+type waiter interface {
+	wait()
+	checkDeadline() (done bool, remainder string)
+	summarize() (summary string)
+}
+
+type sleepWaiter struct {
+	sleepInterval time.Duration
+	deadline      time.Time
+
+	totalWait time.Duration
+}
+
+var _ waiter = (*sleepWaiter)(nil)
+
+func newSleepWaiter(interval time.Duration, duration time.Duration) (waiter *sleepWaiter) {
+	return &sleepWaiter{interval, time.Now().Add(duration), duration}
+}
+
+func (s sleepWaiter) wait() {
+	time.Sleep(s.sleepInterval)
+}
+func (s *sleepWaiter) checkDeadline() (done bool, remainder string) {
+	remainingSleep := s.deadline.Sub(time.Now())
+	numSecondsRounded := math.Floor(remainingSleep.Seconds()*10+0.5) / 10
+	if remainingSleep > 0 {
+		return false, fmt.Sprintf("%vs", numSecondsRounded)
+	} else {
+		return true, ""
+	}
+}
+func (s sleepWaiter) summarize() (summary string) {
+	return fmt.Sprintf("polling every %v for %v", s.sleepInterval, s.totalWait)
+}
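
The singleton lock introduced in proc_sync.go is an advisory flock(2) on a .lock file under the output directory, taken with LOCK_NB so that lockSynchronous can poll and keep the user informed instead of blocking silently. A tiny standalone illustration of the try-lock semantics on Linux (the demo path is invented):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// tryFlock attempts a non-blocking exclusive lock; with LOCK_NB the call
// fails immediately with EWOULDBLOCK instead of waiting for the holder.
func tryFlock(f *os.File) error {
	return syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
}

func mustOpen(path string) *os.File {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		panic(err)
	}
	return f
}

func main() {
	// Invented demo location; Soong locks $OUT_DIR/.lock.
	dir := filepath.Join(os.TempDir(), "soong_lock_demo")
	os.MkdirAll(dir, 0777)
	path := filepath.Join(dir, ".lock")

	first := mustOpen(path)
	second := mustOpen(path) // a second, independent open file description

	fmt.Println(tryFlock(first))  // <nil>: lock acquired
	fmt.Println(tryFlock(second)) // resource temporarily unavailable: still held

	first.Close() // closing the descriptor releases the flock
	fmt.Println(tryFlock(second)) // <nil>: acquired after release
}
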
diff --git a/ui/build/proc_sync_test.go b/ui/build/proc_sync_test.go
new file mode 100644
index 0000000..857bea3
--- /dev/null
+++ b/ui/build/proc_sync_test.go
@@ -0,0 +1,241 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"syscall"
+	"testing"
+
+	"android/soong/ui/logger"
+)
+
+// some util methods and data structures that aren't directly part of a test
+func makeLockDir() (path string, err error) {
+	return ioutil.TempDir("", "soong_lock_test")
+}
+func lockOrFail(t *testing.T) (lock fileLock) {
+	lockDir, err := makeLockDir()
+	var lockPointer *fileLock
+	if err == nil {
+		lockPointer, err = newLock(lockDir)
+	}
+	if err != nil {
+		os.RemoveAll(lockDir)
+		t.Fatalf("Failed to create lock: %v", err)
+	}
+
+	return *lockPointer
+}
+func removeTestLock(fileLock fileLock) {
+	lockdir := filepath.Dir(fileLock.File.Name())
+	os.RemoveAll(lockdir)
+}
+
+// countWaiter only exists for the purposes of testing lockSynchronous
+type countWaiter struct {
+	numWaitsElapsed int
+	maxNumWaits     int
+}
+
+func newCountWaiter(count int) (waiter *countWaiter) {
+	return &countWaiter{0, count}
+}
+
+func (c *countWaiter) wait() {
+	c.numWaitsElapsed++
+}
+func (c *countWaiter) checkDeadline() (done bool, remainder string) {
+	numWaitsRemaining := c.maxNumWaits - c.numWaitsElapsed
+	if numWaitsRemaining < 1 {
+		return true, ""
+	}
+	return false, fmt.Sprintf("%v waits remain", numWaitsRemaining)
+}
+func (c countWaiter) summarize() (summary string) {
+	return fmt.Sprintf("waiting %v times", c.maxNumWaits)
+}
+
+// countLock only exists for the purposes of testing lockSynchronous
+type countLock struct {
+	nextIndex    int
+	successIndex int
+}
+
+var _ lockable = (*countLock)(nil)
+
+// returns a countLock that succeeds on iteration <index>
+func testLockCountingTo(index int) (lock *countLock) {
+	return &countLock{nextIndex: 0, successIndex: index}
+}
+func (c *countLock) description() (message string) {
+	return fmt.Sprintf("counter that counts from %v to %v", c.nextIndex, c.successIndex)
+}
+func (c *countLock) tryLock() (err error) {
+	currentIndex := c.nextIndex
+	c.nextIndex++
+	if currentIndex == c.successIndex {
+		return nil
+	}
+	return fmt.Errorf("Lock busy: %s", c.description())
+}
+func (c *countLock) Unlock() (err error) {
+	if c.nextIndex == c.successIndex {
+		return nil
+	}
+	return fmt.Errorf("Not locked: %s", c.description())
+}
+
+// end of util methods
+
+// start of tests
+
+// simple test
+func TestGetLock(t *testing.T) {
+	lockfile := lockOrFail(t)
+	defer removeTestLock(lockfile)
+}
+
+// a more complicated test that spans multiple processes
+var lockPathVariable = "LOCK_PATH"
+var successStatus = 0
+var unexpectedError = 1
+var busyStatus = 2
+
+func TestTrylock(t *testing.T) {
+	lockpath := os.Getenv(lockPathVariable)
+	if len(lockpath) < 1 {
+		checkTrylockMainProcess(t)
+	} else {
+		getLockAndExit(lockpath)
+	}
+}
+
+// the portion of TestTrylock that runs in the main process
+func checkTrylockMainProcess(t *testing.T) {
+	var err error
+	lockfile := lockOrFail(t)
+	defer removeTestLock(lockfile)
+	lockdir := filepath.Dir(lockfile.File.Name())
+	otherAcquired, message, err := forkAndGetLock(lockdir)
+	if err != nil {
+		t.Fatalf("Unexpected error in subprocess trying to lock uncontested fileLock: %v. Subprocess output: %q", err, message)
+	}
+	if !otherAcquired {
+		t.Fatalf("Subprocess failed to lock uncontested fileLock. Subprocess output: %q", message)
+	}
+
+	err = lockfile.tryLock()
+	if err != nil {
+		t.Fatalf("Failed to lock fileLock: %v", err)
+	}
+
+	reacquired, message, err := forkAndGetLock(filepath.Dir(lockfile.File.Name()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if reacquired {
+		t.Fatalf("Permitted locking fileLock twice. Subprocess output: %q", message)
+	}
+
+	err = lockfile.Unlock()
+	if err != nil {
+		t.Fatalf("Error unlocking fileLock: %v", err)
+	}
+
+	reacquired, message, err = forkAndGetLock(filepath.Dir(lockfile.File.Name()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reacquired {
+		t.Fatalf("Subprocess failed to acquire lock after it was released by the main process. Subprocess output: %q", message)
+	}
+}
+func forkAndGetLock(lockDir string) (acquired bool, subprocessOutput []byte, err error) {
+	cmd := exec.Command(os.Args[0], "-test.run=TestTrylock")
+	cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", lockPathVariable, lockDir))
+	subprocessOutput, err = cmd.CombinedOutput()
+	exitStatus := successStatus
+	if exitError, ok := err.(*exec.ExitError); ok {
+		if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok {
+			exitStatus = waitStatus.ExitStatus()
+		}
+	}
+	if exitStatus == successStatus {
+		return true, subprocessOutput, nil
+	} else if exitStatus == busyStatus {
+		return false, subprocessOutput, nil
+	} else {
+		return false, subprocessOutput, fmt.Errorf("Unexpected status %v", exitStatus)
+	}
+}
+
+// This function runs in a different process. See TestTrylock
+func getLockAndExit(lockpath string) {
+	fmt.Printf("Will lock path %q\n", lockpath)
+	lockfile, err := newLock(lockpath)
+	exitStatus := unexpectedError
+	if err == nil {
+		err = lockfile.tryLock()
+		if err == nil {
+			exitStatus = successStatus
+		} else {
+			exitStatus = busyStatus
+		}
+	}
+	fmt.Printf("Tried to lock path %s. Received error %v. Exiting with status %v\n", lockpath, err, exitStatus)
+	os.Exit(exitStatus)
+}
+
+func TestLockFirstTrySucceeds(t *testing.T) {
+	noopLogger := logger.New(ioutil.Discard)
+	lock := testLockCountingTo(0)
+	waiter := newCountWaiter(0)
+	err := lockSynchronous(lock, waiter, noopLogger)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if waiter.numWaitsElapsed != 0 {
+		t.Fatalf("Incorrect number of waits elapsed; expected 0, got %v", waiter.numWaitsElapsed)
+	}
+}
+func TestLockThirdTrySucceeds(t *testing.T) {
+	noopLogger := logger.New(ioutil.Discard)
+	lock := testLockCountingTo(2)
+	waiter := newCountWaiter(2)
+	err := lockSynchronous(lock, waiter, noopLogger)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if waiter.numWaitsElapsed != 2 {
+		t.Fatalf("Incorrect number of waits elapsed; expected 2, got %v", waiter.numWaitsElapsed)
+	}
+}
+func TestLockTimedOut(t *testing.T) {
+	noopLogger := logger.New(ioutil.Discard)
+	lock := testLockCountingTo(3)
+	waiter := newCountWaiter(2)
+	err := lockSynchronous(lock, waiter, noopLogger)
+	if err == nil {
+		t.Fatalf("Appeared to have acquired lock on iteration %v which should not be available until iteration %v", waiter.numWaitsElapsed, lock.successIndex)
+	}
+	if waiter.numWaitsElapsed != waiter.maxNumWaits {
+		t.Fatalf("Waited an incorrect number of times; expected %v, got %v", waiter.maxNumWaits, waiter.numWaitsElapsed)
+	}
+}