Merge changes Ib9d1863c,I2a6eb327

* changes:
  SDK targets should inherit from core_64_bit.mk
  Fix issue in symlinking 64 bit executables.
diff --git a/CleanSpec.mk b/CleanSpec.mk
index ec0533d..6f436ce 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -273,6 +273,9 @@
 # ims-common.jar added to BOOTCLASSPATH
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/ETC/init.environ.rc_intermediates)
 
+# Change ro.zygote for core_64_bit.mk from zygote32_64 to zygote64_32
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/core/64_bit_blacklist.mk b/core/64_bit_blacklist.mk
deleted file mode 100644
index b7420cd..0000000
--- a/core/64_bit_blacklist.mk
+++ /dev/null
@@ -1,16 +0,0 @@
-ifneq ($(TARGET_2ND_ARCH),)
-
-# misc build errors
-_64_bit_directory_blacklist += \
-	device/generic/goldfish/opengl \
-	device/generic/goldfish/camera \
-
-_64_bit_directory_blacklist_pattern := $(addsuffix %,$(_64_bit_directory_blacklist))
-
-define directory_is_64_bit_blacklisted
-$(if $(filter $(_64_bit_directory_blacklist_pattern),$(1)),true)
-endef
-else
-define directory_is_64_bit_blacklisted
-endef
-endif
diff --git a/core/Makefile b/core/Makefile
index 446bcb0..394f924 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -634,10 +634,15 @@
 endif
 
 ifeq ($(INTERNAL_USERIMAGES_USE_EXT),true)
-INTERNAL_USERIMAGES_DEPS := $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(SIMG2IMG) $(E2FSCK)
+INTERNAL_USERIMAGES_DEPS := $(SIMG2IMG)
+INTERNAL_USERIMAGES_DEPS += $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(E2FSCK)
+ifeq ($(TARGET_USERIMAGES_USE_F2FS),true)
+INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG) $(MAKE_F2FS)
+endif
 else
 INTERNAL_USERIMAGES_DEPS := $(MKYAFFS2)
 endif
+
 INTERNAL_USERIMAGES_BINARY_PATHS := $(sort $(dir $(INTERNAL_USERIMAGES_DEPS)))
 
 ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY))
@@ -652,6 +657,7 @@
 define generate-userimage-prop-dictionary
 $(if $(INTERNAL_USERIMAGES_EXT_VARIANT),$(hide) echo "fs_type=$(INTERNAL_USERIMAGES_EXT_VARIANT)" >> $(1))
 $(if $(BOARD_SYSTEMIMAGE_PARTITION_SIZE),$(hide) echo "system_size=$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
 $(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(hide) echo "userdata_size=$(BOARD_USERDATAIMAGE_PARTITION_SIZE)" >> $(1))
 $(if $(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "cache_fs_type=$(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
 $(if $(BOARD_CACHEIMAGE_PARTITION_SIZE),$(hide) echo "cache_size=$(BOARD_CACHEIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1335,8 +1341,10 @@
 	$(hide) ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
 	@# Zip everything up, preserving symlinks
 	$(hide) (cd $(zip_root) && zip -qry ../$(notdir $@) .)
-	@# Run fs_config on all the system, boot ramdisk, and recovery ramdisk files in the zip, and save the output
+	@# Run fs_config on all the system, vendor, boot ramdisk,
+	@# and recovery ramdisk files in the zip, and save the output
 	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM/" } /^SYSTEM\// {print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/filesystem_config.txt
+	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="VENDOR/" } /^VENDOR\// {print "vendor/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/vendor_filesystem_config.txt
 	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/boot_filesystem_config.txt
 	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/recovery_filesystem_config.txt
 	$(hide) (cd $(zip_root) && zip -q ../$(notdir $@) META/*filesystem_config.txt)
@@ -1368,7 +1376,7 @@
 
 $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS)
 	@echo "Package OTA: $@"
-	$(hide) MKBOOTIMG=$(BOARD_CUSTOM_MKBOOTIMG) \
+	$(hide) MKBOOTIMG=$(MKBOOTIMG) \
 	   ./build/tools/releasetools/ota_from_target_files -v \
 	   --block \
 	   -p $(HOST_OUT) \
@@ -1397,7 +1405,7 @@
 
 $(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS)
 	@echo "Package: $@"
-	$(hide) MKBOOTIMG=$(BOARD_CUSTOM_MKBOOTIMG) \
+	$(hide) MKBOOTIMG=$(MKBOOTIMG) \
 	   ./build/tools/releasetools/img_from_target_files -v \
 	   -p $(HOST_OUT) \
 	   $(BUILT_TARGET_FILES_PACKAGE) $@
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 548ed13..fbf9770 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -405,7 +405,7 @@
 #                 be up-to-date.
 ifdef LOCAL_IS_HOST_MODULE
 ifeq ($(USE_CORE_LIB_BOOTCLASSPATH),true)
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_BOOTCLASSPATH := -bootclasspath $(call java-lib-deps,core-hostdex,$(LOCAL_IS_HOST_MODULE))
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_BOOTCLASSPATH := -bootclasspath $(call java-lib-deps,core-libart-hostdex,$(LOCAL_IS_HOST_MODULE))
 
 full_shared_java_libs := $(call java-lib-files,$(LOCAL_JAVA_LIBRARIES),$(LOCAL_IS_HOST_MODULE))
 full_java_lib_deps := $(call java-lib-deps,$(LOCAL_JAVA_LIBRARIES),$(LOCAL_IS_HOST_MODULE))
diff --git a/core/clang/config.mk b/core/clang/config.mk
index 1125368..1de46f0 100644
--- a/core/clang/config.mk
+++ b/core/clang/config.mk
@@ -24,9 +24,13 @@
 CLANG_CONFIG_EXTRA_CPPFLAGS :=
 CLANG_CONFIG_EXTRA_LDFLAGS :=
 
-CLANG_CONFIG_EXTRA_CFLAGS := \
+CLANG_CONFIG_EXTRA_CFLAGS += \
   -D__compiler_offsetof=__builtin_offsetof
 
+# Help catch common 32/64-bit errors.
+CLANG_CONFIG_EXTRA_CFLAGS += \
+  -Werror=int-conversion
+
 CLANG_CONFIG_UNKNOWN_CFLAGS := \
   -funswitch-loops \
   -fno-tree-sra \
diff --git a/core/cleanbuild.mk b/core/cleanbuild.mk
index fc56604..0932aa1 100644
--- a/core/cleanbuild.mk
+++ b/core/cleanbuild.mk
@@ -81,6 +81,28 @@
     $(info Clean step: $(INTERNAL_CLEAN_STEP.$(step))) \
     $(shell $(INTERNAL_CLEAN_STEP.$(step))) \
    )
+
+  # Rewrite the clean step for the second arch.
+  ifdef TARGET_2ND_ARCH
+  # $(1): the clean step cmd
+  # $(2): the prefix to search for
+  # $(3): the prefix to replace with
+  define -cs-rewrite-cleanstep
+  $(if $(filter $(2)/%,$(1)),\
+    $(eval _crs_new_cmd := $(patsubst $(2)/%,$(3)/%,$(1)))\
+    $(info Clean step: $(_crs_new_cmd))\
+    $(shell $(_crs_new_cmd)))
+  endef
+  $(foreach step,$(steps), \
+    $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$(TARGET_OUT_INTERMEDIATES),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES))\
+    $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$(TARGET_OUT_SHARED_LIBRARIES),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SHARED_LIBRARIES))\
+    $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$(TARGET_OUT_VENDOR_SHARED_LIBRARIES),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES))\
+    $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES),$(TARGET_OUT_INTERMEDIATES))\
+    $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SHARED_LIBRARIES),$(TARGET_OUT_SHARED_LIBRARIES))\
+    $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES),$(TARGET_OUT_VENDOR_SHARED_LIBRARIES))\
+    )
+  endif
+  _crs_new_cmd :=
   steps :=
 endif
 CURRENT_CLEAN_BUILD_VERSION :=
diff --git a/core/config.mk b/core/config.mk
index d56f1f8..64dc68e 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -254,7 +254,7 @@
 #
 # Note that this assumes that the 2ND_CPU_ABI for a 64 bit target
 # is always 32 bits. If this isn't the case, these variables should
-# be overriden in the boarc configuration.
+# be overridden in the board configuration.
 ifeq (,$(TARGET_CPU_ABI_LIST_64_BIT))
   ifeq (true|true,$(TARGET_IS_64_BIT)|$(TARGET_SUPPORTS_64_BIT_APPS))
     TARGET_CPU_ABI_LIST_64_BIT := $(TARGET_CPU_ABI) $(TARGET_CPU_ABI2)
@@ -386,6 +386,8 @@
 MKEXT2IMG := $(HOST_OUT_EXECUTABLES)/genext2fs$(HOST_EXECUTABLE_SUFFIX)
 MAKE_EXT4FS := $(HOST_OUT_EXECUTABLES)/make_ext4fs$(HOST_EXECUTABLE_SUFFIX)
 MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg.sh
+MAKE_F2FS := $(HOST_OUT_EXECUTABLES)/make_f2fs$(HOST_EXECUTABLE_SUFFIX)
+MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh
 MKEXT2BOOTIMG := external/genext2fs/mkbootimg_ext2.sh
 SIMG2IMG := $(HOST_OUT_EXECUTABLES)/simg2img$(HOST_EXECUTABLE_SUFFIX)
 E2FSCK := $(HOST_OUT_EXECUTABLES)/e2fsck$(HOST_EXECUTABLE_SUFFIX)
@@ -530,7 +532,7 @@
 
 # allow overriding default Java libraries on a per-target basis
 ifeq ($(TARGET_DEFAULT_JAVA_LIBRARIES),)
-  TARGET_DEFAULT_JAVA_LIBRARIES := core core-junit ext framework framework2
+  TARGET_DEFAULT_JAVA_LIBRARIES := core-libart core-junit ext framework framework2
 endif
 
 TARGET_CPU_SMP ?= true
diff --git a/core/definitions.mk b/core/definitions.mk
index 73d4828..ce287dc 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -424,7 +424,7 @@
     $(if $(_idfName),, \
         $(error $(LOCAL_PATH): Name not defined in call to intermediates-dir-for)) \
     $(eval _idfPrefix := $(if $(strip $(3)),HOST,TARGET)) \
-    $(eval _idf2ndArchPrefix := $(if $(call directory_is_64_bit_blacklisted,$(LOCAL_PATH))$(strip $(5)),$(TARGET_2ND_ARCH_VAR_PREFIX))) \
+    $(eval _idf2ndArchPrefix := $(if $(strip $(5)),$(TARGET_2ND_ARCH_VAR_PREFIX))) \
     $(if $(filter $(_idfPrefix)-$(_idfClass),$(COMMON_MODULE_CLASSES))$(4), \
         $(eval _idfIntBase := $($(_idfPrefix)_OUT_COMMON_INTERMEDIATES)) \
       ,$(if $(filter $(_idfClass),SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP),\
diff --git a/core/droiddoc.mk b/core/droiddoc.mk
index 2a9cfc6..ea5c2f2 100644
--- a/core/droiddoc.mk
+++ b/core/droiddoc.mk
@@ -65,8 +65,8 @@
     $(full_target): PRIVATE_BOOTCLASSPATH := $(call java-lib-files, sdk_v$(LOCAL_SDK_VERSION))
   endif
 else
-  LOCAL_JAVA_LIBRARIES := core ext framework framework2 $(LOCAL_JAVA_LIBRARIES)
-  $(full_target): PRIVATE_BOOTCLASSPATH := $(call java-lib-files, core)
+  LOCAL_JAVA_LIBRARIES := core-libart ext framework framework2 $(LOCAL_JAVA_LIBRARIES)
+  $(full_target): PRIVATE_BOOTCLASSPATH := $(call java-lib-files, core-libart)
 endif  # LOCAL_SDK_VERSION
 LOCAL_JAVA_LIBRARIES := $(sort $(LOCAL_JAVA_LIBRARIES))
 
diff --git a/core/host_dalvik_java_library.mk b/core/host_dalvik_java_library.mk
index 5024086..61eb3ff 100644
--- a/core/host_dalvik_java_library.mk
+++ b/core/host_dalvik_java_library.mk
@@ -27,7 +27,7 @@
 #######################################
 
 ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
-  LOCAL_JAVA_LIBRARIES +=  core-hostdex
+  LOCAL_JAVA_LIBRARIES +=  core-libart-hostdex
 endif
 
 full_classes_compiled_jar := $(intermediates.COMMON)/classes-full-debug.jar
diff --git a/core/host_dalvik_static_java_library.mk b/core/host_dalvik_static_java_library.mk
index 05c4b16..3ae74e4 100644
--- a/core/host_dalvik_static_java_library.mk
+++ b/core/host_dalvik_static_java_library.mk
@@ -21,7 +21,7 @@
 #
 
 USE_CORE_LIB_BOOTCLASSPATH := true
-LOCAL_JAVA_LIBRARIES += core-hostdex
+LOCAL_JAVA_LIBRARIES += core-libart-hostdex
 
 include $(BUILD_SYSTEM)/host_java_library.mk
 
diff --git a/core/main.mk b/core/main.mk
index ec26949..99153ea 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -92,8 +92,6 @@
 # and host information.
 include $(BUILD_SYSTEM)/config.mk
 
-include $(BUILD_SYSTEM)/64_bit_blacklist.mk
-
 # This allows us to force a clean build - included after the config.mk
 # environment setup is done, but before we generate any dependencies.  This
 # file does the rm -rf inline so the deps which are all done below will
diff --git a/core/module_arch_supported.mk b/core/module_arch_supported.mk
index 15fd648..a5e4a7c 100644
--- a/core/module_arch_supported.mk
+++ b/core/module_arch_supported.mk
@@ -28,8 +28,6 @@
 my_module_arch_supported := false
 else ifeq ($($(my_prefix)IS_64_BIT)|$(my_module_multilib),|64)
 my_module_arch_supported := false
-else ifeq ($(call directory_is_64_bit_blacklisted,$(LOCAL_PATH)),true)
-my_module_arch_supported := false
 endif
 else # LOCAL_2ND_ARCH_VAR_PREFIX
 ifeq ($(my_module_multilib),first)
diff --git a/core/pdk_config.mk b/core/pdk_config.mk
index 6a874fb..fd63820 100644
--- a/core/pdk_config.mk
+++ b/core/pdk_config.mk
@@ -53,7 +53,7 @@
 # all paths under out dir
 PDK_PLATFORM_JAVA_ZIP_JAVA_TARGET_LIB_DIR += \
 	target/common/obj/JAVA_LIBRARIES/android_stubs_current_intermediates \
-	target/common/obj/JAVA_LIBRARIES/core_intermediates \
+	target/common/obj/JAVA_LIBRARIES/core-libart_intermediates \
 	target/common/obj/JAVA_LIBRARIES/core-junit_intermediates \
 	target/common/obj/JAVA_LIBRARIES/ext_intermediates \
 	target/common/obj/JAVA_LIBRARIES/framework_intermediates \
diff --git a/core/product.mk b/core/product.mk
index 6a6327d..7eef2e5 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -250,6 +250,7 @@
 	BOARD_BOOTIMAGE_PARTITION_SIZE \
 	BOARD_RECOVERYIMAGE_PARTITION_SIZE \
 	BOARD_SYSTEMIMAGE_PARTITION_SIZE \
+	BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE \
 	BOARD_USERDATAIMAGE_PARTITION_SIZE \
 	BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE \
 	BOARD_CACHEIMAGE_PARTITION_SIZE \
diff --git a/core/proguard_basic_keeps.flags b/core/proguard_basic_keeps.flags
index 622d4ee..4a85db0 100644
--- a/core/proguard_basic_keeps.flags
+++ b/core/proguard_basic_keeps.flags
@@ -59,3 +59,6 @@
 # platform version.  We know about them, and they are safe.
 # See proguard-android.txt in the SDK package.
 -dontwarn android.support.**
+
+# Less spammy.
+-dontnote
diff --git a/core/tasks/cts.mk b/core/tasks/cts.mk
index 4b008a9..6cf171a 100644
--- a/core/tasks/cts.mk
+++ b/core/tasks/cts.mk
@@ -101,7 +101,7 @@
 	$(PRIVATE_PARAMS) CollectAllTests $(1) $(2) $(3) "$(4)" $(5) $(6)
 endef
 
-CORE_INTERMEDIATES :=$(call intermediates-dir-for,JAVA_LIBRARIES,core,,COMMON)
+CORE_INTERMEDIATES :=$(call intermediates-dir-for,JAVA_LIBRARIES,core-libart,,COMMON)
 CONSCRYPT_INTERMEDIATES :=$(call intermediates-dir-for,JAVA_LIBRARIES,conscrypt,,COMMON)
 BOUNCYCASTLE_INTERMEDIATES :=$(call intermediates-dir-for,JAVA_LIBRARIES,bouncycastle,,COMMON)
 APACHEXML_INTERMEDIATES :=$(call intermediates-dir-for,JAVA_LIBRARIES,apache-xml,,COMMON)
diff --git a/envsetup.sh b/envsetup.sh
index 3419c32..8dc546f 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -966,6 +966,11 @@
     $GDB_CMD -x "$@"
 }
 
+function get_symbols_directory()
+{
+    echo $(get_abs_build_var TARGET_OUT_UNSTRIPPED)
+}
+
 # process the symbolic link of /proc/$PID/exe and use the host file tool to
 # determine whether it is a 32-bit or 64-bit executable. It returns "" or "64"
 # which can be conveniently used as suffix.
@@ -973,14 +978,9 @@
 {
     local PID="$1"
     if [ "$PID" ] ; then
-        local EXE=`adb shell ls -l /proc/$PID/exe \
-                   | tr -d '\r' \
-                   | cut -d'>' -f2 \
-                   | tr -d ' ' \
-                   | cut -d'/' -f4`
-
-        local OUT_EXE_SYMBOLS=$(get_abs_build_var TARGET_OUT_EXECUTABLES_UNSTRIPPED)
-        local IS64BIT=`file $OUT_EXE_SYMBOLS/$EXE | grep "64-bit"`
+        local EXE=`adb shell readlink /proc/$PID/exe`
+        local EXE_DIR=`get_abs_build_var PRODUCT_OUT`
+        local IS64BIT=`file "$EXE_DIR$EXE" | grep "64-bit"`
         if [ "$IS64BIT" != "" ]; then
             echo "64"
         else
@@ -1000,7 +1000,7 @@
    local OUT_SYMBOLS=$(get_abs_build_var TARGET_OUT_UNSTRIPPED)
    local OUT_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_SHARED_LIBRARIES_UNSTRIPPED)
    local OUT_VENDOR_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_VENDOR_SHARED_LIBRARIES_UNSTRIPPED)
-   local OUT_EXE_SYMBOLS=$(get_abs_build_var TARGET_OUT_EXECUTABLES_UNSTRIPPED)
+   local OUT_EXE_SYMBOLS=$(get_symbols_directory)
    local PREBUILTS=$(get_abs_build_var ANDROID_PREBUILTS)
    local ARCH=$(get_build_var TARGET_ARCH)
    local GDB
@@ -1080,6 +1080,7 @@
        else
            WHICH_GDB=$ANDROID_TOOLCHAIN_2ND_ARCH/$GDB
        fi
+
        gdbwrapper $WHICH_GDB "$OUT_ROOT/gdbclient.cmds" "$OUT_EXE_SYMBOLS/$EXE"
   else
        echo "Unable to determine build system output dir."
diff --git a/target/board/generic_x86/BoardConfig.mk b/target/board/generic_x86/BoardConfig.mk
index 2dc7615..5d091f5 100644
--- a/target/board/generic_x86/BoardConfig.mk
+++ b/target/board/generic_x86/BoardConfig.mk
@@ -42,7 +42,10 @@
 BOARD_FLASH_BLOCK_SIZE := 512
 TARGET_USERIMAGES_SPARSE_EXT_DISABLED := true
 
-BOARD_SEPOLICY_DIRS += build/target/board/generic_x86/sepolicy
+BOARD_SEPOLICY_DIRS += \
+        build/target/board/generic/sepolicy \
+        build/target/board/generic_x86/sepolicy
+
 BOARD_SEPOLICY_UNION += \
         device.te \
         domain.te \
@@ -53,4 +56,5 @@
         qemud.te \
         rild.te \
         shell.te \
-        system_server.te
+        system_server.te \
+        zygote.te
diff --git a/target/board/generic_x86/sepolicy/device.te b/target/board/generic_x86/sepolicy/device.te
deleted file mode 100644
index e4af13c..0000000
--- a/target/board/generic_x86/sepolicy/device.te
+++ /dev/null
@@ -1 +0,0 @@
-type qemu_device, dev_type;
diff --git a/target/board/generic_x86/sepolicy/domain.te b/target/board/generic_x86/sepolicy/domain.te
index c3161b3..0bc8d87 100644
--- a/target/board/generic_x86/sepolicy/domain.te
+++ b/target/board/generic_x86/sepolicy/domain.te
@@ -1,4 +1 @@
-# For /sys/qemu_trace files in the emulator.
-allow domain sysfs_writable:file rw_file_perms;
 allow domain cpuctl_device:dir search;
-allow domain qemu_device:chr_file rw_file_perms;
diff --git a/target/board/generic_x86/sepolicy/file.te b/target/board/generic_x86/sepolicy/file.te
deleted file mode 100644
index 6fad80a..0000000
--- a/target/board/generic_x86/sepolicy/file.te
+++ /dev/null
@@ -1 +0,0 @@
-type qemud_socket, file_type;
diff --git a/target/board/generic_x86/sepolicy/file_contexts b/target/board/generic_x86/sepolicy/file_contexts
deleted file mode 100644
index f204cde..0000000
--- a/target/board/generic_x86/sepolicy/file_contexts
+++ /dev/null
@@ -1,4 +0,0 @@
-/dev/qemu_.*		u:object_r:qemu_device:s0
-/dev/socket/qemud	u:object_r:qemud_socket:s0
-/system/bin/qemud	u:object_r:qemud_exec:s0
-/sys/qemu_trace(/.*)?	--	u:object_r:sysfs_writable:s0
diff --git a/target/board/generic_x86/sepolicy/qemud.te b/target/board/generic_x86/sepolicy/qemud.te
deleted file mode 100644
index 4ff02ec..0000000
--- a/target/board/generic_x86/sepolicy/qemud.te
+++ /dev/null
@@ -1,6 +0,0 @@
-# qemu support daemon
-type qemud, domain;
-type qemud_exec, exec_type, file_type;
-
-init_daemon_domain(qemud)
-unconfined_domain(qemud)
diff --git a/target/board/generic_x86/sepolicy/rild.te b/target/board/generic_x86/sepolicy/rild.te
deleted file mode 100644
index e148b6c..0000000
--- a/target/board/generic_x86/sepolicy/rild.te
+++ /dev/null
@@ -1 +0,0 @@
-unix_socket_connect(rild, qemud, qemud)
diff --git a/target/board/generic_x86/sepolicy/shell.te b/target/board/generic_x86/sepolicy/shell.te
deleted file mode 100644
index b246d7e..0000000
--- a/target/board/generic_x86/sepolicy/shell.te
+++ /dev/null
@@ -1 +0,0 @@
-allow shell serial_device:chr_file rw_file_perms;
diff --git a/target/board/generic_x86/sepolicy/system_server.te b/target/board/generic_x86/sepolicy/system_server.te
index 0ede971..5d98a14 100644
--- a/target/board/generic_x86/sepolicy/system_server.te
+++ b/target/board/generic_x86/sepolicy/system_server.te
@@ -1,2 +1 @@
 allow system_server self:process execmem;
-unix_socket_connect(system_server, qemud, qemud)
diff --git a/target/product/core_64_bit.mk b/target/product/core_64_bit.mk
index 971b6bd..76e2a36 100644
--- a/target/product/core_64_bit.mk
+++ b/target/product/core_64_bit.mk
@@ -22,12 +22,12 @@
 # For now this will allow 64-bit apps, but still compile all apps with JNI
 # for 32-bit only.
 
-# Copy the 32-bit primary, 64-bit secondary zygote startup script
-PRODUCT_COPY_FILES += system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
+# Copy the 64-bit primary, 32-bit secondary zygote startup script
+PRODUCT_COPY_FILES += system/core/rootdir/init.zygote64_32.rc:root/init.zygote64_32.rc
 
-# Set the zygote property to select the 32-bit primary, 64-bit secondary script
+# Set the zygote property to select the 64-bit primary, 32-bit secondary script
 # This line must be parsed before the one in core_minimal.mk
-PRODUCT_DEFAULT_PROPERTY_OVERRIDES += ro.zygote=zygote32_64
+PRODUCT_DEFAULT_PROPERTY_OVERRIDES += ro.zygote=zygote64_32
 
 TARGET_SUPPORTS_32_BIT_APPS := true
 TARGET_SUPPORTS_64_BIT_APPS := true
diff --git a/target/product/core_minimal.mk b/target/product/core_minimal.mk
index 165f6ac..368e468 100644
--- a/target/product/core_minimal.mk
+++ b/target/product/core_minimal.mk
@@ -59,6 +59,8 @@
     libwilhelm \
     logd \
     make_ext4fs \
+    e2fsck \
+    resize2fs \
     mms-common \
     screencap \
     sensorservice \
@@ -74,7 +76,7 @@
 
 # The order of PRODUCT_BOOT_JARS matters.
 PRODUCT_BOOT_JARS := \
-    core \
+    core-libart \
     conscrypt \
     okhttp \
     core-junit \
diff --git a/target/product/core_tiny.mk b/target/product/core_tiny.mk
index 6ea0c3b..affef3c 100644
--- a/target/product/core_tiny.mk
+++ b/target/product/core_tiny.mk
@@ -64,6 +64,8 @@
     libdrmframework_jni \
     libdrmframework \
     make_ext4fs \
+    e2fsck \
+    resize2fs \
     nullwebview \
     screencap \
     sensorservice \
@@ -75,7 +77,7 @@
 
 # The order matters
 PRODUCT_BOOT_JARS := \
-    core \
+    core-libart \
     conscrypt \
     okhttp \
     core-junit \
diff --git a/target/product/full_x86_64.mk b/target/product/full_x86_64.mk
index cd401a8..d9c0c1e 100755
--- a/target/product/full_x86_64.mk
+++ b/target/product/full_x86_64.mk
@@ -23,10 +23,6 @@
 # that isn't a wifi connection. This will instruct init.rc to enable the
 # network connection so that you can use it with ADB
 
-PRODUCT_DEFAULT_PROPERTY_OVERRIDES += ro.zygote=zygote64_32
-PRODUCT_COPY_FILES += system/core/rootdir/init.zygote64_32.rc:root/init.zygote64_32.rc
-
-
 $(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
 $(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base_telephony.mk)
 $(call inherit-product, $(SRC_TARGET_DIR)/board/generic_x86_64/device.mk)
diff --git a/target/product/runtime_common.mk b/target/product/runtime_common.mk
index faa6fe8..9ae182a 100644
--- a/target/product/runtime_common.mk
+++ b/target/product/runtime_common.mk
@@ -39,20 +39,3 @@
     libssl \
     libz \
     okhttp
-
-# host-only dependencies
-ifeq ($(WITH_HOST_DALVIK),true)
-    PRODUCT_PACKAGES += \
-        apache-xml-hostdex \
-        bouncycastle-hostdex \
-        conscrypt-hostdex \
-        dalvik \
-        libcrypto-host \
-        libexpat-host \
-        libicui18n-host \
-        libicuuc-host \
-        libjavacore \
-        libssl-host \
-        libz-host \
-        okhttp-hostdex
-endif
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 1a7c925..de4cf17 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -22,10 +22,4 @@
         dex2oat \
         oatdump
 
-# host-only dependencies
-ifeq ($(WITH_HOST_DALVIK),true)
-    PRODUCT_PACKAGES += \
-        core-libart-hostdex
-endif
-
 include $(SRC_TARGET_DIR)/product/runtime_common.mk
diff --git a/target/product/runtime_libdvm.mk b/target/product/runtime_libdvm.mk
index e7647b8..638d7d7 100644
--- a/target/product/runtime_libdvm.mk
+++ b/target/product/runtime_libdvm.mk
@@ -21,10 +21,4 @@
     libdvm \
     dexopt
 
-# host-only dependencies
-ifeq ($(WITH_HOST_DALVIK),true)
-    PRODUCT_PACKAGES += \
-        core-hostdex
-endif
-
 include $(SRC_TARGET_DIR)/product/runtime_common.mk
diff --git a/tools/droiddoc/templates-sdk/assets/css/default.css b/tools/droiddoc/templates-sdk/assets/css/default.css
index 1c45596..47cef96 100644
--- a/tools/droiddoc/templates-sdk/assets/css/default.css
+++ b/tools/droiddoc/templates-sdk/assets/css/default.css
@@ -444,7 +444,7 @@
   border-top: 1px solid #ccc;
   margin-top: 10px;
   padding-top:10px;
-  height: 30px; }
+  width:100%; }
 
 .content-footer .col-9 {
   margin-left:0;
@@ -455,9 +455,15 @@
 .content-footer.wrap {
   width:940px;
 }
+.content-footer .plus-container {
+  margin:5px 0 0;
+  text-align:right;
+  float:right;
+}
 
 .paging-links {
-  position: relative; }
+  position: relative;
+  height:30px; }
   .paging-links a {
     position: absolute; }
   .paging-links a,
@@ -482,7 +488,7 @@
     .paging-links .prev-page-link {
       left: -15px; }
     .paging-links .next-page-link {
-      right: 0px; }
+      right: 0; }
     .next-page-link:after,
     .start-class-link:after,
     .start-course-link:after,
@@ -499,6 +505,9 @@
     .next-page-link.inline:after {
       content: none; }
 
+  .content-footer .paging-links .next-page-link {
+    left:0;
+  }
 
   .training-nav-top a {
     display:block;
@@ -537,7 +546,6 @@
 
   .paging-links a.start-class-link {
     width:100%;
-    text-align:right;
   }
 
   /* list of classes on course landing page */
@@ -625,7 +633,6 @@
 
   .content-footer.next-class a.next-class-link {
     display:block;
-    float:right;
     text-transform:uppercase;
   }
 
diff --git a/tools/droiddoc/templates-sdk/assets/js/docs.js b/tools/droiddoc/templates-sdk/assets/js/docs.js
index fb9f5b2..42cd29e 100644
--- a/tools/droiddoc/templates-sdk/assets/js/docs.js
+++ b/tools/droiddoc/templates-sdk/assets/js/docs.js
@@ -332,11 +332,15 @@
                           .click(function() { return false; });
       if ($nextLink.length) {
         $('.next-class-link').attr('href',$nextLink.attr('href'))
-                             .removeClass("hide").append($nextLink.html());
+                             .removeClass("hide")
+                             .append(": " + $nextLink.html());
         $('.next-class-link').find('.new').empty();
       }
     } else {
-      $('.next-page-link').attr('href', $nextLink.attr('href')).removeClass("hide");
+      $('.next-page-link').attr('href', $nextLink.attr('href'))
+                          .removeClass("hide");
+      // for the footer link, also add the next page title
+      $('.content-footer .next-page-link').append(": " + $nextLink.html());
     }
 
     if (!startClass && $prevLink.length) {
@@ -2538,14 +2542,14 @@
 
 /* Adjust the scroll position to account for sticky header, only if the hash matches an id */
 function offsetScrollForSticky() {
-  var hash = location.hash;
-  var $matchingElement = $(hash);
+  var hash = escape(location.hash.substr(1));
+  var $matchingElement = $("#"+hash);
   // If there's no element with the hash as an ID, then look for an <a name=''> with it.
   if ($matchingElement.length < 1) {
-    $matchingElement = $('a[name="' + hash.substr(1) + '"]');
+    $matchingElement = $('a[name="' + hash + '"]');
   }
-  // Sanity check that hash is a real hash and that there's an element with that ID on the page
-  if ((hash.indexOf("#") == 0) && $matchingElement.length) {
+  // Sanity check that there's an element with that ID on the page
+  if ($matchingElement.length) {
     // If the position of the target element is near the top of the page (<20px, where we expect it
     // to be because we need to move it down 60px to become in view), then move it down 60px
     if (Math.abs($matchingElement.offset().top - $(window).scrollTop()) < 20) {
diff --git a/tools/droiddoc/templates-sdk/docpage.cs b/tools/droiddoc/templates-sdk/docpage.cs
index 97ad8f1..f1e34e2 100644
--- a/tools/droiddoc/templates-sdk/docpage.cs
+++ b/tools/droiddoc/templates-sdk/docpage.cs
@@ -157,25 +157,9 @@
                     if:fullpage ?>wrap<?cs
                     else ?>layout-content-row<?cs /if ?>"
                     itemscope itemtype="http://schema.org/SiteNavigationElement">
-        <div class="layout-content-col <?cs
-                    if:fullpage ?>col-16<?cs
-                    elif:training||guide ?>col-8<?cs
-                    else ?>col-9<?cs /if ?>" style="padding-top:4px">
-          <?cs if:!page.noplus ?><?cs if:fullpage ?><style>#___plusone_0 {float:right !important;}</style><?cs /if ?>
-            <div class="g-plusone" data-size="medium"></div>
-          <?cs /if ?>
-        </div>
         <?cs if:!fullscreen ?>
-        <div class="paging-links layout-content-col col-4">
+        <div class="paging-links layout-content-col col-10">
           <?cs if:(design||training||walkthru) && !page.landing && !page.trainingcourse && !footer.hide ?>
-            <a href="#" class="prev-page-link hide"
-                zh-tw-lang="上一堂課"
-                zh-cn-lang="上一课"
-                ru-lang="Предыдущий"
-                ko-lang="이전"
-                ja-lang="前へ"
-                es-lang="Anterior"
-                >Previous</a>
             <a href="#" class="next-page-link hide"
                 zh-tw-lang="下一堂課"
                 zh-cn-lang="下一课"
@@ -194,13 +178,18 @@
                 >Get started</a>
           <?cs /if ?>
         </div>
+        <div class="layout-content-col plus-container col-2" >
+          <?cs if:!page.noplus ?><?cs if:fullpage ?><style>#___plusone_0 {float:right !important;}</style><?cs /if ?>
+            <div class="g-plusone" data-size="medium"></div>
+          <?cs /if ?>
+        </div>
         <?cs /if ?>
       </div>
 
       <?cs # for training classes, provide a different kind of link when the next page is a different class ?>
       <?cs if:training && !page.article ?>
       <div class="layout-content-row content-footer next-class" style="display:none" itemscope itemtype="http://schema.org/SiteNavigationElement">
-          <a href="#" class="next-class-link hide">Next class: </a>
+          <a href="#" class="next-class-link hide">Next class</a>
       </div>
       <?cs /if ?>
 
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 6b593e3..f8cba44 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -256,6 +256,9 @@
       build_command.append(fc_config)
     elif "selinux_fc" in prop_dict:
       build_command.append(prop_dict["selinux_fc"])
+  elif fs_type.startswith("f2fs"):
+    build_command = ["mkf2fsuserimg.sh"]
+    build_command.extend([out_file, prop_dict["partition_size"]])
   else:
     build_command = ["mkyaffs2image", "-f"]
     if prop_dict.get("mkyaffs2_extra_flags", None):
@@ -325,7 +328,9 @@
     copy_prop("fs_type", "fs_type")
     copy_prop("system_size", "partition_size")
   elif mount_point == "data":
+    # Copy the generic fs type first, then override with the specific one if available.
     copy_prop("fs_type", "fs_type")
+    copy_prop("userdata_fs_type", "fs_type")
     copy_prop("userdata_size", "partition_size")
   elif mount_point == "cache":
     copy_prop("cache_fs_type", "fs_type")
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index efba1fe..701a9cb 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -968,7 +968,8 @@
 
 # map recovery.fstab's fs_types to mount/format "partition types"
 PARTITION_TYPES = { "yaffs2": "MTD", "mtd": "MTD",
-                    "ext4": "EMMC", "emmc": "EMMC" }
+                    "ext4": "EMMC", "emmc": "EMMC",
+                    "f2fs": "EMMC" }
 
 def GetTypeAndDevice(mount_point, info):
   fstab = info["fstab"]
@@ -1008,14 +1009,14 @@
   p.communicate()
   assert p.returncode == 0, "Couldn't compress patch"
 
-def MakeSystemPatch(source_file, target_file):
+def MakePartitionPatch(source_file, target_file, partition):
   with tempfile.NamedTemporaryFile() as output_file:
     XDelta3(source_file.name, target_file.name, output_file.name)
     XZ(output_file.name)
     with open(output_file.name + ".xz") as patch_file:
       patch_data = patch_file.read()
       os.unlink(patch_file.name)
-      return File("system.muimg.p", patch_data)
+      return File(partition + ".muimg.p", patch_data)
 
 def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                       info_dict=None):
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 43e8542..8620812 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -203,11 +203,10 @@
                           p.device, p.length, p.mount_point))
 
   def WipeBlockDevice(self, partition):
-    if partition != "/system":
-      raise ValueError(("WipeBlockDevice currently only works "
-                        "on /system, not %s\n") % (partition,))
+    if partition not in ("/system", "/vendor"):
+      raise ValueError(("WipeBlockDevice doesn't work on %s\n") % (partition,))
     fstab = self.info.get("fstab", None)
-    size = self.info.get("system_size", None)
+    size = self.info.get(partition.lstrip("/") + "_size", None)
     device = fstab[partition].device
 
     self.script.append('wipe_block_device("%s", %s);' % (device, size))
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 2ca9316..6043355 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -59,9 +59,21 @@
   data = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict, sparse=sparse)
   common.ZipWriteStr(output_zip, "system.img", data)
 
-
 def BuildSystem(input_dir, info_dict, sparse=True, map_file=None):
-  print "creating system.img..."
+  return CreateImage(input_dir, info_dict, "system",
+                     sparse=sparse, map_file=map_file)
+
+def AddVendor(output_zip, sparse=True):
+  data = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict, sparse=sparse)
+  common.ZipWriteStr(output_zip, "vendor.img", data)
+
+def BuildVendor(input_dir, info_dict, sparse=True, map_file=None):
+  return CreateImage(input_dir, info_dict, "vendor",
+                     sparse=sparse, map_file=map_file)
+
+
+def CreateImage(input_dir, info_dict, what, sparse=True, map_file=None):
+  print "creating " + what + ".img..."
 
   img = tempfile.NamedTemporaryFile()
 
@@ -69,8 +81,8 @@
   # mkyaffs2image.  It wants "system" but we have a directory named
   # "SYSTEM", so create a symlink.
   try:
-    os.symlink(os.path.join(input_dir, "SYSTEM"),
-               os.path.join(input_dir, "system"))
+    os.symlink(os.path.join(input_dir, what.upper()),
+               os.path.join(input_dir, what))
   except OSError, e:
       # bogus error on my mac version?
       #   File "./build/tools/releasetools/img_from_target_files", line 86, in AddSystem
@@ -79,22 +91,28 @@
     if (e.errno == errno.EEXIST):
       pass
 
-  image_props = build_image.ImagePropFromGlobalDict(info_dict, "system")
+  image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
   fstab = info_dict["fstab"]
   if fstab:
-    image_props["fs_type" ] = fstab["/system"].fs_type
+    image_props["fs_type" ] = fstab["/" + what].fs_type
 
-  fs_config = os.path.join(input_dir, "META/filesystem_config.txt")
+  if what == "system":
+    fs_config_prefix = ""
+  else:
+    fs_config_prefix = what + "_"
+
+  fs_config = os.path.join(
+      input_dir, "META/" + fs_config_prefix + "filesystem_config.txt")
   if not os.path.exists(fs_config): fs_config = None
 
   fc_config = os.path.join(input_dir, "BOOT/RAMDISK/file_contexts")
   if not os.path.exists(fc_config): fc_config = None
 
-  succ = build_image.BuildImage(os.path.join(input_dir, "system"),
+  succ = build_image.BuildImage(os.path.join(input_dir, what),
                                 image_props, img.name,
                                 fs_config=fs_config,
                                 fc_config=fc_config)
-  assert succ, "build system.img image failed"
+  assert succ, "build " + what + ".img image failed"
 
   mapdata = None
 
@@ -104,7 +122,7 @@
   else:
     success, name = build_image.UnsparseImage(img.name, replace=False)
     if not success:
-      assert False, "unsparsing system.img failed"
+      assert False, "unsparsing " + what + ".img failed"
 
     if map_file:
       mmap = tempfile.NamedTemporaryFile()
@@ -131,45 +149,6 @@
     return mapdata, data
 
 
-def AddVendor(output_zip):
-  """Turn the contents of VENDOR into vendor.img and store it in
-  output_zip."""
-
-  image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
-                                                    "vendor")
-  # The build system has to explicitly request for vendor.img.
-  if "fs_type" not in image_props:
-    return
-
-  print "creating vendor.img..."
-
-  img = tempfile.NamedTemporaryFile()
-
-  # The name of the directory it is making an image out of matters to
-  # mkyaffs2image.  It wants "vendor" but we have a directory named
-  # "VENDOR", so create a symlink or an empty directory if VENDOR does not
-  # exist.
-  if not os.path.exists(os.path.join(OPTIONS.input_tmp, "vendor")):
-    if os.path.exists(os.path.join(OPTIONS.input_tmp, "VENDOR")):
-      os.symlink(os.path.join(OPTIONS.input_tmp, "VENDOR"),
-                 os.path.join(OPTIONS.input_tmp, "vendor"))
-    else:
-      os.mkdir(os.path.join(OPTIONS.input_tmp, "vendor"))
-
-  img = tempfile.NamedTemporaryFile()
-
-  fstab = OPTIONS.info_dict["fstab"]
-  if fstab:
-    image_props["fs_type" ] = fstab["/vendor"].fs_type
-  succ = build_image.BuildImage(os.path.join(OPTIONS.input_tmp, "vendor"),
-                                image_props, img.name)
-  assert succ, "build vendor.img image failed"
-
-  common.CheckSize(img.name, "vendor.img", OPTIONS.info_dict)
-  output_zip.write(img.name, "vendor.img")
-  img.close()
-
-
 def AddUserdata(output_zip):
   """Create an empty userdata image and store it in output_zip."""
 
@@ -287,10 +266,21 @@
   if recovery_image:
     recovery_image.AddToZip(output_zip)
 
+  def banner(s):
+    print "\n\n++++ " + s + " ++++\n\n"
+
   if not bootable_only:
+    banner("AddSystem")
     AddSystem(output_zip)
-    AddVendor(output_zip)
+    try:
+      input_zip.getinfo("VENDOR/")
+      banner("AddVendor")
+      AddVendor(output_zip)
+    except KeyError:
+      pass   # no vendor partition for this device
+    banner("AddUserdata")
     AddUserdata(output_zip)
+    banner("AddCache")
     AddCache(output_zip)
     CopyInfo(output_zip)
 
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
index 87de2f6..85888f8 100755
--- a/tools/releasetools/ota_from_target_files
+++ b/tools/releasetools/ota_from_target_files
@@ -159,50 +159,21 @@
     return result
   return None
 
-class Item:
-  """Items represent the metadata (user, group, mode) of files and
-  directories in the system image."""
-  ITEMS = {}
-  def __init__(self, name, dir=False):
-    self.name = name
-    self.uid = None
-    self.gid = None
-    self.mode = None
-    self.selabel = None
-    self.capabilities = None
-    self.dir = dir
+class ItemSet:
+  def __init__(self, partition, fs_config):
+    self.partition = partition
+    self.fs_config = fs_config
+    self.ITEMS = {}
 
-    if name:
-      self.parent = Item.Get(os.path.dirname(name), dir=True)
-      self.parent.children.append(self)
-    else:
-      self.parent = None
-    if dir:
-      self.children = []
+  def Get(self, name, dir=False):
+    if name not in self.ITEMS:
+      self.ITEMS[name] = Item(self, name, dir=dir)
+    return self.ITEMS[name]
 
-  def Dump(self, indent=0):
-    if self.uid is not None:
-      print "%s%s %d %d %o" % ("  "*indent, self.name, self.uid, self.gid, self.mode)
-    else:
-      print "%s%s %s %s %s" % ("  "*indent, self.name, self.uid, self.gid, self.mode)
-    if self.dir:
-      print "%s%s" % ("  "*indent, self.descendants)
-      print "%s%s" % ("  "*indent, self.best_subtree)
-      for i in self.children:
-        i.Dump(indent=indent+1)
-
-  @classmethod
-  def Get(cls, name, dir=False):
-    if name not in cls.ITEMS:
-      cls.ITEMS[name] = Item(name, dir=dir)
-    return cls.ITEMS[name]
-
-  @classmethod
-  def GetMetadata(cls, input_zip):
-
+  def GetMetadata(self, input_zip):
     # The target_files contains a record of what the uid,
     # gid, and mode are supposed to be.
-    output = input_zip.read("META/filesystem_config.txt")
+    output = input_zip.read(self.fs_config)
 
     for line in output.split("\n"):
       if not line: continue
@@ -220,7 +191,7 @@
         if key == "capabilities":
           capabilities = value
 
-      i = cls.ITEMS.get(name, None)
+      i = self.ITEMS.get(name, None)
       if i is not None:
         i.uid = int(uid)
         i.gid = int(gid)
@@ -231,11 +202,44 @@
           i.children.sort(key=lambda i: i.name)
 
     # set metadata for the files generated by this script.
-    i = cls.ITEMS.get("system/recovery-from-boot.p", None)
+    i = self.ITEMS.get("system/recovery-from-boot.p", None)
     if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0644, None, None
-    i = cls.ITEMS.get("system/etc/install-recovery.sh", None)
+    i = self.ITEMS.get("system/etc/install-recovery.sh", None)
     if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0544, None, None
 
+
+class Item:
+  """Items represent the metadata (user, group, mode) of files and
+  directories in the system image."""
+  def __init__(self, itemset, name, dir=False):
+    self.itemset = itemset
+    self.name = name
+    self.uid = None
+    self.gid = None
+    self.mode = None
+    self.selabel = None
+    self.capabilities = None
+    self.dir = dir
+
+    if name:
+      self.parent = itemset.Get(os.path.dirname(name), dir=True)
+      self.parent.children.append(self)
+    else:
+      self.parent = None
+    if dir:
+      self.children = []
+
+  def Dump(self, indent=0):
+    if self.uid is not None:
+      print "%s%s %d %d %o" % ("  "*indent, self.name, self.uid, self.gid, self.mode)
+    else:
+      print "%s%s %s %s %s" % ("  "*indent, self.name, self.uid, self.gid, self.mode)
+    if self.dir:
+      print "%s%s" % ("  "*indent, self.descendants)
+      print "%s%s" % ("  "*indent, self.best_subtree)
+      for i in self.children:
+        i.Dump(indent=indent+1)
+
   def CountChildMetadata(self):
     """Count up the (uid, gid, mode, selabel, capabilities) tuples for
     all children and determine the best strategy for using set_perm_recursive and
@@ -320,9 +324,8 @@
     recurse(self, (-1, -1, -1, -1, None, None))
 
 
-def CopySystemFiles(input_zip, output_zip=None,
-                    substitute=None):
-  """Copies files underneath system/ in the input zip to the output
+def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
+  """Copies files for the partition in the input zip to the output
   zip.  Populates the Item class with their metadata, and returns a
   list of symlinks.  output_zip may be None, in which case the copy is
   skipped (but the other side effects still happen).  substitute is an
@@ -332,15 +335,17 @@
 
   symlinks = []
 
+  partition = itemset.partition
+
   for info in input_zip.infolist():
-    if info.filename.startswith("SYSTEM/"):
+    if info.filename.startswith(partition.upper() + "/"):
       basefilename = info.filename[7:]
       if IsSymlink(info):
         symlinks.append((input_zip.read(info.filename),
-                         "/system/" + basefilename))
+                         "/" + partition + "/" + basefilename))
       else:
         info2 = copy.copy(info)
-        fn = info2.filename = "system/" + basefilename
+        fn = info2.filename = partition + "/" + basefilename
         if substitute and fn in substitute and substitute[fn] is None:
           continue
         if output_zip is not None:
@@ -350,9 +355,9 @@
             data = input_zip.read(info.filename)
           output_zip.writestr(info2, data)
         if fn.endswith("/"):
-          Item.Get(fn[:-1], dir=True)
+          itemset.Get(fn[:-1], dir=True)
         else:
-          Item.Get(fn, dir=False)
+          itemset.Get(fn, dir=False)
 
   symlinks.sort()
   return symlinks
@@ -387,6 +392,13 @@
   except KeyError:
     return False
 
+def HasVendorPartition(target_files_zip):
+  try:
+    target_files_zip.getinfo("VENDOR/")
+    return True
+  except KeyError:
+    return False
+
 def GetOemProperty(name, oem_props, oem_dict, info_dict):
   if oem_props is not None and name in oem_props:
     return oem_dict[name]
@@ -489,10 +501,13 @@
 
   if OPTIONS.wipe_user_data:
     system_progress -= 0.1
+  if HasVendorPartition(input_zip):
+    system_progress -= 0.1
 
   if "selinux_fc" in OPTIONS.info_dict:
     WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip)
 
+  system_items = ItemSet("system", "META/filesystem_config.txt")
   script.ShowProgress(system_progress, 0)
   if block_based:
     mapdata, data = img_from_target_files.BuildSystem(
@@ -510,7 +525,7 @@
       script.UnpackPackageDir("recovery", "/system")
     script.UnpackPackageDir("system", "/system")
 
-    symlinks = CopySystemFiles(input_zip, output_zip)
+    symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
     script.MakeSymlinks(symlinks)
 
   boot_img = common.GetBootableImage("boot.img", "boot.img",
@@ -519,13 +534,37 @@
   if not block_based:
     def output_sink(fn, data):
       common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-      Item.Get("system/" + fn, dir=False)
+      system_items.Get("system/" + fn, dir=False)
 
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
                              recovery_img, boot_img)
 
-    Item.GetMetadata(input_zip)
-    Item.Get("system").SetPermissions(script)
+    system_items.GetMetadata(input_zip)
+    system_items.Get("system").SetPermissions(script)
+
+  if HasVendorPartition(input_zip):
+    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
+    script.ShowProgress(0.1, 0)
+
+    if block_based:
+      mapdata, data = img_from_target_files.BuildVendor(
+          OPTIONS.input_tmp, OPTIONS.info_dict,
+          sparse=False, map_file=True)
+
+      common.ZipWriteStr(output_zip, "vendor.map", mapdata)
+      common.ZipWriteStr(output_zip, "vendor.muimg", data)
+      script.WipeBlockDevice("/vendor")
+      script.WriteRawImage("/vendor", "vendor.muimg", mapfn="vendor.map")
+    else:
+      script.FormatPartition("/vendor")
+      script.Mount("/vendor")
+      script.UnpackPackageDir("vendor", "/vendor")
+
+      symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
+      script.MakeSymlinks(symlinks)
+
+      vendor_items.GetMetadata(input_zip)
+      vendor_items.Get("vendor").SetPermissions(script)
 
   common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
   common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
@@ -544,7 +583,7 @@
   if OPTIONS.wipe_user_data:
     script.ShowProgress(0.1, 10)
     script.FormatPartition("/data")
-    
+
   if OPTIONS.two_step:
     script.AppendExtra("""
 set_stage("%(bcb_dev)s", "");
@@ -571,14 +610,15 @@
                      "".join(["%s=%s\n" % kv
                               for kv in sorted(metadata.iteritems())]))
 
-def LoadSystemFiles(z):
-  """Load all the files from SYSTEM/... in a given target-files
+def LoadPartitionFiles(z, partition):
+  """Load all the files from the given partition in a given target-files
   ZipFile, and return a dict of {filename: File object}."""
   out = {}
+  prefix = partition.upper() + "/"
   for info in z.infolist():
-    if info.filename.startswith("SYSTEM/") and not IsSymlink(info):
+    if info.filename.startswith(prefix) and not IsSymlink(info):
       basefilename = info.filename[7:]
-      fn = "system/" + basefilename
+      fn = partition + "/" + basefilename
       data = z.read(info.filename)
       out[fn] = common.File(fn, data)
   return out
@@ -602,6 +642,45 @@
     known_paths.add(path)
     dirs.pop()
 
+class BlockDifference:
+  def __init__(self, partition, builder, output_zip):
+    with tempfile.NamedTemporaryFile() as src_file:
+      with tempfile.NamedTemporaryFile() as tgt_file:
+        print "building source " + partition + " image..."
+        src_file = tempfile.NamedTemporaryFile()
+        src_mapdata, src_data = builder(OPTIONS.source_tmp,
+                                        OPTIONS.source_info_dict,
+                                        sparse=False, map_file=True)
+
+        self.src_sha1 = sha1(src_data).hexdigest()
+        print "source " + partition + " sha1:", self.src_sha1
+        src_file.write(src_data)
+
+        print "building target " + partition + " image..."
+        tgt_file = tempfile.NamedTemporaryFile()
+        tgt_mapdata, tgt_data = builder(OPTIONS.target_tmp,
+                                        OPTIONS.target_info_dict,
+                                        sparse=False, map_file=True)
+        self.tgt_sha1 = sha1(tgt_data).hexdigest()
+        print "target " + partition + " sha1:", self.tgt_sha1
+        tgt_len = len(tgt_data)
+        tgt_file.write(tgt_data)
+
+        system_type, self.device = common.GetTypeAndDevice("/" + partition,
+                                                           OPTIONS.info_dict)
+        self.patch = common.MakePartitionPatch(src_file, tgt_file, partition)
+
+        TestBlockPatch(src_data, src_mapdata, self.patch.data,
+                       tgt_mapdata, self.tgt_sha1)
+        src_data = None
+        tgt_data = None
+
+        self.patch.AddToZip(output_zip, compression=zipfile.ZIP_STORED)
+        self.src_mapfilename = self.patch.name + ".src.map"
+        common.ZipWriteStr(output_zip, self.src_mapfilename, src_mapdata)
+        self.tgt_mapfilename = self.patch.name + ".tgt.map"
+        common.ZipWriteStr(output_zip, self.tgt_mapfilename, tgt_mapdata)
+
 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
   source_version = OPTIONS.source_info_dict["recovery_api_version"]
   target_version = OPTIONS.target_info_dict["recovery_api_version"]
@@ -648,40 +727,13 @@
       "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
   updating_recovery = (source_recovery.data != target_recovery.data)
 
-  with tempfile.NamedTemporaryFile() as src_file:
-    with tempfile.NamedTemporaryFile() as tgt_file:
-      print "building source system image..."
-      src_file = tempfile.NamedTemporaryFile()
-      src_mapdata, src_data = img_from_target_files.BuildSystem(
-          OPTIONS.source_tmp, OPTIONS.source_info_dict,
-          sparse=False, map_file=True)
-
-      src_sys_sha1 = sha1(src_data).hexdigest()
-      print "source system sha1:", src_sys_sha1
-      src_file.write(src_data)
-
-      print "building target system image..."
-      tgt_file = tempfile.NamedTemporaryFile()
-      tgt_mapdata, tgt_data = img_from_target_files.BuildSystem(
-          OPTIONS.target_tmp, OPTIONS.target_info_dict,
-          sparse=False, map_file=True)
-      tgt_sys_sha1 = sha1(tgt_data).hexdigest()
-      print "target system sha1:", tgt_sys_sha1
-      tgt_sys_len = len(tgt_data)
-      tgt_file.write(tgt_data)
-
-      system_type, system_device = common.GetTypeAndDevice("/system", OPTIONS.info_dict)
-      system_patch = common.MakeSystemPatch(src_file, tgt_file)
-
-      TestBlockPatch(src_data, src_mapdata, system_patch.data, tgt_mapdata, tgt_sys_sha1)
-      src_data = None
-      tgt_data = None
-
-      system_patch.AddToZip(output_zip, compression=zipfile.ZIP_STORED)
-      src_mapfilename = system_patch.name + ".src.map"
-      common.ZipWriteStr(output_zip, src_mapfilename, src_mapdata)
-      tgt_mapfilename = system_patch.name + ".tgt.map"
-      common.ZipWriteStr(output_zip, tgt_mapfilename, tgt_mapdata)
+  system_diff = BlockDifference("system", img_from_target_files.BuildSystem,
+                                output_zip)
+  if HasVendorPartition(target_zip):
+    if not HasVendorPartition(source_zip):
+      raise RuntimeError("can't generate incremental that adds /vendor")
+    vendor_diff = BlockDifference("vendor", img_from_target_files.BuildVendor,
+                                  output_zip)
 
   oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
   oem_dict = None
@@ -774,12 +826,23 @@
 
   device_specific.IncrementalOTA_InstallBegin()
 
+  if HasVendorPartition(target_zip):
+    script.Print("Patching vendor image...")
+    script.ShowProgress(0.1, 0)
+    script.Syspatch(vendor_diff.device,
+                    vendor_diff.tgt_mapfilename, vendor_diff.tgt_sha1,
+                    vendor_diff.src_mapfilename, vendor_diff.src_sha1,
+                    vendor_diff.patch.name)
+    sys_progress = 0.8
+  else:
+    sys_progress = 0.9
+
   script.Print("Patching system image...")
-  script.ShowProgress(0.9, 0)
-  script.Syspatch(system_device,
-                  tgt_mapfilename, tgt_sys_sha1,
-                  src_mapfilename, src_sys_sha1,
-                  system_patch.name)
+  script.ShowProgress(sys_progress, 0)
+  script.Syspatch(system_diff.device,
+                  system_diff.tgt_mapfilename, system_diff.tgt_sha1,
+                  system_diff.src_mapfilename, system_diff.src_sha1,
+                  system_diff.patch.name)
 
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
@@ -881,6 +944,127 @@
   print "test of system image patch succeeded"
 
 
+class FileDifference:
+  def __init__(self, partition, source_zip, target_zip, output_zip):
+    print "Loading target..."
+    self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
+    print "Loading source..."
+    self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
+
+    self.verbatim_targets = verbatim_targets = []
+    self.patch_list = patch_list = []
+    diffs = []
+    self.renames = renames = {}
+    known_paths = set()
+    largest_source_size = 0
+
+    matching_file_cache = {}
+    for fn, sf in source_data.items():
+      assert fn == sf.name
+      matching_file_cache["path:" + fn] = sf
+      if fn in target_data.keys():
+        AddToKnownPaths(fn, known_paths)
+      # Only allow eligibility for filename/sha matching
+      # if there isn't a perfect path match.
+      if target_data.get(sf.name) is None:
+        matching_file_cache["file:" + fn.split("/")[-1]] = sf
+        matching_file_cache["sha:" + sf.sha1] = sf
+
+    for fn in sorted(target_data.keys()):
+      tf = target_data[fn]
+      assert fn == tf.name
+      sf = ClosestFileMatch(tf, matching_file_cache, renames)
+      if sf is not None and sf.name != tf.name:
+        print "File has moved from " + sf.name + " to " + tf.name
+        renames[sf.name] = tf
+
+      if sf is None or fn in OPTIONS.require_verbatim:
+        # This file should be included verbatim
+        if fn in OPTIONS.prohibit_verbatim:
+          raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
+        print "send", fn, "verbatim"
+        tf.AddToZip(output_zip)
+        verbatim_targets.append((fn, tf.size))
+        if fn in target_data.keys():
+          AddToKnownPaths(fn, known_paths)
+      elif tf.sha1 != sf.sha1:
+        # File is different; consider sending as a patch
+        diffs.append(common.Difference(tf, sf))
+      else:
+        # Target file data identical to source (may still be renamed)
+        pass
+
+    common.ComputeDifferences(diffs)
+
+    for diff in diffs:
+      tf, sf, d = diff.GetPatch()
+      path = "/".join(tf.name.split("/")[:-1])
+      if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
+          path not in known_paths:
+        # patch is almost as big as the file; don't bother patching
+        # or a patch + rename cannot take place due to the target
+        # directory not existing
+        tf.AddToZip(output_zip)
+        verbatim_targets.append((tf.name, tf.size))
+        if sf.name in renames:
+          del renames[sf.name]
+        AddToKnownPaths(tf.name, known_paths)
+      else:
+        common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
+        patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
+        largest_source_size = max(largest_source_size, sf.size)
+
+    self.largest_source_size = largest_source_size
+
+  def EmitVerification(self, script):
+    so_far = 0
+    for tf, sf, size, patch_sha in self.patch_list:
+      if tf.name != sf.name:
+        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
+      script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
+      so_far += sf.size
+    return so_far
+
+  def RemoveUnneededFiles(self, script, extras=()):
+    script.DeleteFiles(["/"+i[0] for i in self.verbatim_targets] +
+                       ["/"+i for i in sorted(self.source_data)
+                              if i not in self.target_data and
+                              i not in self.renames] +
+                       list(extras))
+
+  def TotalPatchSize(self):
+    return sum(i[1].size for i in self.patch_list)
+
+  def EmitPatches(self, script, total_patch_size, so_far):
+    self.deferred_patch_list = deferred_patch_list = []
+    for item in self.patch_list:
+      tf, sf, size, _ = item
+      if tf.name == "system/build.prop":
+        deferred_patch_list.append(item)
+        continue
+      if sf.name != tf.name:
+        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
+      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
+      so_far += tf.size
+      script.SetProgress(so_far / total_patch_size)
+    return so_far
+
+  def EmitDeferredPatches(self, script):
+    for item in self.deferred_patch_list:
+      tf, sf, size, _ = item
+      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
+    script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)
+
+  def EmitRenames(self, script):
+    if len(self.renames) > 0:
+      script.Print("Renaming files...")
+      for src, tgt in self.renames.iteritems():
+        print "Renaming " + src + " to " + tgt.name
+        script.RenameFile(src, tgt.name)
+
+
+
+
 def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
   target_has_recovery_patch = HasRecoveryPatch(target_zip)
   source_has_recovery_patch = HasRecoveryPatch(source_zip)
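For orientation while reading the new FileDifference constructor above: the
matching cache is keyed three ways ("path:", "file:", "sha:"), and
ClosestFileMatch (defined elsewhere in this script, outside this excerpt) is
expected to consult those keys from most to least specific. The following is
only an illustrative sketch of that idea -- the lookup order and the "skip
sources already claimed by a rename" rule are assumptions, not the actual
implementation:

def ClosestFileMatchSketch(tf, matching_file_cache, renames):
  # Assumed preference: exact path, then identical contents, then basename.
  for key in ("path:" + tf.name,
              "sha:" + tf.sha1,
              "file:" + tf.name.split("/")[-1]):
    sf = matching_file_cache.get(key)
    if sf is not None and sf.name not in renames:
      return sf
  return None
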
@@ -923,75 +1107,13 @@
       metadata=metadata,
       info_dict=OPTIONS.info_dict)
 
-  print "Loading target..."
-  target_data = LoadSystemFiles(target_zip)
-  print "Loading source..."
-  source_data = LoadSystemFiles(source_zip)
-
-  verbatim_targets = []
-  patch_list = []
-  diffs = []
-  renames = {}
-  known_paths = set()
-  largest_source_size = 0
-
-  matching_file_cache = {}
-  for fn, sf in source_data.items():
-    assert fn == sf.name
-    matching_file_cache["path:" + fn] = sf
-    if fn in target_data.keys():
-      AddToKnownPaths(fn, known_paths)
-    # Only allow eligibility for filename/sha matching
-    # if there isn't a perfect path match.
-    if target_data.get(sf.name) is None:
-      matching_file_cache["file:" + fn.split("/")[-1]] = sf
-      matching_file_cache["sha:" + sf.sha1] = sf
-
-  for fn in sorted(target_data.keys()):
-    tf = target_data[fn]
-    assert fn == tf.name
-    sf = ClosestFileMatch(tf, matching_file_cache, renames)
-    if sf is not None and sf.name != tf.name:
-      print "File has moved from " + sf.name + " to " + tf.name
-      renames[sf.name] = tf
-
-    if sf is None or fn in OPTIONS.require_verbatim:
-      # This file should be included verbatim
-      if fn in OPTIONS.prohibit_verbatim:
-        raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
-      print "send", fn, "verbatim"
-      tf.AddToZip(output_zip)
-      verbatim_targets.append((fn, tf.size))
-      if fn in target_data.keys():
-        AddToKnownPaths(fn, known_paths)
-    elif tf.sha1 != sf.sha1:
-      # File is different; consider sending as a patch
-      diffs.append(common.Difference(tf, sf))
-    else:
-      # Target file data identical to source (may still be renamed)
-      pass
-
-  common.ComputeDifferences(diffs)
-
-  for diff in diffs:
-    tf, sf, d = diff.GetPatch()
-    path = "/".join(tf.name.split("/")[:-1])
-    if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
-        path not in known_paths:
-      # patch is almost as big as the file; don't bother patching
-      # or a patch + rename cannot take place due to the target
-      # directory not existing
-      tf.AddToZip(output_zip)
-      verbatim_targets.append((tf.name, tf.size))
-      if sf.name in renames:
-        del renames[sf.name]
-      AddToKnownPaths(tf.name, known_paths)
-    else:
-      common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
-      patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
-      largest_source_size = max(largest_source_size, sf.size)
-
+  system_diff = FileDifference("system", source_zip, target_zip, output_zip)
   script.Mount("/system")
+  if HasVendorPartition(target_zip):
+    vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
+    script.Mount("/vendor")
+  else:
+    vendor_diff = None
 
   target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict)
   source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict)
@@ -1075,13 +1197,9 @@
   device_specific.IncrementalOTA_VerifyBegin()
 
   script.ShowProgress(0.1, 0)
-  so_far = 0
-
-  for tf, sf, size, patch_sha in patch_list:
-    if tf.name != sf.name:
-      script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-    script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
-    so_far += sf.size
+  so_far = system_diff.EmitVerification(script)
+  if vendor_diff:
+    so_far += vendor_diff.EmitVerification(script)
 
   if updating_boot:
     d = common.Difference(target_boot, source_boot)
@@ -1099,8 +1217,12 @@
                        target_boot.size, target_boot.sha1))
     so_far += source_boot.size
 
-  if patch_list or updating_recovery or updating_boot:
-    script.CacheFreeSpaceCheck(largest_source_size)
+  size = []
+  if system_diff.patch_list: size.append(system_diff.largest_source_size)
+  if vendor_diff and vendor_diff.patch_list:
+    size.append(vendor_diff.largest_source_size)
+  if size or updating_recovery or updating_boot:
+    script.CacheFreeSpaceCheck(max(size) if size else 0)
 
   device_specific.IncrementalOTA_VerifyEnd()
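A note on the CacheFreeSpaceCheck call above: patches are applied one file at
a time, so /cache only needs room for the single largest source file across
all patched partitions, not for their sum. A toy illustration with invented
byte counts:

  # Hypothetical: system's largest patched source is 12 MiB, vendor's is 3 MiB.
  sizes = [12 << 20, 3 << 20]
  print max(sizes)   # 12582912 -- the check needs the largest file, not the sum
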
 
@@ -1122,30 +1244,22 @@
     print "writing full boot image (forced by two-step mode)"
 
   script.Print("Removing unneeded files...")
-  script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
-                     ["/"+i for i in sorted(source_data)
-                            if i not in target_data and
-                            i not in renames] +
-                     ["/system/recovery.img"])
+  system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
+  if vendor_diff:
+    vendor_diff.RemoveUnneededFiles(script)
 
   script.ShowProgress(0.8, 0)
-  total_patch_size = float(sum([i[1].size for i in patch_list]) + 1)
+  total_patch_size = 1.0 + system_diff.TotalPatchSize()
+  if vendor_diff:
+    total_patch_size += vendor_diff.TotalPatchSize()
   if updating_boot:
     total_patch_size += target_boot.size
-  so_far = 0
 
   script.Print("Patching system files...")
-  deferred_patch_list = []
-  for item in patch_list:
-    tf, sf, size, _ = item
-    if tf.name == "system/build.prop":
-      deferred_patch_list.append(item)
-      continue
-    if (sf.name != tf.name):
-      script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-    script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
-    so_far += tf.size
-    script.SetProgress(so_far / total_patch_size)
+  so_far = system_diff.EmitPatches(script, total_patch_size, 0)
+  if vendor_diff:
+    script.Print("Patching vendor files...")
+    so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)
 
   if not OPTIONS.two_step:
     if updating_boot:
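For clarity on the progress accounting that EmitPatches threads across
partitions above: so_far accumulates patched target bytes against one shared
total_patch_size, so the on-screen progress bar advances continuously from the
system files into the vendor files. A minimal standalone sketch with made-up
sizes:

  # Hypothetical per-file target sizes, system files first, then vendor.
  system_sizes = [100, 300]
  vendor_sizes = [50]
  total_patch_size = 1.0 + sum(system_sizes) + sum(vendor_sizes)
  so_far = 0
  for size in system_sizes + vendor_sizes:
    so_far += size
    print "progress %.2f" % (so_far / total_patch_size)
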
@@ -1166,6 +1280,10 @@
     else:
       print "boot image unchanged; skipping."
 
+  system_items = ItemSet("system", "META/filesystem_config.txt")
+  if vendor_diff:
+    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
+
   if updating_recovery:
     # Recovery is generated as a patch using both the boot image
     # (which contains the same linux kernel as recovery) and the file
@@ -1179,7 +1297,7 @@
     if not target_has_recovery_patch:
       def output_sink(fn, data):
         common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-        Item.Get("system/" + fn, dir=False)
+        system_items.Get("system/" + fn, dir=False)
 
       common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
                                target_recovery, target_boot)
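ItemSet itself is defined earlier in this script (outside this excerpt); the
hunks here only rely on it exposing, per partition, what the old Item class
exposed globally. A rough sketch of the assumed interface, for orientation
only -- the attribute names and bodies below are guesses, not the real code:

  class ItemSet(object):
    def __init__(self, partition, fs_config):
      self.partition = partition    # e.g. "vendor"
      self.fs_config = fs_config    # e.g. "META/vendor_filesystem_config.txt"
      self.ITEMS = {}               # hypothetical cache of Item objects
    def Get(self, name, dir=False):
      pass                          # look up (and memoize) the named item
    def GetMetadata(self, input_zip):
      pass                          # read self.fs_config; record uid/gid/mode
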
@@ -1191,16 +1309,24 @@
 
   script.ShowProgress(0.1, 10)
 
-  target_symlinks = CopySystemFiles(target_zip, None)
+  target_symlinks = CopyPartitionFiles(system_items, target_zip, None)
+  if vendor_diff:
+    target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None))
+
+  temp_script = script.MakeTemporary()
+  system_items.GetMetadata(target_zip)
+  system_items.Get("system").SetPermissions(temp_script)
+  if vendor_diff:
+    vendor_items.GetMetadata(target_zip)
+    vendor_items.Get("vendor").SetPermissions(temp_script)
+
+  # Note that this call will mess up the trees of Items, so make sure
+  # we're done with them.
+  source_symlinks = CopyPartitionFiles(system_items, source_zip, None)
+  if vendor_diff:
+    source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None))
 
   target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
-  temp_script = script.MakeTemporary()
-  Item.GetMetadata(target_zip)
-  Item.Get("system").SetPermissions(temp_script)
-
-  # Note that this call will mess up the tree of Items, so make sure
-  # we're done with it.
-  source_symlinks = CopySystemFiles(source_zip, None)
   source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
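On the symlink bookkeeping above: CopyPartitionFiles is assumed to return
(link_destination, link_path) pairs for every symlink it encounters, so the
dict() calls index them by link path. A tiny illustration with one invented
entry:

  # Hypothetical symlink: /system/bin/ls -> toolbox
  target_symlinks = [("toolbox", "/system/bin/ls")]
  target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
  print target_symlinks_d   # {'/system/bin/ls': 'toolbox'}
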
 
   # Delete all the symlinks in source that aren't in target.  This
@@ -1212,20 +1338,20 @@
       to_delete.append(link)
   script.DeleteFiles(to_delete)
 
-  if verbatim_targets:
-    script.Print("Unpacking new files...")
+  if system_diff.verbatim_targets:
+    script.Print("Unpacking new system files...")
     script.UnpackPackageDir("system", "/system")
+  if vendor_diff and vendor_diff.verbatim_targets:
+    script.Print("Unpacking new vendor files...")
+    script.UnpackPackageDir("vendor", "/vendor")
 
   if updating_recovery and not target_has_recovery_patch:
     script.Print("Unpacking new recovery...")
     script.UnpackPackageDir("recovery", "/system")
 
-  if len(renames) > 0:
-    script.Print("Renaming files...")
-
-  for src in renames:
-    print "Renaming " + src + " to " + renames[src].name
-    script.RenameFile(src, renames[src].name)
+  system_diff.EmitRenames(script)
+  if vendor_diff:
+    vendor_diff.EmitRenames(script)
 
   script.Print("Symlinks and permissions...")
 
@@ -1256,10 +1382,7 @@
   # device can still come up, it appears to be the old build and will
   # get sent the OTA package again to retry.
   script.Print("Patching remaining system files...")
-  for item in deferred_patch_list:
-    tf, sf, size, _ = item
-    script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
-  script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)
+  system_diff.EmitDeferredPatches(script)
 
   if OPTIONS.wipe_user_data:
     script.Print("Erasing user data...")