Merge "Add tzdatacheck to image"
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 38f04f1..0294cf2 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -87,6 +87,7 @@
 # file, tag the module as "gnu".  Search for "*_GPL*", "*_LGPL*" and "*_MPL*"
 # so that we can also find files like MODULE_LICENSE_GPL_AND_AFL
 #
+license_files := $(call find-parent-file,$(LOCAL_PATH),MODULE_LICENSE*)
 gpl_license_file := $(call find-parent-file,$(LOCAL_PATH),MODULE_LICENSE*_GPL* MODULE_LICENSE*_MPL* MODULE_LICENSE*_LGPL*)
 ifneq ($(gpl_license_file),)
   my_module_tags += gnu
@@ -643,6 +644,21 @@
 
 INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
 
+##########################################################
+# Track module-level dependencies.
+# Use $(LOCAL_MODULE) instead of $(my_register_name) to ignore the module's bitness.
+ALL_DEPS.MODULES := $(sort $(ALL_DEPS.MODULES) $(LOCAL_MODULE))
+ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS := $(sort \
+  $(ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS) \
+  $(LOCAL_STATIC_LIBRARIES) \
+  $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+  $(LOCAL_SHARED_LIBRARIES) \
+  $(LOCAL_STATIC_JAVA_LIBRARIES) \
+  $(LOCAL_JAVA_LIBRARIES) \
+  $(LOCAL_JNI_SHARED_LIBRARIES))
+
+ALL_DEPS.$(LOCAL_MODULE).LICENSE := $(sort $(ALL_DEPS.$(LOCAL_MODULE).LICENSE) $(license_files))
+
 ###########################################################
 ## Take care of my_module_tags
 ###########################################################
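
Note: taken together, the two hunks above build a per-module registry: ALL_DEPS.MODULES lists every module seen, ALL_DEPS.<m>.ALL_DEPS its direct dependencies, and ALL_DEPS.<m>.LICENSE the MODULE_LICENSE* files found by walking up from LOCAL_PATH. A minimal Python sketch of that accumulation, with hypothetical names, just to make the data flow explicit:

    # Hypothetical mirror of the Make variables set in base_rules.mk.
    ALL_DEPS_MODULES = set()
    DIRECT_DEPS = {}   # module name -> set of direct dependencies
    LICENSES = {}      # module name -> set of license files

    def register_module(name, deps, license_files):
        """Accumulate one module's metadata, as done once per parsed
        Android.mk; both bitness variants share the same entry."""
        ALL_DEPS_MODULES.add(name)
        DIRECT_DEPS.setdefault(name, set()).update(deps)
        LICENSES.setdefault(name, set()).update(license_files)
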
diff --git a/core/clang/mips.mk b/core/clang/mips.mk
index 08daf40..70832a3 100644
--- a/core/clang/mips.mk
+++ b/core/clang/mips.mk
@@ -14,6 +14,11 @@
   -msynci \
   -mno-fused-madd
 
+# Temporary workaround for a Mips clang++ problem that creates
+#   relocated ptrs in read-only pic .gcc_exception_table;
+#   permanent fix pending at http://reviews.llvm.org/D9669
+CLANG_CONFIG_mips_UNKNOWN_CFLAGS += -Wl,--warn-shared-textrel
+
 # We don't have any mips flags to substitute yet.
 define subst-clang-incompatible-mips-flags
   $(1)
diff --git a/core/clang/mips64.mk b/core/clang/mips64.mk
index 612175c..ba9c1d1 100644
--- a/core/clang/mips64.mk
+++ b/core/clang/mips64.mk
@@ -14,6 +14,11 @@
   -msynci \
   -mno-fused-madd
 
+# Temporary workaround for a Mips clang++ problem that creates
+#   relocated ptrs in read-only pic .gcc_exception_table;
+#   permanent fix pending at http://reviews.llvm.org/D9669
+CLANG_CONFIG_mips64_UNKNOWN_CFLAGS += -Wl,--warn-shared-textrel
+
 # We don't have any mips64 flags to substitute yet.
 define subst-clang-incompatible-mips64-flags
   $(1)
diff --git a/core/combo/HOST_linux-x86.mk b/core/combo/HOST_linux-x86.mk
index 8eda6c0..3acf795 100644
--- a/core/combo/HOST_linux-x86.mk
+++ b/core/combo/HOST_linux-x86.mk
@@ -40,8 +40,7 @@
   -no-canonical-prefixes \
   -include $(call select-android-config-h,linux-x86)
 
-# TODO: Set _FORTIFY_SOURCE=2. Bug 20558757.
-$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -fstack-protector
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector
 
 # Workaround differences in inttypes.h between host and target.
 # See bug 12708004.
diff --git a/core/combo/HOST_linux-x86_64.mk b/core/combo/HOST_linux-x86_64.mk
index e268e41..b74990b 100644
--- a/core/combo/HOST_linux-x86_64.mk
+++ b/core/combo/HOST_linux-x86_64.mk
@@ -40,8 +40,7 @@
   -no-canonical-prefixes \
   -include $(call select-android-config-h,linux-x86)
 
-# TODO: Set _FORTIFY_SOURCE=2. Bug 20558757.
-HOST_GLOBAL_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -fstack-protector
+HOST_GLOBAL_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector
 
 # Workaround differences in inttypes.h between host and target.
 # See bug 12708004.
diff --git a/core/combo/TARGET_linux-arm64.mk b/core/combo/TARGET_linux-arm64.mk
index 2fceee0..0e4c6f1 100644
--- a/core/combo/TARGET_linux-arm64.mk
+++ b/core/combo/TARGET_linux-arm64.mk
@@ -114,6 +114,7 @@
 			-Wl,--fatal-warnings \
 			-Wl,-maarch64linux \
 			-Wl,--hash-style=gnu \
+			-Wl,--fix-cortex-a53-843419 \
 			$(arch_variant_ldflags)
 
 # Disable transitive dependency library symbol resolving.
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index e13790b..b116283 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -87,13 +87,6 @@
                            $(ADDRESS_SANITIZER_CONFIG_EXTRA_SHARED_LIBRARIES)
     my_static_libraries += $(ADDRESS_SANITIZER_CONFIG_EXTRA_STATIC_LIBRARIES)
     my_ldflags += -Wl,-rpath,$($(LOCAL_2ND_ARCH_VAR_PREFIX)ADDRESS_SANITIZER_RPATH)
-
-    # http://b/20665974
-    # The relocation packer is causing the kernel to load the executable
-    # overlapping ASAN's shadow space.
-    ifeq ($(LOCAL_MODULE_CLASS),EXECUTABLES)
-        LOCAL_PACK_MODULE_RELOCATIONS := false
-    endif
   endif
 endif
 
diff --git a/core/cxx_stl_setup.mk b/core/cxx_stl_setup.mk
index da84bdd..c59cd34 100644
--- a/core/cxx_stl_setup.mk
+++ b/core/cxx_stl_setup.mk
@@ -8,16 +8,16 @@
 ifeq ($(strip $(LOCAL_CXX_STL)),default)
     ifndef LOCAL_SDK_VERSION
         # Platform code. Select the appropriate STL.
-        ifndef USE_MINGW
-            my_cxx_stl := libc++
-            ifdef LOCAL_IS_HOST_MODULE
-                ifneq (,$(BUILD_HOST_static))
-                    my_cxx_stl := libc++_static
-                endif
+        my_cxx_stl := libc++
+        ifdef LOCAL_IS_HOST_MODULE
+            ifneq (,$(BUILD_HOST_static))
+                my_cxx_stl := libc++_static
             endif
-        else
-            # libc++ is not supported on mingw.
-            my_cxx_stl := libstdc++
+
+            ifdef USE_MINGW
+                # libc++ is not supported on mingw.
+                my_cxx_stl := libstdc++
+            endif
         endif
     else
         my_cxx_stl := ndk
@@ -49,15 +49,17 @@
 ifneq ($(filter $(my_cxx_stl),libc++ libc++_static),)
     my_cflags += -D_USING_LIBCXX
     my_c_includes += external/libcxx/include
-    ifeq ($(my_cxx_stl),libc++)
-        my_shared_libraries += libc++
+
+    # Note that the structure of this means that LOCAL_CXX_STL := libc++ will
+    # use the static libc++ for static executables.
+    ifeq ($(my_link_type),dynamic)
+        ifeq ($(my_cxx_stl),libc++)
+            my_shared_libraries += libc++
+        else
+            my_static_libraries += libc++_static
+        endif
     else
         my_static_libraries += libc++_static
-        ifndef LOCAL_IS_HOST_MODULE
-            ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
-                my_static_libraries += libm libc libdl
-            endif
-        endif
     endif
 
     ifdef LOCAL_IS_HOST_MODULE
@@ -71,23 +73,13 @@
         endif
 
         ifeq ($(my_link_type),static)
-            my_static_libraries += libdl
+            my_static_libraries += libm libc libdl
         else
             my_shared_libraries += libdl
         endif
     endif
-else ifneq ($(filter $(my_cxx_stl),stlport stlport_static),)
-    ifndef LOCAL_IS_HOST_MODULE
-        my_c_includes += external/stlport/stlport bionic/libstdc++/include \
-                         bionic
-        ifeq ($(my_cxx_stl),stlport)
-            my_shared_libraries += libstdc++ libstlport
-        else
-            my_static_libraries += libstdc++ libstlport_static
-        endif
-    endif
 else ifeq ($(my_cxx_stl),ndk)
-    # Using an NDK STL. Handled farther up in this file.
+    # Using an NDK STL. Handled in binary.mk.
     ifndef LOCAL_IS_HOST_MODULE
         my_system_shared_libraries += libstdc++
     endif
@@ -106,5 +98,5 @@
         my_ldlibs += $($(my_prefix)$(HOST_OS)_$(my_link_type)_gcclibs)
     endif
 else
-    $(error $(my_cxx_stl) is not a supported STL.)
+    $(error $(LOCAL_PATH): $(LOCAL_MODULE): $(my_cxx_stl) is not a supported STL.)
 endif
diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk
index f015052..38c0cbe 100644
--- a/core/dynamic_binary.mk
+++ b/core/dynamic_binary.mk
@@ -50,8 +50,10 @@
   my_pack_module_relocations := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_PACK_MODULE_RELOCATIONS)
 endif
 
-# Do not pack relocations for static executables.
-ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
+# Do not pack relocations for executables: packing results in a
+# non-zero p_vaddr, which causes the kernel to load executables at a
+# lower address (starting at 0x8000). http://b/20665974
+ifeq ($(LOCAL_MODULE_CLASS),EXECUTABLES)
   my_pack_module_relocations := false
 endif
 
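
Note: the p_vaddr mentioned above is the virtual address of the executable's first PT_LOAD program header; when it is non-zero, the kernel maps the binary at that (low) address, which is what collided with ASAN's shadow space in http://b/20665974. A minimal sketch for inspecting it, assuming a 64-bit little-endian ELF (not part of this change):

    import struct

    def first_load_vaddr(path):
        """Return p_vaddr of the first PT_LOAD segment of an ELF64 LE
        binary, or None if there is no PT_LOAD segment."""
        with open(path, "rb") as f:
            ehdr = f.read(64)
            assert ehdr[:4] == b"\x7fELF", "not an ELF file"
            e_phoff = struct.unpack_from("<Q", ehdr, 0x20)[0]
            e_phentsize = struct.unpack_from("<H", ehdr, 0x36)[0]
            e_phnum = struct.unpack_from("<H", ehdr, 0x38)[0]
            for i in range(e_phnum):
                f.seek(e_phoff + i * e_phentsize)
                phdr = f.read(e_phentsize)
                p_type = struct.unpack_from("<I", phdr, 0)[0]
                if p_type == 1:  # PT_LOAD
                    return struct.unpack_from("<Q", phdr, 0x10)[0]
        return None
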
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 3bfcbe7..015cfee 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -255,6 +255,7 @@
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES := $($(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_INTERMEDIATES)/lib
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT)/lib
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_EXECUTABLES := $(HOST_OUT_EXECUTABLES)
+$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_JAVA_LIBRARIES := $(HOST_OUT_JAVA_LIBRARIES)
 
 # The default host library path.
 # It always points to the path where we build libraries in the default bitness.
diff --git a/core/main.mk b/core/main.mk
index 41e2801..3bb8c04 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -175,7 +175,7 @@
 #
 # For Java 1.7, we require OpenJDK on linux and Oracle JDK on Mac OS.
 requires_openjdk := false
-ifeq ($(HOST_OS), linux)
+ifeq ($(BUILD_OS),linux)
 requires_openjdk := true
 endif
 
diff --git a/core/multilib.mk b/core/multilib.mk
index a3ced65..e0615b2 100644
--- a/core/multilib.mk
+++ b/core/multilib.mk
@@ -13,3 +13,11 @@
 $(error $(LOCAL_PATH): Invalid LOCAL_MULTILIB specified for module $(LOCAL_MODULE))
 endif
 endif # my_module_multilib defined
+
+# Windows is a special case. Linux and Darwin are both multilib builds, but we
+# don't have a 64-bit Windows build, so make sure it's not a multilib build.
+ifdef LOCAL_IS_HOST_MODULE
+ifeq ($(HOST_OS),windows)
+my_module_multilib := 32
+endif
+endif
diff --git a/core/native_benchmark.mk b/core/native_benchmark.mk
index 431e40a..e5ca451 100644
--- a/core/native_benchmark.mk
+++ b/core/native_benchmark.mk
@@ -3,7 +3,7 @@
 ## Common flags for native benchmarks are added.
 ###########################################
 
-LOCAL_STATIC_LIBRARIES += libbenchmark
+LOCAL_STATIC_LIBRARIES += libbenchmark libbase
 
 ifndef LOCAL_MODULE_PATH
 LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/$(LOCAL_MODULE)
diff --git a/core/tasks/cts.mk b/core/tasks/cts.mk
index 23648f8..a1fe4e8 100644
--- a/core/tasks/cts.mk
+++ b/core/tasks/cts.mk
@@ -86,8 +86,7 @@
     $(eval $(call copy-one-file, $(built), $(installed)))\
     $(eval CTS_CASE_LIST_APKS += $(installed))))
 
-CTS_SHARED_LIBS := \
-	$(HOST_LIBRARY_PATH)/libc++$(HOST_SHLIB_SUFFIX)
+CTS_SHARED_LIBS :=
 
 DEFAULT_TEST_PLAN := $(cts_dir)/$(cts_name)/resource/plans
 $(cts_dir)/all_cts_files_stamp: $(CTS_CORE_CASES) $(CTS_TEST_CASES) $(CTS_CASE_LIST_APKS) $(JUNIT_HOST_JAR) $(HOSTTESTLIB_JAR) $(CTS_HOST_LIBRARY_JARS) $(TF_JAR) $(VMTESTSTF_JAR) $(CTS_TF_JAR) $(CTS_TF_EXEC_PATH) $(CTS_TF_README_PATH) $(ACP) $(CTS_TEST_JAR_FILES) $(CTS_SHARED_LIBS)
diff --git a/core/tasks/deps_licenses.mk b/core/tasks/deps_licenses.mk
new file mode 100644
index 0000000..bb20fa0
--- /dev/null
+++ b/core/tasks/deps_licenses.mk
@@ -0,0 +1,59 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Print modules and their transitive dependencies with license files.
+# To invoke, run "make deps-license PROJ_PATH=<proj-path-patterns> DEP_PATH=<dep-path-patterns>".
+# PROJ_PATH restricts the paths of the source modules; DEP_PATH restricts the paths of the dependency modules.
+# Both can be makefile patterns supported by the makefile function $(filter).
+# Example: "make deps-license PROJ_PATH=packages/app/% DEP_PATH=external/%" prints all modules in packages/app/ with their dependencies in external/.
+# The printout lines look like "<module_name> :: <module_paths> :: <license_files>".
+
+ifneq (,$(filter deps-license,$(MAKECMDGOALS)))
+ifndef PROJ_PATH
+$(error To "make deps-license" you must specify PROJ_PATH and DEP_PATH.)
+endif
+ifndef DEP_PATH
+$(error To "make deps-license" you must specify PROJ_PATH and DEP_PATH.)
+endif
+
+# Expand a module's dependencies transitively.
+# $(1): the variable name to hold the result.
+# $(2): the initial module name.
+define get-module-all-dependencies
+$(eval _gmad_new := $(sort $(filter-out $($(1)),\
+  $(foreach m,$(2),$(ALL_DEPS.$(m).ALL_DEPS)))))\
+$(if $(_gmad_new),$(eval $(1) += $(_gmad_new))\
+  $(call get-module-all-dependencies,$(1),$(_gmad_new)))
+endef
+
+define print-deps-license
+$(foreach m, $(ALL_DEPS.MODULES),\
+  $(eval m_p := $(sort $(ALL_MODULES.$(m).PATH) $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).PATH)))\
+  $(if $(filter $(PROJ_PATH),$(m_p)),\
+    $(eval deps :=)\
+    $(eval $(call get-module-all-dependencies,deps,$(m)))\
+    $(info $(m) :: $(m_p) :: $(ALL_DEPS.$(m).LICENSE))\
+    $(foreach d,$(deps),\
+      $(eval d_p := $(sort $(ALL_MODULES.$(d).PATH) $(ALL_MODULES.$(d)$(TARGET_2ND_ARCH_MODULE_SUFFIX).PATH)))\
+      $(if $(filter $(DEP_PATH),$(d_p)),\
+        $(info $(space)$(space)$(space)$(space)$(d) :: $(d_p) :: $(ALL_DEPS.$(d).LICENSE))))))
+endef
+
+.PHONY: deps-license
+deps-license:
+	@$(call print-deps-license)
+
+endif
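
Note: get-module-all-dependencies above is a fixed-point expansion: each pass adds the dependencies of the modules discovered in the previous pass until nothing new appears. The same algorithm as a Python sketch, where module_deps is a hypothetical stand-in for the ALL_DEPS.<module>.ALL_DEPS variables:

    def transitive_deps(module, module_deps):
        """Expand a module's dependencies transitively, mirroring
        get-module-all-dependencies: keep adding the deps of newly
        discovered modules until a pass adds nothing new."""
        result = set()
        frontier = set(module_deps.get(module, ()))
        while frontier:
            result |= frontier
            # Next frontier: deps of the new modules, minus what we
            # have already collected.
            frontier = set(d for m in frontier
                           for d in module_deps.get(m, ())) - result
        return result

    # Hypothetical example:
    deps = {"app": ["libfoo"], "libfoo": ["libbar", "libc"], "libbar": ["libc"]}
    assert transitive_deps("app", deps) == {"libfoo", "libbar", "libc"}
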
diff --git a/target/board/generic_mips64/BoardConfig.mk b/target/board/generic_mips64/BoardConfig.mk
index 5c13447..8e8a68b 100644
--- a/target/board/generic_mips64/BoardConfig.mk
+++ b/target/board/generic_mips64/BoardConfig.mk
@@ -66,10 +66,12 @@
 
 TARGET_USERIMAGES_USE_EXT4 := true
 BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1342177280  # 1.25 GB swag, 20% more than before
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 734003200
+BOARD_USERDATAIMAGE_PARTITION_SIZE := 1610612736  # 1.5 GB, lots of space for running tests
 BOARD_CACHEIMAGE_PARTITION_SIZE := 69206016
 BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE := ext4
 BOARD_FLASH_BLOCK_SIZE := 512
 TARGET_USERIMAGES_SPARSE_EXT_DISABLED := true
 
 BOARD_SEPOLICY_DIRS += build/target/board/generic/sepolicy
+
+DEX_PREOPT_DEFAULT := nostripping
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index d7b831f..c40de4f 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -52,7 +52,6 @@
     libpower \
     libsigchain \
     libstdc++ \
-    libstlport \
     libsurfaceflinger \
     libsurfaceflinger_ddmconnection \
     libsysutils \
diff --git a/tools/fs_config/fs_config.c b/tools/fs_config/fs_config.c
index f594c1e..fac3d93 100644
--- a/tools/fs_config/fs_config.c
+++ b/tools/fs_config/fs_config.c
@@ -24,7 +24,6 @@
 
 #include <selinux/selinux.h>
 #include <selinux/label.h>
-#include <selinux/android.h>
 
 #include "private/android_filesystem_config.h"
 
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 7984ad6..eab8113 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -33,10 +33,6 @@
 import tempfile
 import zipfile
 
-# missing in Python 2.4 and before
-if not hasattr(os, "SEEK_SET"):
-  os.SEEK_SET = 0
-
 import build_image
 import common
 
@@ -189,7 +185,7 @@
   assert succ, "build userdata.img image failed"
 
   common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
-  output_zip.write(img.name, prefix + "userdata.img")
+  common.ZipWrite(output_zip, img.name, prefix + "userdata.img")
   img.close()
   os.rmdir(user_dir)
   os.rmdir(temp_dir)
@@ -226,7 +222,7 @@
   assert succ, "build cache.img image failed"
 
   common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
-  output_zip.write(img.name, prefix + "cache.img")
+  common.ZipWrite(output_zip, img.name, prefix + "cache.img")
   img.close()
   os.rmdir(user_dir)
   os.rmdir(temp_dir)
@@ -252,7 +248,7 @@
     OPTIONS.info_dict["selinux_fc"] = os.path.join(
         OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts")
 
-  input_zip.close()
+  common.ZipClose(input_zip)
   output_zip = zipfile.ZipFile(filename, "a",
                                compression=zipfile.ZIP_DEFLATED)
 
@@ -297,7 +293,7 @@
   banner("cache")
   AddCache(output_zip)
 
-  output_zip.close()
+  common.ZipClose(output_zip)
 
 def main(argv):
   def option_handler(o, _):
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 8eb249a..0a387ec 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -82,6 +82,7 @@
   """A zero-length image."""
   blocksize = 4096
   care_map = RangeSet()
+  clobbered_blocks = RangeSet()
   total_blocks = 0
   file_map = {}
   def ReadRangeSet(self, ranges):
@@ -114,6 +115,7 @@
 
     self.total_blocks = len(self.data) / self.blocksize
     self.care_map = RangeSet(data=(0, self.total_blocks))
+    self.clobbered_blocks = RangeSet()
 
     zero_blocks = []
     nonzero_blocks = []
@@ -135,6 +137,8 @@
     return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
 
   def TotalSha1(self):
+    # DataImage always carries empty clobbered_blocks.
+    assert self.clobbered_blocks.size() == 0
     return sha1(self.data).hexdigest()
 
 
@@ -184,6 +188,10 @@
 #      (Typically a domain is a file, and the key in file_map is the
 #      pathname.)
 #
+#    clobbered_blocks: a RangeSet of blocks that contain data but may
+#      be altered by the FS. They need to be excluded when verifying
+#      the partition integrity.
+#
 #    ReadRangeSet(): a function that takes a RangeSet and returns the
 #      data contained in the image blocks of that RangeSet.  The data
 #      is returned as a list or tuple of strings; concatenating the
@@ -193,7 +201,7 @@
 #
 #    TotalSha1(): a function that returns (as a hex string) the SHA-1
 #      hash of all the data in the image (ie, all the blocks in the
-#      care_map)
+#      care_map minus clobbered_blocks).
 #
 # When creating a BlockImageDiff, the src image may be None, in which
 # case the list of transfers produced will never read from the
@@ -445,7 +453,6 @@
       if free_string:
         out.append("".join(free_string))
 
-
       # sanity check: abort if we're going to need more than 512 MB of
       # stash space
       assert max_stashed_blocks * self.tgt.blocksize < (512 << 20)
@@ -845,6 +852,12 @@
                  "zero", self.transfers)
         continue
 
+      elif tgt_fn == "__COPY":
+        # "__COPY" domain includes all the blocks not contained in any
+        # file and that need to be copied unconditionally to the target.
+        Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+        continue
+
       elif tgt_fn in self.src.file_map:
         # Look for an exact pathname match in the source.
         Transfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
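
Note on the verification side: with clobbered_blocks in play, TotalSha1() and the block_image_verify ranges cover care_map minus clobbered_blocks. A sketch of that contract, with plain sets of block numbers standing in for RangeSet and read_block as a hypothetical callback:

    from hashlib import sha1

    def total_sha1(read_block, care_blocks, clobbered_blocks):
        """Hash every block in the care map except the clobbered ones.

        Clobbered blocks (e.g. ext4's block 0) may be altered by the FS
        behind our back, so they are excluded from verification and are
        instead copied to the target unconditionally ("__COPY")."""
        h = sha1()
        for b in sorted(care_blocks - clobbered_blocks):
            h.update(read_block(b))
        return h.hexdigest()
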
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 9f8a8ec..3909b4c 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -205,8 +205,8 @@
   Returns:
     True iff the image is built successfully.
   """
-  # system_root_image=true: build a system.img that combines the contents of /system
-  # and the ramdisk, and can be mounted at the root of the file system.
+  # system_root_image=true: build a system.img that combines the contents of
+  # /system and the ramdisk, and can be mounted at the root of the file system.
   origin_in = in_dir
   fs_config = prop_dict.get("fs_config")
   if (prop_dict.get("system_root_image") == "true"
@@ -375,8 +375,8 @@
     copy_prop("system_size", "partition_size")
     copy_prop("system_journal_size", "journal_size")
     copy_prop("system_verity_block_device", "verity_block_device")
-    copy_prop("system_root_image","system_root_image")
-    copy_prop("ramdisk_dir","ramdisk_dir")
+    copy_prop("system_root_image", "system_root_image")
+    copy_prop("ramdisk_dir", "ramdisk_dir")
   elif mount_point == "data":
     # Copy the generic fs type first, override with specific one if available.
     copy_prop("fs_type", "fs_type")
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 592ed19..8af1c17 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -32,10 +32,7 @@
 import blockimgdiff
 import rangelib
 
-try:
-  from hashlib import sha1 as sha1
-except ImportError:
-  from sha import sha as sha1
+from hashlib import sha1 as sha1
 
 
 class Options(object):
@@ -52,6 +49,8 @@
     self.java_args = "-Xmx2048m" # JVM Args
     self.public_key_suffix = ".x509.pem"
     self.private_key_suffix = ".pk8"
+    # Use the otatools-built boot_signer by default.
+    self.boot_signer_path = "boot_signer"
     self.verbose = False
     self.tempfiles = []
     self.device_specific = None
@@ -362,7 +361,8 @@
 
   if info_dict.get("verity_key", None):
     path = "/" + os.path.basename(sourcedir).lower()
-    cmd = ["boot_signer", path, img.name, info_dict["verity_key"] + ".pk8",
+    cmd = [OPTIONS.boot_signer_path, path, img.name,
+           info_dict["verity_key"] + ".pk8",
            info_dict["verity_key"] + ".x509.pem", img.name]
     p = Run(cmd, stdout=subprocess.PIPE)
     p.communicate()
@@ -380,6 +380,10 @@
     p.communicate()
     assert p.returncode == 0, "vboot_signer of %s image failed" % path
 
+    # Clean up the temp files.
+    img_unsigned.close()
+    img_keyblock.close()
+
   img.seek(0, os.SEEK_SET)
   data = img.read()
 
@@ -652,7 +656,8 @@
         argv, "hvp:s:x:" + extra_opts,
         ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
          "java_path=", "java_args=", "public_key_suffix=",
-         "private_key_suffix=", "device_specific=", "extra="] +
+         "private_key_suffix=", "boot_signer_path=", "device_specific=",
+         "extra="] +
         list(extra_long_opts))
   except getopt.GetoptError as err:
     Usage(docstring)
@@ -679,6 +684,8 @@
       OPTIONS.public_key_suffix = a
     elif o in ("--private_key_suffix",):
       OPTIONS.private_key_suffix = a
+    elif o in ("--boot_signer_path",):
+      OPTIONS.boot_signer_path = a
     elif o in ("-s", "--device_specific"):
       OPTIONS.device_specific = a
     elif o in ("-x", "--extra"):
@@ -854,16 +861,50 @@
     zipfile.ZIP64_LIMIT = saved_zip64_limit
 
 
-def ZipWriteStr(zip_file, filename, data, perms=0o644, compression=None):
-  # use a fixed timestamp so the output is repeatable.
-  zinfo = zipfile.ZipInfo(filename=filename,
-                          date_time=(2009, 1, 1, 0, 0, 0))
-  if compression is None:
+def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=0o644,
+                compress_type=None):
+  """Wrap the zipfile.writestr() function to work around the zip64 limit.
+
+  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
+  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
+  when calling crc32(bytes).
+
+  But it still works fine to write a shorter string into a large zip file.
+  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
+  when we know the string won't be too long.
+  """
+
+  saved_zip64_limit = zipfile.ZIP64_LIMIT
+  zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
+    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
     zinfo.compress_type = zip_file.compression
   else:
-    zinfo.compress_type = compression
+    zinfo = zinfo_or_arcname
+
+  # If compress_type is given, it overrides the value in zinfo.
+  if compress_type is not None:
+    zinfo.compress_type = compress_type
+
   zinfo.external_attr = perms << 16
+  # Use a fixed timestamp so the output is repeatable.
+  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
+
   zip_file.writestr(zinfo, data)
+  zipfile.ZIP64_LIMIT = saved_zip64_limit
+
+
+def ZipClose(zip_file):
+  # http://b/18015246
+  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
+  # central directory.
+  saved_zip64_limit = zipfile.ZIP64_LIMIT
+  zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+  zip_file.close()
+
+  zipfile.ZIP64_LIMIT = saved_zip64_limit
 
 
 class DeviceSpecificParams(object):
@@ -969,7 +1010,7 @@
     return t
 
   def AddToZip(self, z, compression=None):
-    ZipWriteStr(z, self.name, self.data, compression=compression)
+    ZipWriteStr(z, self.name, self.data, compress_type=compression)
 
 DIFF_PROGRAM_BY_EXT = {
     ".gz" : "imgdiff",
@@ -1106,6 +1147,9 @@
     self.partition = partition
     self.check_first_block = check_first_block
 
+    # Due to http://b/20939131, check_first_block is disabled temporarily.
+    assert not self.check_first_block
+
     if version is None:
       version = 1
       if OPTIONS.info_dict:
@@ -1139,18 +1183,18 @@
     if not self.src:
       script.Print("Image %s will be patched unconditionally." % (partition,))
     else:
+      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
+      ranges_str = ranges.to_string_raw()
       if self.version >= 3:
         script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                             'block_image_verify("%s", '
                             'package_extract_file("%s.transfer.list"), '
                             '"%s.new.dat", "%s.patch.dat")) then') % (
-                            self.device, self.src.care_map.to_string_raw(),
-                            self.src.TotalSha1(),
+                            self.device, ranges_str, self.src.TotalSha1(),
                             self.device, partition, partition, partition))
       else:
         script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
-            self.device, self.src.care_map.to_string_raw(),
-            self.src.TotalSha1()))
+                           self.device, ranges_str, self.src.TotalSha1()))
       script.Print('Verified %s image...' % (partition,))
       script.AppendExtra('else')
 
@@ -1198,6 +1242,9 @@
 
     return ctx.hexdigest()
 
+  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
+  # the filesystem being remounted R/W. Will change the check to a
+  # finer-grained one that masks off those bits.
   def _CheckFirstBlock(self, script):
     r = rangelib.RangeSet((0, 1))
     srchash = self._HashBlocks(self.src, r)
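
Note: with ZipWrite(), ZipWriteStr() and ZipClose() all doing the ZIP64_LIMIT save/override/restore internally, call sites reduce to something like the following sketch (file names hypothetical):

    import zipfile
    import common

    output_zip = zipfile.ZipFile("target_files.zip", "a",
                                 compression=zipfile.ZIP_DEFLATED)
    # Large payloads must go through ZipWrite(); ZipWriteStr() cannot
    # handle data over 2GiB (crc32 overflows), but short strings into a
    # large archive are fine.
    common.ZipWrite(output_zip, "userdata.img", "IMAGES/userdata.img")
    common.ZipWriteStr(output_zip, "META/misc_info.txt", "key=value\n")
    common.ZipClose(output_zip)  # close() also consults ZIP64_LIMIT
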
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 8c5acd8..c486992 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -43,8 +43,9 @@
 
 def CopyInfo(output_zip):
   """Copy the android-info.txt file from the input to the output."""
-  output_zip.write(os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"),
-                   "android-info.txt")
+  common.ZipWrite(
+      output_zip, os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"),
+      "android-info.txt")
 
 
 def main(argv):
@@ -133,13 +134,7 @@
 
   finally:
     print "cleaning up..."
-    # http://b/18015246
-    # See common.py for context.  zipfile also refers to ZIP64_LIMIT during
-    # close() when it writes out the central directory.
-    saved_zip64_limit = zipfile.ZIP64_LIMIT
-    zipfile.ZIP64_LIMIT = (1 << 32) - 1
-    output_zip.close()
-    zipfile.ZIP64_LIMIT = saved_zip64_limit
+    common.ZipClose(output_zip)
     shutil.rmtree(OPTIONS.input_tmp)
 
   print "done."
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index eab3daa..c4d0c1b 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -92,7 +92,6 @@
   print >> sys.stderr, "Python 2.7 or newer is required."
   sys.exit(1)
 
-import copy
 import multiprocessing
 import os
 import tempfile
@@ -371,6 +370,7 @@
         symlinks.append((input_zip.read(info.filename),
                          "/" + partition + "/" + basefilename))
       else:
+        import copy
         info2 = copy.copy(info)
         fn = info2.filename = partition + "/" + basefilename
         if substitute and fn in substitute and substitute[fn] is None:
@@ -380,7 +380,7 @@
             data = substitute[fn]
           else:
             data = input_zip.read(info.filename)
-          output_zip.writestr(info2, data)
+          common.ZipWriteStr(output_zip, info2, data)
         if fn.endswith("/"):
           itemset.Get(fn[:-1], is_dir=True)
         else:
@@ -475,7 +475,13 @@
       path = add_img_to_target_files.BuildVendor(
           tmpdir, info_dict, block_list=mappath)
 
-  return sparse_img.SparseImage(path, mappath)
+  # Bug: http://b/20939131
+  # In ext4 filesystems, block 0 might be changed even when mounted
+  # R/O. We add it to clobbered_blocks so that it will be written to
+  # the target unconditionally. Note that it is still part of care_map.
+  clobbered_blocks = "0"
+
+  return sparse_img.SparseImage(path, mappath, clobbered_blocks)
 
 
 def WriteFullOTAPackage(input_zip, output_zip):
@@ -773,7 +779,6 @@
         OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
 
   system_diff = common.BlockDifference("system", system_tgt, system_src,
-                                       check_first_block=True,
                                        version=blockimgdiff_version)
 
   if HasVendorPartition(target_zip):
@@ -784,7 +789,6 @@
     vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
                           OPTIONS.target_info_dict)
     vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
-                                         check_first_block=True,
                                          version=blockimgdiff_version)
   else:
     vendor_diff = None
@@ -1581,6 +1585,7 @@
         OPTIONS.package_key = OPTIONS.info_dict.get(
             "default_system_dev_certificate",
             "build/target/product/security/testkey")
+      common.ZipClose(output_zip)
       break
 
     else:
@@ -1601,15 +1606,14 @@
         common.DumpInfoDict(OPTIONS.source_info_dict)
       try:
         WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
+        common.ZipClose(output_zip)
         break
       except ValueError:
         if not OPTIONS.fallback_to_full:
           raise
         print "--- failed to build incremental; falling back to full ---"
         OPTIONS.incremental_source = None
-        output_zip.close()
-
-  output_zip.close()
+        common.ZipClose(output_zip)
 
   if not OPTIONS.no_signing:
     SignOutput(temp_zip_file.name, args[1])
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index d47cc4f..ec49112 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -196,23 +196,23 @@
       if key not in common.SPECIAL_CERT_STRINGS:
         print "    signing: %-*s (%s)" % (maxsize, name, key)
         signed_data = SignApk(data, key, key_passwords[key])
-        output_tf_zip.writestr(out_info, signed_data)
+        common.ZipWriteStr(output_tf_zip, out_info, signed_data)
       else:
         # an APK we're not supposed to sign.
         print "NOT signing: %s" % (name,)
-        output_tf_zip.writestr(out_info, data)
+        common.ZipWriteStr(output_tf_zip, out_info, data)
     elif info.filename in ("SYSTEM/build.prop",
                            "VENDOR/build.prop",
                            "RECOVERY/RAMDISK/default.prop"):
       print "rewriting %s:" % (info.filename,)
       new_data = RewriteProps(data, misc_info)
-      output_tf_zip.writestr(out_info, new_data)
+      common.ZipWriteStr(output_tf_zip, out_info, new_data)
       if info.filename == "RECOVERY/RAMDISK/default.prop":
         write_to_temp(info.filename, info.external_attr, new_data)
     elif info.filename.endswith("mac_permissions.xml"):
       print "rewriting %s with new keys." % (info.filename,)
       new_data = ReplaceCerts(data)
-      output_tf_zip.writestr(out_info, new_data)
+      common.ZipWriteStr(output_tf_zip, out_info, new_data)
     elif info.filename in ("SYSTEM/recovery-from-boot.p",
                            "SYSTEM/bin/install-recovery.sh"):
       rebuild_recovery = True
@@ -229,7 +229,7 @@
       pass
     else:
       # a non-APK file; copy it verbatim
-      output_tf_zip.writestr(out_info, data)
+      common.ZipWriteStr(output_tf_zip, out_info, data)
 
   if OPTIONS.replace_ota_keys:
     new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)
@@ -243,7 +243,7 @@
         "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info)
 
     def output_sink(fn, data):
-      output_tf_zip.writestr("SYSTEM/"+fn, data)
+      common.ZipWriteStr(output_tf_zip, "SYSTEM/" + fn, data)
 
     common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img,
                              info_dict=misc_info)
@@ -488,8 +488,8 @@
   ProcessTargetFiles(input_zip, output_zip, misc_info,
                      apk_key_map, key_passwords)
 
-  input_zip.close()
-  output_zip.close()
+  common.ZipClose(input_zip)
+  common.ZipClose(output_zip)
 
   add_img_to_target_files.AddImagesToTargetFiles(args[1])
 
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index b97bb84..2ac97ac 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -21,10 +21,17 @@
 
 
 class SparseImage(object):
-  """Wraps a sparse image file (and optional file map) into an image
-  object suitable for passing to BlockImageDiff."""
+  """Wraps a sparse image file into an image object.
 
-  def __init__(self, simg_fn, file_map_fn=None):
+  Wraps a sparse image file (and optional file map and clobbered_blocks) into
+  an image object suitable for passing to BlockImageDiff. file_map contains
+  the mapping between files and their blocks. clobbered_blocks contains the set
+  of blocks that should be always written to the target regardless of the old
+  contents (i.e. copying instead of patching). clobbered_blocks should be in
+  the form of a string like "0" or "0 1-5 8".
+  """
+
+  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
     self.simg_f = f = open(simg_fn, "rb")
 
     header_bin = f.read(28)
@@ -57,6 +64,7 @@
     pos = 0   # in blocks
     care_data = []
     self.offset_map = offset_map = []
+    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)
 
     for i in range(total_chunks):
       header_bin = f.read(12)
@@ -103,7 +111,7 @@
     self.offset_index = [i[0] for i in offset_map]
 
     if file_map_fn:
-      self.LoadFileBlockMap(file_map_fn)
+      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
     else:
       self.file_map = {"__DATA": self.care_map}
 
@@ -111,9 +119,10 @@
     return [d for d in self._GetRangeData(ranges)]
 
   def TotalSha1(self):
-    """Return the SHA-1 hash of all data in the 'care' regions of this image."""
+    """Return the SHA-1 hash of all data in the 'care' regions of this
+    image, excluding clobbered_blocks."""
     h = sha1()
-    for d in self._GetRangeData(self.care_map):
+    for d in self._GetRangeData(self.care_map.subtract(self.clobbered_blocks)):
       h.update(d)
     return h.hexdigest()
 
@@ -156,7 +165,7 @@
           yield fill_data * (this_read * (self.blocksize >> 2))
         to_read -= this_read
 
-  def LoadFileBlockMap(self, fn):
+  def LoadFileBlockMap(self, fn, clobbered_blocks):
     remaining = self.care_map
     self.file_map = out = {}
 
@@ -166,14 +175,20 @@
         ranges = rangelib.RangeSet.parse(ranges)
         out[fn] = ranges
         assert ranges.size() == ranges.intersect(remaining).size()
+
+        # Currently we assume that blocks in clobbered_blocks are not part of
+        # any file.
+        assert not clobbered_blocks.overlaps(ranges)
         remaining = remaining.subtract(ranges)
 
+    remaining = remaining.subtract(clobbered_blocks)
+
     # For all the remaining blocks in the care_map (ie, those that
-    # aren't part of the data for any file), divide them into blocks
-    # that are all zero and blocks that aren't.  (Zero blocks are
-    # handled specially because (1) there are usually a lot of them
-    # and (2) bsdiff handles files with long sequences of repeated
-    # bytes especially poorly.)
+    # aren't part of the data for any file nor part of the clobbered_blocks),
+    # divide them into blocks that are all zero and blocks that aren't.
+    # (Zero blocks are handled specially because (1) there are usually
+    # a lot of them and (2) bsdiff handles files with long sequences of
+    # repeated bytes especially poorly.)
 
     zero_blocks = []
     nonzero_blocks = []
@@ -203,6 +218,7 @@
 
     out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
     out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks)
+    out["__COPY"] = clobbered_blocks
 
   def ResetFileMap(self):
     """Throw away the file map and treat the entire image as
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 5fdc132..f28934d 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -29,15 +29,54 @@
     data[begin:end] = os.urandom(block_size)
   return "".join(data)
 
+def get_2gb_string():
+  kilobytes = 1024
+  megabytes = 1024 * kilobytes
+  gigabytes = 1024 * megabytes
+
+  size = int(2 * gigabytes + 1)
+  block_size = 4 * kilobytes
+  step_size = 4 * megabytes
+  two_gb_string = random_string_with_holes(
+      size, block_size, step_size)
+  return two_gb_string
+
 
 class CommonZipTest(unittest.TestCase):
+  def _verify(self, zip_file, zip_file_name, arcname, contents,
+              test_file_name=None, expected_stat=None, expected_mode=0o644,
+              expected_compress_type=zipfile.ZIP_STORED):
+    # Verify the stat if present.
+    if test_file_name is not None:
+      new_stat = os.stat(test_file_name)
+      self.assertEqual(int(expected_stat.st_mode), int(new_stat.st_mode))
+      self.assertEqual(int(expected_stat.st_mtime), int(new_stat.st_mtime))
+
+    # Reopen the zip file to verify.
+    zip_file = zipfile.ZipFile(zip_file_name, "r")
+
+    # Verify the timestamp.
+    info = zip_file.getinfo(arcname)
+    self.assertEqual(info.date_time, (2009, 1, 1, 0, 0, 0))
+
+    # Verify the file mode.
+    mode = (info.external_attr >> 16) & 0o777
+    self.assertEqual(mode, expected_mode)
+
+    # Verify the compress type.
+    self.assertEqual(info.compress_type, expected_compress_type)
+
+    # Verify the zip contents.
+    self.assertEqual(zip_file.read(arcname), contents)
+    self.assertIsNone(zip_file.testzip())
+
   def _test_ZipWrite(self, contents, extra_zipwrite_args=None):
     extra_zipwrite_args = dict(extra_zipwrite_args or {})
 
     test_file = tempfile.NamedTemporaryFile(delete=False)
-    zip_file = tempfile.NamedTemporaryFile(delete=False)
-
     test_file_name = test_file.name
+
+    zip_file = tempfile.NamedTemporaryFile(delete=False)
     zip_file_name = zip_file.name
 
     # File names within an archive strip the leading slash.
@@ -52,31 +91,100 @@
       test_file.write(contents)
       test_file.close()
 
-      old_stat = os.stat(test_file_name)
+      expected_stat = os.stat(test_file_name)
       expected_mode = extra_zipwrite_args.get("perms", 0o644)
-
+      expected_compress_type = extra_zipwrite_args.get("compress_type",
+                                                       zipfile.ZIP_STORED)
       time.sleep(5)  # Make sure the atime/mtime will change measurably.
 
       common.ZipWrite(zip_file, test_file_name, **extra_zipwrite_args)
+      common.ZipClose(zip_file)
 
-      new_stat = os.stat(test_file_name)
-      self.assertEqual(int(old_stat.st_mode), int(new_stat.st_mode))
-      self.assertEqual(int(old_stat.st_mtime), int(new_stat.st_mtime))
-      self.assertIsNone(zip_file.testzip())
-
-      zip_file.close()
-      zip_file = zipfile.ZipFile(zip_file_name, "r")
-      info = zip_file.getinfo(arcname)
-
-      self.assertEqual(info.date_time, (2009, 1, 1, 0, 0, 0))
-      mode = (info.external_attr >> 16) & 0o777
-      self.assertEqual(mode, expected_mode)
-      self.assertEqual(zip_file.read(arcname), contents)
-      self.assertIsNone(zip_file.testzip())
+      self._verify(zip_file, zip_file_name, arcname, contents, test_file_name,
+                   expected_stat, expected_mode, expected_compress_type)
     finally:
       os.remove(test_file_name)
       os.remove(zip_file_name)
 
+  def _test_ZipWriteStr(self, zinfo_or_arcname, contents, extra_args=None):
+    extra_args = dict(extra_args or {})
+
+    zip_file = tempfile.NamedTemporaryFile(delete=False)
+    zip_file_name = zip_file.name
+    zip_file.close()
+
+    zip_file = zipfile.ZipFile(zip_file_name, "w")
+
+    try:
+      expected_compress_type = extra_args.get("compress_type",
+                                              zipfile.ZIP_STORED)
+      time.sleep(5)  # Make sure the atime/mtime will change measurably.
+
+      if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
+        zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
+      else:
+        zinfo = zinfo_or_arcname
+      arcname = zinfo.filename
+
+      common.ZipWriteStr(zip_file, zinfo, contents, **extra_args)
+      common.ZipClose(zip_file)
+
+      self._verify(zip_file, zip_file_name, arcname, contents,
+                   expected_compress_type=expected_compress_type)
+    finally:
+      os.remove(zip_file_name)
+
+  def _test_ZipWriteStr_large_file(self, large, small, extra_args=None):
+    extra_args = dict(extra_args or {})
+
+    zip_file = tempfile.NamedTemporaryFile(delete=False)
+    zip_file_name = zip_file.name
+
+    test_file = tempfile.NamedTemporaryFile(delete=False)
+    test_file_name = test_file.name
+
+    arcname_large = test_file_name
+    arcname_small = "bar"
+
+    # File names within an archive strip the leading slash.
+    if arcname_large[0] == "/":
+      arcname_large = arcname_large[1:]
+
+    zip_file.close()
+    zip_file = zipfile.ZipFile(zip_file_name, "w")
+
+    try:
+      test_file.write(large)
+      test_file.close()
+
+      expected_stat = os.stat(test_file_name)
+      expected_mode = 0o644
+      expected_compress_type = extra_args.get("compress_type",
+                                              zipfile.ZIP_STORED)
+      time.sleep(5)  # Make sure the atime/mtime will change measurably.
+
+      common.ZipWrite(zip_file, test_file_name, **extra_args)
+      common.ZipWriteStr(zip_file, arcname_small, small, **extra_args)
+      common.ZipClose(zip_file)
+
+      # Verify the contents written by ZipWrite().
+      self._verify(zip_file, zip_file_name, arcname_large, large,
+                   test_file_name, expected_stat, expected_mode,
+                   expected_compress_type)
+
+      # Verify the contents written by ZipWriteStr().
+      self._verify(zip_file, zip_file_name, arcname_small, small,
+                   expected_compress_type=expected_compress_type)
+    finally:
+      os.remove(zip_file_name)
+      os.remove(test_file_name)
+
+  def _test_reset_ZIP64_LIMIT(self, func, *args):
+    default_limit = (1 << 31) - 1
+    self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
+    func(*args)
+    self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
+
   def test_ZipWrite(self):
     file_contents = os.urandom(1024)
     self._test_ZipWrite(file_contents)
@@ -88,23 +196,64 @@
         "perms": 0o777,
         "compress_type": zipfile.ZIP_DEFLATED,
     })
+    self._test_ZipWrite(file_contents, {
+        "arcname": "foobar",
+        "perms": 0o700,
+        "compress_type": zipfile.ZIP_STORED,
+    })
 
   def test_ZipWrite_large_file(self):
-    kilobytes = 1024
-    megabytes = 1024 * kilobytes
-    gigabytes = 1024 * megabytes
-
-    size = int(2 * gigabytes + 1)
-    block_size = 4 * kilobytes
-    step_size = 4 * megabytes
-    file_contents = random_string_with_holes(
-        size, block_size, step_size)
+    file_contents = get_2gb_string()
     self._test_ZipWrite(file_contents, {
         "compress_type": zipfile.ZIP_DEFLATED,
     })
 
   def test_ZipWrite_resets_ZIP64_LIMIT(self):
-    default_limit = (1 << 31) - 1
-    self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
-    self._test_ZipWrite('')
-    self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
+    self._test_reset_ZIP64_LIMIT(self._test_ZipWrite, "")
+
+  def test_ZipWriteStr(self):
+    random_string = os.urandom(1024)
+    # Passing arcname
+    self._test_ZipWriteStr("foo", random_string)
+
+    # Passing zinfo
+    zinfo = zipfile.ZipInfo(filename="foo")
+    self._test_ZipWriteStr(zinfo, random_string)
+
+    # Timestamp in the zinfo should be overwritten.
+    zinfo.date_time = (2015, 3, 1, 15, 30, 0)
+    self._test_ZipWriteStr(zinfo, random_string)
+
+  def test_ZipWriteStr_with_opts(self):
+    random_string = os.urandom(1024)
+    # Passing arcname
+    self._test_ZipWriteStr("foo", random_string, {
+        "compress_type": zipfile.ZIP_DEFLATED,
+    })
+    self._test_ZipWriteStr("foo", random_string, {
+        "compress_type": zipfile.ZIP_STORED,
+    })
+
+    # Passing zinfo
+    zinfo = zipfile.ZipInfo(filename="foo")
+    self._test_ZipWriteStr(zinfo, random_string, {
+        "compress_type": zipfile.ZIP_DEFLATED,
+    })
+    self._test_ZipWriteStr(zinfo, random_string, {
+        "compress_type": zipfile.ZIP_STORED,
+    })
+
+  def test_ZipWriteStr_large_file(self):
+    # zipfile.writestr() doesn't work when the str size is over 2GiB even with
+    # the workaround. We will only test the case of writing a string into a
+    # large archive.
+    long_string = get_2gb_string()
+    short_string = os.urandom(1024)
+    self._test_ZipWriteStr_large_file(long_string, short_string, {
+        "compress_type": zipfile.ZIP_DEFLATED,
+    })
+
+  def test_ZipWriteStr_resets_ZIP64_LIMIT(self):
+    self._test_reset_ZIP64_LIMIT(self._test_ZipWriteStr, "foo", "")
+    zinfo = zipfile.ZipInfo(filename="foo")
+    self._test_reset_ZIP64_LIMIT(self._test_ZipWriteStr, zinfo, "")