AU: Retry up to 3 times, 30 seconds apart, on HTTP response code 0.
This is a temporary measure to avoid failing the update check right after
resume by giving the network at least 1.5 minutes to come back online.
BUG=9705
TEST=tested on device through suspend/resume; unit tests
Change-Id: I291a1c31ce87c17d5dce0e30488d454d7690ddbc
Review URL: http://codereview.chromium.org/5260004
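
For context, the retry is scheduled as a one-shot GLib timeout that re-enters
the transfer path, as the libcurl_http_fetcher.cc hunk below shows. The
following is a minimal, self-contained sketch of that pattern, assuming a GLib
main loop; the class name, constants, and TransferComplete()/RestartTransfer()
hooks are illustrative stand-ins, not the actual update_engine interfaces:

  #include <glib.h>

  namespace {
  // Illustrative values mirroring the change: 3 retries, 30 seconds apart.
  const int kRetrySeconds = 30;
  const int kMaxRetries = 3;
  }  // namespace

  class NoNetworkFetcher {
   public:
    // Called when a transfer finishes. A response code of 0 means no HTTP
    // response was received at all (e.g., the network is still offline).
    void TransferComplete(int http_response_code) {
      if (http_response_code == 0 && retry_count_ < kMaxRetries) {
        retry_count_++;
        // Schedule a one-shot retry on the GLib main loop.
        g_timeout_add_seconds(kRetrySeconds,
                              &NoNetworkFetcher::StaticRetryTimeout,
                              this);
        return;  // Defer; the timeout callback restarts the transfer.
      }
      retry_count_ = 0;
      // ... handle success or a genuine HTTP error here ...
    }

   private:
    static gboolean StaticRetryTimeout(gpointer data) {
      static_cast<NoNetworkFetcher*>(data)->RestartTransfer();
      return FALSE;  // Returning FALSE makes the timeout fire only once.
    }

    void RestartTransfer() { /* re-issue the HTTP request here */ }

    int retry_count_ = 0;
  };
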
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index c6293b1..170abfa 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -25,6 +25,7 @@
namespace {
const int kMaxRetriesCount = 20;
+const int kNoNetworkRetrySeconds = 30;
const char kCACertificatesPath[] = "/usr/share/chromeos-ca-certificates";
} // namespace {}
@@ -152,6 +153,7 @@
transfer_size_ = -1;
resume_offset_ = 0;
retry_count_ = 0;
+ no_network_retry_count_ = 0;
http_response_code_ = 0;
ResolveProxiesForUrl(url);
ResumeTransfer(url);
@@ -192,6 +194,7 @@
GetHttpResponseCode();
if (http_response_code_) {
LOG(INFO) << "HTTP response code: " << http_response_code_;
+ no_network_retry_count_ = 0;
} else {
LOG(ERROR) << "Unable to get http response code.";
}
@@ -199,6 +202,21 @@
// we're done!
CleanUp();
+ // TODO(petkov): This temporary code tries to deal with the case where the
+ // update engine performs an update check while the network is not ready
+ // (e.g., right after resume). Longer term, we should check if the network
+ // is online/offline and return an appropriate error code.
+ if (!sent_byte_ &&
+ http_response_code_ == 0 &&
+ no_network_retry_count_ < no_network_max_retries_) {
+ no_network_retry_count_++;
+ g_timeout_add_seconds(kNoNetworkRetrySeconds,
+ &LibcurlHttpFetcher::StaticRetryTimeoutCallback,
+ this);
+ LOG(INFO) << "No HTTP response, retry " << no_network_retry_count_;
+ return;
+ }
+
if (!sent_byte_ &&
(http_response_code_ < 200 || http_response_code_ >= 300)) {
// The transfer completed w/ error and we didn't get any bytes.
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index c41804e..c5f784b 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -33,6 +33,8 @@
resume_offset_(0),
retry_count_(0),
retry_seconds_(60),
+ no_network_retry_count_(0),
+ no_network_max_retries_(0),
idle_seconds_(1),
force_connection_type_(false),
forced_expensive_connection_(false),
@@ -74,6 +76,10 @@
// Sets the retry timeout. Useful for testing.
void set_retry_seconds(int seconds) { retry_seconds_ = seconds; }
+ void set_no_network_max_retries(int retries) {
+ no_network_max_retries_ = retries;
+ }
+
void SetConnectionAsExpensive(bool is_expensive) {
force_connection_type_ = true;
forced_expensive_connection_ = is_expensive;
@@ -185,6 +191,10 @@
// Seconds to wait before retrying a resume.
int retry_seconds_;
+ // Count and maximum number of retries due to no network (HTTP response code 0).
+ int no_network_retry_count_;
+ int no_network_max_retries_;
+
// Seconds to wait before asking libcurl to "perform".
int idle_seconds_;
@@ -200,7 +210,7 @@
// If true, we are currently performing a write callback on the delegate.
bool in_write_callback_;
-
+
// If true, we have returned at least one byte in the write callback
// to the delegate.
bool sent_byte_;
diff --git a/update_attempter.cc b/update_attempter.cc
index 4e7d7bf..7c958cb 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -162,11 +162,18 @@
processor_->set_delegate(this);
// Actions:
+ LibcurlHttpFetcher* update_check_fetcher =
+ new LibcurlHttpFetcher(GetProxyResolver());
+ // If this is an automatic check, try harder to connect to the network. See
+ // comment in libcurl_http_fetcher.cc.
+ if (!obey_proxies) {
+ update_check_fetcher->set_no_network_max_retries(3);
+ }
shared_ptr<OmahaRequestAction> update_check_action(
new OmahaRequestAction(prefs_,
omaha_request_params_,
NULL,
- new LibcurlHttpFetcher(GetProxyResolver())));
+ update_check_fetcher)); // passes ownership
shared_ptr<OmahaResponseHandlerAction> response_handler_action(
new OmahaResponseHandlerAction(prefs_));
shared_ptr<FilesystemCopierAction> filesystem_copier_action(