Fix leaked callback when CleanUp() is called from TimeoutCallback().

When CleanUp() is called indirectly from TimeoutCallback(), the
cancellation targets the currently running TimeoutCallback(), before
the new recurrent call has been scheduled. The CancelTask() call will
trivially succeed because that callback has already triggered, and the
recurrent call scheduled afterwards is never canceled, so it leaks.

Before CL:281197, the g_source_destroy() call used to remove the
currently running callback would prevent it from being re-scheduled,
even if the callback returned TRUE.

This patch re-schedules the callback before calling CurlPerformOnce(),
so that CancelTask() cancels the newly scheduled task.
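
The following stand-alone C++ sketch only illustrates the ordering
problem; ToyLoop is a made-up stand-in for the real MessageLoop used by
LibcurlHttpFetcher, and the function names merely mirror the fetcher,
none of this is update_engine code. With the old order, the task posted
after CleanUp() has run is never canceled and stays pending; with the
new order CleanUp() cancels it.

  #include <cstddef>
  #include <functional>
  #include <iostream>
  #include <map>
  #include <utility>

  // Minimal task queue: tasks stay pending until they are run or canceled.
  class ToyLoop {
   public:
    using TaskId = int;
    TaskId PostTask(std::function<void()> task) {
      tasks_[next_id_] = std::move(task);
      return next_id_++;
    }
    bool CancelTask(TaskId id) { return tasks_.erase(id) > 0; }
    // Runs one pending task, mimicking the loop firing a delayed callback.
    void RunTask(TaskId id) {
      auto it = tasks_.find(id);
      if (it == tasks_.end())
        return;
      std::function<void()> task = std::move(it->second);
      tasks_.erase(it);  // A running task is no longer pending.
      task();
    }
    std::size_t PendingTasks() const { return tasks_.size(); }

   private:
    std::map<TaskId, std::function<void()>> tasks_;
    TaskId next_id_ = 1;
  };

  ToyLoop loop;
  ToyLoop::TaskId timeout_id = 0;
  bool transfer_in_progress = true;

  void CleanUp() {
    // Cancels whatever timeout_id points at right now.
    loop.CancelTask(timeout_id);
    transfer_in_progress = false;
  }

  void BuggyTimeoutCallback() {
    if (transfer_in_progress)
      CleanUp();  // Stands in for CurlPerformOnce() finishing the transfer.
    // Scheduled after CleanUp() already ran, so it is never canceled.
    timeout_id = loop.PostTask(BuggyTimeoutCallback);
  }

  void FixedTimeoutCallback() {
    // Scheduled first, so CleanUp() below can cancel it through timeout_id.
    timeout_id = loop.PostTask(FixedTimeoutCallback);
    if (transfer_in_progress)
      CleanUp();
  }

  int main() {
    timeout_id = loop.PostTask(BuggyTimeoutCallback);
    loop.RunTask(timeout_id);
    std::cout << "old order: " << loop.PendingTasks() << " pending\n";  // 1

    loop.CancelTask(timeout_id);  // Drop the leaked task before re-testing.
    transfer_in_progress = true;
    timeout_id = loop.PostTask(FixedTimeoutCallback);
    loop.RunTask(timeout_id);
    std::cout << "new order: " << loop.PendingTasks() << " pending\n";  // 0
    return 0;
  }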

Bug: chromium:535649
Test: Added unittest. Verified it fails without the change.

Change-Id: Ica742dab0eb8d9d5c5055c8afac9d775ad1e0012
diff --git a/http_fetcher_unittest.cc b/http_fetcher_unittest.cc
index 33cedf2..f3a9c3e 100644
--- a/http_fetcher_unittest.cc
+++ b/http_fetcher_unittest.cc
@@ -711,6 +711,42 @@
   }
 }
 
+TYPED_TEST(HttpFetcherTest, NoResponseTest) {
+  // This test starts a new http server that hangs without ever sending a
+  // response, so the fetcher gives up once the low-speed timeout expires.
+  if (this->test_.IsMock())
+    return;
+
+  PythonHttpServer* server = new PythonHttpServer();
+  int port = server->GetPort();
+  ASSERT_TRUE(server->started_);
+
+  // Handles destruction and claims ownership.
+  FailureHttpFetcherTestDelegate delegate(server);
+  unique_ptr<HttpFetcher> fetcher(this->test_.NewSmallFetcher());
+  fetcher->set_delegate(&delegate);
+  // The server will not reply at all, so we can limit the execution time of
+  // the test by reducing the low-speed timeout to something small. The test
+  // will finish once TimeoutCallback() triggers (every second) and detects
+  // that the low-speed timeout has expired.
+  fetcher->set_low_speed_limit(kDownloadLowSpeedLimitBps, 1);
+
+  this->loop_.PostTask(FROM_HERE, base::Bind(
+      StartTransfer,
+      fetcher.get(),
+      LocalServerUrlForPath(port, "/hang")));
+  this->loop_.Run();
+
+  // Check that no other callback runs in the next two seconds. That would
+  // indicate a leaked callback.
+  bool timeout = false;
+  auto callback = base::Bind([&timeout]{ timeout = true;});
+  this->loop_.PostDelayedTask(FROM_HERE, callback,
+                              base::TimeDelta::FromSeconds(2));
+  EXPECT_TRUE(this->loop_.RunOnce(true));
+  EXPECT_TRUE(timeout);
+}
+
 TYPED_TEST(HttpFetcherTest, ServerDiesTest) {
   // This test starts a new http server and kills it after receiving its first
   // set of bytes. It test whether or not our fetcher eventually gives up on
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index e8a8c4b..5b6c605 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -514,9 +514,6 @@
 }
 
 void LibcurlHttpFetcher::TimeoutCallback() {
-  if (transfer_in_progress_)
-    CurlPerformOnce();
-
   // We always re-schedule the callback, even if we don't want to be called
   // anymore. We will remove the event source separately if we don't want to
   // be called back.
@@ -524,6 +521,11 @@
       FROM_HERE,
       base::Bind(&LibcurlHttpFetcher::TimeoutCallback, base::Unretained(this)),
       TimeDelta::FromSeconds(idle_seconds_));
+
+  // CurlPerformOnce() may call CleanUp(), which cancels the scheduled
+  // callback, so we need to schedule ours before that can happen.
+  if (transfer_in_progress_)
+    CurlPerformOnce();
 }
 
 void LibcurlHttpFetcher::CleanUp() {
diff --git a/test_http_server.cc b/test_http_server.cc
index f4190c3..7326eb5 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -41,6 +41,7 @@
 #include <vector>
 
 #include <base/logging.h>
+#include <base/posix/eintr_wrapper.h>
 #include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
@@ -450,6 +451,12 @@
   }
 }
 
+void HandleHang(int fd) {
+  LOG(INFO) << "Hanging until the other side of the connection is closed.";
+  char c;
+  while (HANDLE_EINTR(read(fd, &c, 1)) > 0) {}
+}
+
 void HandleDefault(int fd, const HttpRequest& request) {
   const off_t start_offset = request.start_offset;
   const string data("unhandled path");
@@ -517,6 +524,8 @@
   } else if (base::StartsWithASCII(url, "/error-if-offset/", true)) {
     const UrlTerms terms(url, 3);
     HandleErrorIfOffset(fd, request, terms.GetSizeT(1), terms.GetInt(2));
+  } else if (url == "/hang") {
+    HandleHang(fd);
   } else {
     HandleDefault(fd, request);
   }