summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJulian Andres Klode <julian.klode@canonical.com>2020-08-10 11:39:30 +0200
committerJulian Andres Klode <julian.klode@canonical.com>2020-08-10 11:39:30 +0200
commitfa375493c5a4ed9c10d4e5257ac82c6e687862d3 (patch)
tree0851a47e2d4d3686fd2072c5e3c72c878356c887
parent4b439208203cd584e158fd240a3a4a72d1248099 (diff)
Do not retry on failure to fetch
While we fixed the infinite retrying earlier, we still have problems if we retry in the middle of a transfer: we might end up resuming downloads that are already done and read more than we should (removing the IsOpen() check so that it always retries makes test-ubuntu-bug-1098738-apt-get-source-md5sum fail with wrong file sizes). I think the retrying was added to fix up pipelining mess-ups, but we have better solutions now, so let's get rid of it until we have implemented this properly.
-rw-r--r--methods/basehttp.cc33
1 files changed, 13 insertions, 20 deletions
diff --git a/methods/basehttp.cc b/methods/basehttp.cc
index 5e29e0ce1..b8ab73155 100644
--- a/methods/basehttp.cc
+++ b/methods/basehttp.cc
@@ -770,31 +770,24 @@ int BaseHttpMethod::Loop()
}
else
{
- if (Server->IsOpen() == false && FailCounter < 1)
+ if (not Server->IsOpen())
{
- FailCounter++;
- Server->Close();
- _error->Discard();
-
// Reset the pipeline
QueueBack = Queue;
Server->PipelineAnswersReceived = 0;
- continue;
}
- else
- {
- Server->Close();
- FailCounter = 0;
- switch (Result)
- {
- case ResultState::TRANSIENT_ERROR:
- Fail(true);
- break;
- case ResultState::FATAL_ERROR:
- case ResultState::SUCCESSFUL:
- Fail(false);
- break;
- }
+
+ Server->Close();
+ FailCounter = 0;
+ switch (Result)
+ {
+ case ResultState::TRANSIENT_ERROR:
+ Fail(true);
+ break;
+ case ResultState::FATAL_ERROR:
+ case ResultState::SUCCESSFUL:
+ Fail(false);
+ break;
}
}
break;