From 331e8396ee5a4f2e7d276eddc54749b2a13dd789 Mon Sep 17 00:00:00 2001
From: David Kalnischkies
Date: Mon, 16 Sep 2013 23:21:11 +0200
Subject: retry without partial data after a 416 response

If we get a 416 from the server, it means the Range we asked for is above
the real filesize of the file on the server. Mostly this happens when the
server doesn't support If-Range, but regardless of how we ended up with the
partial data, the data is invalid, so we discard it and retry from a clean
slate, hoping for the best.

The old behavior was to consider 416 an error and retry with a different
compression type until we ran out of compression types and requested the
uncompressed file (which doesn't exist on most mirrors) with an Accept line,
which the server answered with "406 Not Acceptable".

Closes: 710924
---
 methods/http.cc | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

(limited to 'methods')

diff --git a/methods/http.cc b/methods/http.cc
index 278ddb290..2a8b7f0fc 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -604,6 +604,8 @@ bool ServerState::HeaderLine(string Line)
       Size = strtoull(Val.c_str(), NULL, 10);
       if (Size >= std::numeric_limits<unsigned long long>::max())
          return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
+      else if (Size == 0)
+         HaveContent = false;
       return true;
    }
 
@@ -616,8 +618,14 @@ bool ServerState::HeaderLine(string Line)
    if (stringcasecmp(Tag,"Content-Range:") == 0)
    {
       HaveContent = true;
-
-      if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2)
+
+      // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
+      if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&Size) == 1)
+      {
+         StartPos = 1; // ignore Content-Length, it would override Size
+         HaveContent = false;
+      }
+      else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2)
          return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
       if ((unsigned long long)StartPos > Size)
          return _error->Error(_("This HTTP server has broken range support"));
       return true;
    }
@@ -995,6 +1003,13 @@ HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
       }
       /* else pass through for error message */
    }
+   // retry after an invalid range response without partial data
+   else if (Srv->Result == 416 && FileExists(Queue->DestFile) == true &&
+         unlink(Queue->DestFile.c_str()) == 0)
+   {
+      NextURI = Queue->Uri;
+      return TRY_AGAIN_OR_REDIRECT;
+   }
 
    /* We have a reply we dont handle. This should indicate a perm server
       failure */
-- cgit v1.2.3-70-g09d2

From 78c72d0ce22e00b194251445aae306df357d5c1a Mon Sep 17 00:00:00 2001
From: David Kalnischkies
Date: Tue, 17 Sep 2013 00:41:58 +0200
Subject: replace "filesize - 1" trick in http with proper 416 handling

Our http client used to request "filesize - 1" for the small edge case of a
file which was completely downloaded but not yet moved to its final place,
as requesting the full filesize produced 416 errors in that case. Now that
we can handle 416 responses, we instead special-case a request for the exact
filesize and handle it as a 200 without content.
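For illustration, a minimal standalone sketch of the decision the new 416
handling boils down to (HandleRange416 is a hypothetical helper written for
this description, not code from the patch): on a 416 the Content-Range
header only carries the real total size as "bytes */<total>", so comparing
that total against the local file tells us whether the download was in fact
already complete or whether the stale partial data has to be thrown away
before retrying.

   #include <cstdio>
   #include <string>
   #include <sys/stat.h>
   #include <unistd.h>

   // Returns 0 if the local file is already complete (treat the 416 like a
   // "200 without content"), 1 if stale partial data was discarded so the
   // request should be retried without a Range header, -1 on a bad header.
   static int HandleRange416(std::string const &ContentRange, std::string const &DestFile)
   {
      unsigned long long Total = 0;
      // RFC 2616 §14.16: on 416 the byte-range-resp-spec is '*'
      if (sscanf(ContentRange.c_str(), "bytes */%llu", &Total) != 1)
         return -1;

      struct stat Buf;
      if (stat(DestFile.c_str(), &Buf) == 0 &&
          static_cast<unsigned long long>(Buf.st_size) == Total)
         return 0;   // already fully downloaded, just not moved into place yet

      unlink(DestFile.c_str());   // the partial data is invalid, drop it
      return 1;                   // caller retries the request from scratch
   }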
---
 methods/http.cc | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

(limited to 'methods')

diff --git a/methods/http.cc b/methods/http.cc
index 2a8b7f0fc..5f0dc03d9 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -750,7 +750,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out)
    if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
    {
       // In this case we send an if-range query with a range header
-      sprintf(Buf,"Range: bytes=%lli-\r\nIf-Range: %s\r\n",(long long)SBuf.st_size - 1,
+      sprintf(Buf,"Range: bytes=%lli-\r\nIf-Range: %s\r\n",(long long)SBuf.st_size,
               TimeRFC1123(SBuf.st_mtime).c_str());
       Req += Buf;
    }
@@ -1004,11 +1004,24 @@ HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
       /* else pass through for error message */
    }
    // retry after an invalid range response without partial data
-   else if (Srv->Result == 416 && FileExists(Queue->DestFile) == true &&
-         unlink(Queue->DestFile.c_str()) == 0)
+   else if (Srv->Result == 416)
    {
-      NextURI = Queue->Uri;
-      return TRY_AGAIN_OR_REDIRECT;
+      struct stat SBuf;
+      if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
+      {
+         if ((unsigned long long)SBuf.st_size == Srv->Size)
+         {
+            // the file is completely downloaded, but was not moved
+            Srv->StartPos = Srv->Size;
+            Srv->Result = 200;
+            Srv->HaveContent = false;
+         }
+         else if (unlink(Queue->DestFile.c_str()) == 0)
+         {
+            NextURI = Queue->Uri;
+            return TRY_AGAIN_OR_REDIRECT;
+         }
+      }
    }
 
    /* We have a reply we dont handle. This should indicate a perm server
@@ -1246,7 +1259,9 @@ int HttpMethod::Loop()
             URIStart(Res);
 
             // Run the data
-            bool Result = Server->RunData();
+            bool Result = true;
+            if (Server->HaveContent)
+               Result = Server->RunData();
 
             /* If the server is sending back sizeless responses then fill in
                the size now */
-- cgit v1.2.3-70-g09d2

From 7330f4df8b31e66f6557bf49c9c90ad9a73ff459 Mon Sep 17 00:00:00 2001
From: David Kalnischkies
Date: Tue, 17 Sep 2013 22:35:44 +0200
Subject: refactor http client implementation

No effective behavior change, just shuffling big chunks of code between
methods and classes to split them into the parts strongly related to our
client implementation and the parts implementing HTTP itself. The idea is
to get HTTPS to a point at which most of the implementation can be shared,
even though the client implementations themselves are completely different.
This isn't anywhere near that point yet, but it should be enough to reuse
at least a few lines from http in https now.
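For orientation, a rough sketch of the shape this refactoring aims for
(illustrative names only; the real declarations are the ServerState and
ServerMethod classes in methods/server.h further below): the shared HTTP
logic such as header parsing, redirects and the main loop sits in an
abstract base, and a transport only supplies the raw I/O hooks.

   #include <string>

   // plays the role of ServerState: the shared header/redirect/416 logic is
   // implemented here on top of a few pure virtual I/O hooks
   struct TransportState
   {
      virtual bool Open() = 0;                               // establish the connection
      virtual bool WriteRequest(std::string const &Req) = 0; // queue request bytes
      virtual bool ReadHeaderLines(std::string &Data) = 0;   // pull response headers in
      virtual ~TransportState() {}
   };

   // plays the role of HttpServerState: only the socket-level details differ
   struct PlainHttpState : public TransportState
   {
      int ServerFd;
      PlainHttpState() : ServerFd(-1) {}
      virtual bool Open() { /* connect() a TCP socket into ServerFd */ return ServerFd != -1; }
      virtual bool WriteRequest(std::string const &) { /* fill the outgoing buffer */ return true; }
      virtual bool ReadHeaderLines(std::string &) { /* drain the incoming buffer */ return true; }
   };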
Git-Dch: Ignore --- methods/http.cc | 991 +++++++++++------------------------------------------- methods/http.h | 136 +++----- methods/makefile | 4 +- methods/server.cc | 665 ++++++++++++++++++++++++++++++++++++ methods/server.h | 142 ++++++++ 5 files changed, 1049 insertions(+), 889 deletions(-) create mode 100644 methods/server.cc create mode 100644 methods/server.h (limited to 'methods') diff --git a/methods/http.cc b/methods/http.cc index 5f0dc03d9..d2f084b04 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -58,15 +58,6 @@ /*}}}*/ using namespace std; -string HttpMethod::FailFile; -int HttpMethod::FailFd = -1; -time_t HttpMethod::FailTime = 0; -unsigned long PipelineDepth = 0; -unsigned long TimeOut = 120; -bool AllowRedirect = false; -bool Debug = false; -URI Proxy; - unsigned long long CircleBuf::BwReadLimit=0; unsigned long long CircleBuf::BwTickReadData=0; struct timeval CircleBuf::BwReadTick={0,0}; @@ -296,20 +287,17 @@ CircleBuf::~CircleBuf() delete Hash; } -// ServerState::ServerState - Constructor /*{{{*/ -// --------------------------------------------------------------------- -/* */ -ServerState::ServerState(URI Srv,HttpMethod *Owner) : Owner(Owner), - In(64*1024), Out(4*1024), - ServerName(Srv) +// HttpServerState::HttpServerState - Constructor /*{{{*/ +HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(64*1024), Out(4*1024) { + TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut); Reset(); } /*}}}*/ -// ServerState::Open - Open a connection to the server /*{{{*/ +// HttpServerState::Open - Open a connection to the server /*{{{*/ // --------------------------------------------------------------------- /* This opens a connection to the server. */ -bool ServerState::Open() +bool HttpServerState::Open() { // Use the already open connection if possible. if (ServerFd != -1) @@ -373,72 +361,18 @@ bool ServerState::Open() return true; } /*}}}*/ -// ServerState::Close - Close a connection to the server /*{{{*/ +// HttpServerState::Close - Close a connection to the server /*{{{*/ // --------------------------------------------------------------------- /* */ -bool ServerState::Close() +bool HttpServerState::Close() { close(ServerFd); ServerFd = -1; return true; } /*}}}*/ -// ServerState::RunHeaders - Get the headers before the data /*{{{*/ -// --------------------------------------------------------------------- -/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header - parse error occurred */ -ServerState::RunHeadersResult ServerState::RunHeaders() -{ - State = Header; - - Owner->Status(_("Waiting for headers")); - - Major = 0; - Minor = 0; - Result = 0; - Size = 0; - StartPos = 0; - Encoding = Closes; - HaveContent = false; - time(&Date); - - do - { - string Data; - if (In.WriteTillEl(Data) == false) - continue; - - if (Debug == true) - clog << Data; - - for (string::const_iterator I = Data.begin(); I < Data.end(); ++I) - { - string::const_iterator J = I; - for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J); - if (HeaderLine(string(I,J)) == false) - return RUN_HEADERS_PARSE_ERROR; - I = J; - } - - // 100 Continue is a Nop... - if (Result == 100) - continue; - - // Tidy up the connection persistance state. 
- if (Encoding == Closes && HaveContent == true) - Persistent = false; - - return RUN_HEADERS_OK; - } - while (Owner->Go(false,this) == true); - - return RUN_HEADERS_IO_ERROR; -} - /*}}}*/ -// ServerState::RunData - Transfer the data from the socket /*{{{*/ -// --------------------------------------------------------------------- -/* */ -bool ServerState::RunData() +// HttpServerState::RunData - Transfer the data from the socket /*{{{*/ +bool HttpServerState::RunData(FileFd * const File) { State = Data; @@ -456,7 +390,7 @@ bool ServerState::RunData() if (In.WriteTillEl(Data,true) == true) break; } - while ((Last = Owner->Go(false,this)) == true); + while ((Last = Go(false, File)) == true); if (Last == false) return false; @@ -474,7 +408,7 @@ bool ServerState::RunData() if (In.WriteTillEl(Data,true) == true && Data.length() <= 2) break; } - while ((Last = Owner->Go(false,this)) == true); + while ((Last = Go(false, File)) == true); if (Last == false) return false; return !_error->PendingError(); @@ -482,7 +416,7 @@ bool ServerState::RunData() // Transfer the block In.Limit(Len); - while (Owner->Go(true,this) == true) + while (Go(true, File) == true) if (In.IsLimit() == true) break; @@ -498,7 +432,7 @@ bool ServerState::RunData() if (In.WriteTillEl(Data,true) == true) break; } - while ((Last = Owner->Go(false,this)) == true); + while ((Last = Go(false, File)) == true); if (Last == false) return false; } @@ -521,147 +455,217 @@ bool ServerState::RunData() In.Limit(-1); return !_error->PendingError(); } - while (Owner->Go(true,this) == true); + while (Go(true, File) == true); } - return Owner->Flush(this) && !_error->PendingError(); + return Owner->Flush() && !_error->PendingError(); } /*}}}*/ -// ServerState::HeaderLine - Process a header line /*{{{*/ -// --------------------------------------------------------------------- -/* */ -bool ServerState::HeaderLine(string Line) +bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/ { - if (Line.empty() == true) - return true; + return In.WriteTillEl(Data); +} + /*}}}*/ +bool HttpServerState::LoadNextResponse(bool const ToFile, FileFd * const File)/*{{{*/ +{ + return Go(ToFile, File); +} + /*}}}*/ +bool HttpServerState::WriteResponse(const std::string &Data) /*{{{*/ +{ + return Out.Read(Data); +} + /*}}}*/ +bool HttpServerState::IsOpen() /*{{{*/ +{ + return (ServerFd != -1); +} + /*}}}*/ +bool HttpServerState::InitHashes(FileFd &File) /*{{{*/ +{ + delete In.Hash; + In.Hash = new Hashes; - string::size_type Pos = Line.find(' '); - if (Pos == string::npos || Pos+1 > Line.length()) + // Set the expected size and read file for the hashes + if (StartPos >= 0) { - // Blah, some servers use "connection:closes", evil. - Pos = Line.find(':'); - if (Pos == string::npos || Pos + 2 > Line.length()) - return _error->Error(_("Bad header line")); - Pos++; + File.Truncate(StartPos); + + return In.Hash->AddFD(File, StartPos); } + return true; +} + /*}}}*/ +Hashes * HttpServerState::GetHashes() /*{{{*/ +{ + return In.Hash; +} + /*}}}*/ +// HttpServerState::Die - The server has closed the connection. /*{{{*/ +bool HttpServerState::Die(FileFd &File) +{ + unsigned int LErrno = errno; - // Parse off any trailing spaces between the : and the next word. 
- string::size_type Pos2 = Pos; - while (Pos2 < Line.length() && isspace(Line[Pos2]) != 0) - Pos2++; - - string Tag = string(Line,0,Pos); - string Val = string(Line,Pos2); - - if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0) + // Dump the buffer to the file + if (State == ServerState::Data) { - // Evil servers return no version - if (Line[4] == '/') - { - int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code); - if (elements == 3) - { - Code[0] = '\0'; - if (Debug == true) - clog << "HTTP server doesn't give Reason-Phrase for " << Result << std::endl; - } - else if (elements != 4) - return _error->Error(_("The HTTP server sent an invalid reply header")); - } - else + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File.Name() != "/dev/null") + SetNonBlock(File.Fd(),false); + while (In.WriteSpace() == true) { - Major = 0; - Minor = 9; - if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2) - return _error->Error(_("The HTTP server sent an invalid reply header")); - } + if (In.Write(File.Fd()) == false) + return _error->Errno("write",_("Error writing to the file")); - /* Check the HTTP response header to get the default persistance - state. */ - if (Major < 1) - Persistent = false; - else - { - if (Major == 1 && Minor == 0) - Persistent = false; - else - Persistent = true; + // Done + if (In.IsLimit() == true) + return true; } + } - return true; - } - - if (stringcasecmp(Tag,"Content-Length:") == 0) + // See if this is because the server finished the data stream + if (In.IsLimit() == false && State != HttpServerState::Header && + Encoding != HttpServerState::Closes) { - if (Encoding == Closes) - Encoding = Stream; - HaveContent = true; - - // The length is already set from the Content-Range header - if (StartPos != 0) - return true; - - Size = strtoull(Val.c_str(), NULL, 10); - if (Size >= std::numeric_limits::max()) - return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header")); - else if (Size == 0) - HaveContent = false; - return true; + Close(); + if (LErrno == 0) + return _error->Error(_("Error reading from server. Remote end closed connection")); + errno = LErrno; + return _error->Errno("read",_("Error reading from server")); } - - if (stringcasecmp(Tag,"Content-Type:") == 0) + else { - HaveContent = true; + In.Limit(-1); + + // Nothing left in the buffer + if (In.WriteSpace() == false) + return false; + + // We may have got multiple responses back in one packet.. 
+ Close(); return true; } - - if (stringcasecmp(Tag,"Content-Range:") == 0) - { - HaveContent = true; - // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416 - if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&Size) == 1) + return false; +} + /*}}}*/ +// HttpServerState::Flush - Dump the buffer into the file /*{{{*/ +// --------------------------------------------------------------------- +/* This takes the current input buffer from the Server FD and writes it + into the file */ +bool HttpServerState::Flush(FileFd * const File) +{ + if (File != NULL) + { + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File->Name() != "/dev/null") + SetNonBlock(File->Fd(),false); + if (In.WriteSpace() == false) + return true; + + while (In.WriteSpace() == true) { - StartPos = 1; // ignore Content-Length, it would override Size - HaveContent = false; + if (In.Write(File->Fd()) == false) + return _error->Errno("write",_("Error writing to file")); + if (In.IsLimit() == true) + return true; } - else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2) - return _error->Error(_("The HTTP server sent an invalid Content-Range header")); - if ((unsigned long long)StartPos > Size) - return _error->Error(_("This HTTP server has broken range support")); - return true; + + if (In.IsLimit() == true || Encoding == ServerState::Closes) + return true; } + return false; +} + /*}}}*/ +// HttpServerState::Go - Run a single loop /*{{{*/ +// --------------------------------------------------------------------- +/* This runs the select loop over the server FDs, Output file FDs and + stdin. */ +bool HttpServerState::Go(bool ToFile, FileFd * const File) +{ + // Server has closed the connection + if (ServerFd == -1 && (In.WriteSpace() == false || + ToFile == false)) + return false; + + fd_set rfds,wfds; + FD_ZERO(&rfds); + FD_ZERO(&wfds); + + /* Add the server. 
We only send more requests if the connection will + be persisting */ + if (Out.WriteSpace() == true && ServerFd != -1 + && Persistent == true) + FD_SET(ServerFd,&wfds); + if (In.ReadSpace() == true && ServerFd != -1) + FD_SET(ServerFd,&rfds); - if (stringcasecmp(Tag,"Transfer-Encoding:") == 0) + // Add the file + int FileFD = -1; + if (File != NULL) + FileFD = File->Fd(); + + if (In.WriteSpace() == true && ToFile == true && FileFD != -1) + FD_SET(FileFD,&wfds); + + // Add stdin + if (_config->FindB("Acquire::http::DependOnSTDIN", true) == true) + FD_SET(STDIN_FILENO,&rfds); + + // Figure out the max fd + int MaxFd = FileFD; + if (MaxFd < ServerFd) + MaxFd = ServerFd; + + // Select + struct timeval tv; + tv.tv_sec = TimeOut; + tv.tv_usec = 0; + int Res = 0; + if ((Res = select(MaxFd+1,&rfds,&wfds,0,&tv)) < 0) { - HaveContent = true; - if (stringcasecmp(Val,"chunked") == 0) - Encoding = Chunked; - return true; + if (errno == EINTR) + return true; + return _error->Errno("select",_("Select failed")); } - - if (stringcasecmp(Tag,"Connection:") == 0) + + if (Res == 0) { - if (stringcasecmp(Val,"close") == 0) - Persistent = false; - if (stringcasecmp(Val,"keep-alive") == 0) - Persistent = true; - return true; + _error->Error(_("Connection timed out")); + return Die(*File); } - if (stringcasecmp(Tag,"Last-Modified:") == 0) + // Handle server IO + if (ServerFd != -1 && FD_ISSET(ServerFd,&rfds)) { - if (RFC1123StrToTime(Val.c_str(), Date) == false) - return _error->Error(_("Unknown date format")); - return true; + errno = 0; + if (In.Read(ServerFd) == false) + return Die(*File); + } + + if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds)) + { + errno = 0; + if (Out.Write(ServerFd) == false) + return Die(*File); } - if (stringcasecmp(Tag,"Location:") == 0) + // Send data to the file + if (FileFD != -1 && FD_ISSET(FileFD,&wfds)) { - Location = Val; - return true; + if (In.Write(FileFD) == false) + return _error->Errno("write",_("Error writing to output file")); } + // Handle commands from APT + if (FD_ISSET(STDIN_FILENO,&rfds)) + { + if (Owner->Run(true) != -1) + exit(100); + } + return true; } /*}}}*/ @@ -669,7 +673,7 @@ bool ServerState::HeaderLine(string Line) // HttpMethod::SendReq - Send the HTTP request /*{{{*/ // --------------------------------------------------------------------- /* This places the http request in the outbound buffer */ -void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) +void HttpMethod::SendReq(FetchItem *Itm) { URI Uri = Itm->Uri; @@ -695,7 +699,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) but while its a must for all servers to accept absolute URIs, it is assumed clients will sent an absolute path for non-proxies */ std::string requesturi; - if (Proxy.empty() == true || Proxy.Host.empty()) + if (Server->Proxy.empty() == true || Server->Proxy.Host.empty()) requesturi = Uri.Path; else requesturi = Itm->Uri; @@ -763,9 +767,9 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) } } - if (Proxy.User.empty() == false || Proxy.Password.empty() == false) + if (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false) Req += string("Proxy-Authorization: Basic ") + - Base64Encode(Proxy.User + ":" + Proxy.Password) + "\r\n"; + Base64Encode(Server->Proxy.User + ":" + Server->Proxy.Password) + "\r\n"; maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc")); if (Uri.User.empty() == false || Uri.Password.empty() == false) @@ -779,360 +783,18 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) if (Debug == true) cerr << Req << endl; - 
Out.Read(Req); -} - /*}}}*/ -// HttpMethod::Go - Run a single loop /*{{{*/ -// --------------------------------------------------------------------- -/* This runs the select loop over the server FDs, Output file FDs and - stdin. */ -bool HttpMethod::Go(bool ToFile,ServerState *Srv) -{ - // Server has closed the connection - if (Srv->ServerFd == -1 && (Srv->In.WriteSpace() == false || - ToFile == false)) - return false; - - fd_set rfds,wfds; - FD_ZERO(&rfds); - FD_ZERO(&wfds); - - /* Add the server. We only send more requests if the connection will - be persisting */ - if (Srv->Out.WriteSpace() == true && Srv->ServerFd != -1 - && Srv->Persistent == true) - FD_SET(Srv->ServerFd,&wfds); - if (Srv->In.ReadSpace() == true && Srv->ServerFd != -1) - FD_SET(Srv->ServerFd,&rfds); - - // Add the file - int FileFD = -1; - if (File != 0) - FileFD = File->Fd(); - - if (Srv->In.WriteSpace() == true && ToFile == true && FileFD != -1) - FD_SET(FileFD,&wfds); - - // Add stdin - if (_config->FindB("Acquire::http::DependOnSTDIN", true) == true) - FD_SET(STDIN_FILENO,&rfds); - - // Figure out the max fd - int MaxFd = FileFD; - if (MaxFd < Srv->ServerFd) - MaxFd = Srv->ServerFd; - - // Select - struct timeval tv; - tv.tv_sec = TimeOut; - tv.tv_usec = 0; - int Res = 0; - if ((Res = select(MaxFd+1,&rfds,&wfds,0,&tv)) < 0) - { - if (errno == EINTR) - return true; - return _error->Errno("select",_("Select failed")); - } - - if (Res == 0) - { - _error->Error(_("Connection timed out")); - return ServerDie(Srv); - } - - // Handle server IO - if (Srv->ServerFd != -1 && FD_ISSET(Srv->ServerFd,&rfds)) - { - errno = 0; - if (Srv->In.Read(Srv->ServerFd) == false) - return ServerDie(Srv); - } - - if (Srv->ServerFd != -1 && FD_ISSET(Srv->ServerFd,&wfds)) - { - errno = 0; - if (Srv->Out.Write(Srv->ServerFd) == false) - return ServerDie(Srv); - } - - // Send data to the file - if (FileFD != -1 && FD_ISSET(FileFD,&wfds)) - { - if (Srv->In.Write(FileFD) == false) - return _error->Errno("write",_("Error writing to output file")); - } - - // Handle commands from APT - if (FD_ISSET(STDIN_FILENO,&rfds)) - { - if (Run(true) != -1) - exit(100); - } - - return true; -} - /*}}}*/ -// HttpMethod::Flush - Dump the buffer into the file /*{{{*/ -// --------------------------------------------------------------------- -/* This takes the current input buffer from the Server FD and writes it - into the file */ -bool HttpMethod::Flush(ServerState *Srv) -{ - if (File != 0) - { - // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking - // can't be set - if (File->Name() != "/dev/null") - SetNonBlock(File->Fd(),false); - if (Srv->In.WriteSpace() == false) - return true; - - while (Srv->In.WriteSpace() == true) - { - if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write",_("Error writing to file")); - if (Srv->In.IsLimit() == true) - return true; - } - - if (Srv->In.IsLimit() == true || Srv->Encoding == ServerState::Closes) - return true; - } - return false; + Server->WriteResponse(Req); } /*}}}*/ -// HttpMethod::ServerDie - The server has closed the connection. 
/*{{{*/ -// --------------------------------------------------------------------- -/* */ -bool HttpMethod::ServerDie(ServerState *Srv) -{ - unsigned int LErrno = errno; - - // Dump the buffer to the file - if (Srv->State == ServerState::Data) - { - // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking - // can't be set - if (File->Name() != "/dev/null") - SetNonBlock(File->Fd(),false); - while (Srv->In.WriteSpace() == true) - { - if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write",_("Error writing to the file")); - - // Done - if (Srv->In.IsLimit() == true) - return true; - } - } - - // See if this is because the server finished the data stream - if (Srv->In.IsLimit() == false && Srv->State != ServerState::Header && - Srv->Encoding != ServerState::Closes) - { - Srv->Close(); - if (LErrno == 0) - return _error->Error(_("Error reading from server. Remote end closed connection")); - errno = LErrno; - return _error->Errno("read",_("Error reading from server")); - } - else - { - Srv->In.Limit(-1); - - // Nothing left in the buffer - if (Srv->In.WriteSpace() == false) - return false; - - // We may have got multiple responses back in one packet.. - Srv->Close(); - return true; - } - - return false; -} - /*}}}*/ -// HttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/ -// --------------------------------------------------------------------- -/* We look at the header data we got back from the server and decide what - to do. Returns DealWithHeadersResult (see http.h for details). - */ -HttpMethod::DealWithHeadersResult -HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) -{ - // Not Modified - if (Srv->Result == 304) - { - unlink(Queue->DestFile.c_str()); - Res.IMSHit = true; - Res.LastModified = Queue->LastModified; - return IMS_HIT; - } - - /* Redirect - * - * Note that it is only OK for us to treat all redirection the same - * because we *always* use GET, not other HTTP methods. There are - * three redirection codes for which it is not appropriate that we - * redirect. Pass on those codes so the error handling kicks in. - */ - if (AllowRedirect - && (Srv->Result > 300 && Srv->Result < 400) - && (Srv->Result != 300 // Multiple Choices - && Srv->Result != 304 // Not Modified - && Srv->Result != 306)) // (Not part of HTTP/1.1, reserved) - { - if (Srv->Location.empty() == true); - else if (Srv->Location[0] == '/' && Queue->Uri.empty() == false) - { - URI Uri = Queue->Uri; - if (Uri.Host.empty() == false) - NextURI = URI::SiteOnly(Uri); - else - NextURI.clear(); - NextURI.append(DeQuoteString(Srv->Location)); - return TRY_AGAIN_OR_REDIRECT; - } - else - { - NextURI = DeQuoteString(Srv->Location); - URI tmpURI = NextURI; - // Do not allow a redirection to switch protocol - if (tmpURI.Access == "http") - return TRY_AGAIN_OR_REDIRECT; - } - /* else pass through for error message */ - } - // retry after an invalid range response without partial data - else if (Srv->Result == 416) - { - struct stat SBuf; - if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) - { - if ((unsigned long long)SBuf.st_size == Srv->Size) - { - // the file is completely downloaded, but was not moved - Srv->StartPos = Srv->Size; - Srv->Result = 200; - Srv->HaveContent = false; - } - else if (unlink(Queue->DestFile.c_str()) == 0) - { - NextURI = Queue->Uri; - return TRY_AGAIN_OR_REDIRECT; - } - } - } - - /* We have a reply we dont handle. 
This should indicate a perm server - failure */ - if (Srv->Result < 200 || Srv->Result >= 300) - { - char err[255]; - snprintf(err,sizeof(err)-1,"HttpError%i",Srv->Result); - SetFailReason(err); - _error->Error("%u %s",Srv->Result,Srv->Code); - if (Srv->HaveContent == true) - return ERROR_WITH_CONTENT_PAGE; - return ERROR_UNRECOVERABLE; - } - - // This is some sort of 2xx 'data follows' reply - Res.LastModified = Srv->Date; - Res.Size = Srv->Size; - - // Open the file - delete File; - File = new FileFd(Queue->DestFile,FileFd::WriteAny); - if (_error->PendingError() == true) - return ERROR_NOT_FROM_SERVER; - - FailFile = Queue->DestFile; - FailFile.c_str(); // Make sure we dont do a malloc in the signal handler - FailFd = File->Fd(); - FailTime = Srv->Date; - - delete Srv->In.Hash; - Srv->In.Hash = new Hashes; - - // Set the expected size and read file for the hashes - if (Srv->StartPos >= 0) - { - Res.ResumePoint = Srv->StartPos; - File->Truncate(Srv->StartPos); - - if (Srv->In.Hash->AddFD(*File,Srv->StartPos) == false) - { - _error->Errno("read",_("Problem hashing file")); - return ERROR_NOT_FROM_SERVER; - } - } - - SetNonBlock(File->Fd(),true); - return FILE_IS_OPEN; -} - /*}}}*/ -// HttpMethod::SigTerm - Handle a fatal signal /*{{{*/ -// --------------------------------------------------------------------- -/* This closes and timestamps the open file. This is neccessary to get - resume behavoir on user abort */ -void HttpMethod::SigTerm(int) -{ - if (FailFd == -1) - _exit(100); - close(FailFd); - - // Timestamp - struct utimbuf UBuf; - UBuf.actime = FailTime; - UBuf.modtime = FailTime; - utime(FailFile.c_str(),&UBuf); - - _exit(100); -} - /*}}}*/ -// HttpMethod::Fetch - Fetch an item /*{{{*/ -// --------------------------------------------------------------------- -/* This adds an item to the pipeline. We keep the pipeline at a fixed - depth. 
*/ -bool HttpMethod::Fetch(FetchItem *) -{ - if (Server == 0) - return true; - - // Queue the requests - int Depth = -1; - for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth; - I = I->Next, Depth++) - { - // If pipelining is disabled, we only queue 1 request - if (Server->Pipeline == false && Depth >= 0) - break; - - // Make sure we stick with the same server - if (Server->Comp(I->Uri) == false) - break; - if (QueueBack == I) - { - QueueBack = I->Next; - SendReq(I,Server->Out); - continue; - } - } - - return true; -}; - /*}}}*/ // HttpMethod::Configuration - Handle a configuration message /*{{{*/ // --------------------------------------------------------------------- /* We stash the desired pipeline depth */ bool HttpMethod::Configuration(string Message) { - if (pkgAcqMethod::Configuration(Message) == false) + if (ServerMethod::Configuration(Message) == false) return false; - + AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true); - TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut); PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth", PipelineDepth); Debug = _config->FindB("Debug::Acquire::http",false); @@ -1144,260 +806,6 @@ bool HttpMethod::Configuration(string Message) return true; } /*}}}*/ -// HttpMethod::Loop - Main loop /*{{{*/ -// --------------------------------------------------------------------- -/* */ -int HttpMethod::Loop() -{ - typedef vector StringVector; - typedef vector::iterator StringVectorIterator; - map Redirected; - - signal(SIGTERM,SigTerm); - signal(SIGINT,SigTerm); - - Server = 0; - - int FailCounter = 0; - while (1) - { - // We have no commands, wait for some to arrive - if (Queue == 0) - { - if (WaitFd(STDIN_FILENO) == false) - return 0; - } - - /* Run messages, we can accept 0 (no message) if we didn't - do a WaitFd above.. Otherwise the FD is closed. */ - int Result = Run(true); - if (Result != -1 && (Result != 0 || Queue == 0)) - { - if(FailReason.empty() == false || - _config->FindB("Acquire::http::DependOnSTDIN", true) == true) - return 100; - else - return 0; - } - - if (Queue == 0) - continue; - - // Connect to the server - if (Server == 0 || Server->Comp(Queue->Uri) == false) - { - delete Server; - Server = new ServerState(Queue->Uri,this); - } - /* If the server has explicitly said this is the last connection - then we pre-emptively shut down the pipeline and tear down - the connection. This will speed up HTTP/1.0 servers a tad - since we don't have to wait for the close sequence to - complete */ - if (Server->Persistent == false) - Server->Close(); - - // Reset the pipeline - if (Server->ServerFd == -1) - QueueBack = Queue; - - // Connnect to the host - if (Server->Open() == false) - { - Fail(true); - delete Server; - Server = 0; - continue; - } - - // Fill the pipeline. - Fetch(0); - - // Fetch the next URL header data from the server. - switch (Server->RunHeaders()) - { - case ServerState::RUN_HEADERS_OK: - break; - - // The header data is bad - case ServerState::RUN_HEADERS_PARSE_ERROR: - { - _error->Error(_("Bad header data")); - Fail(true); - RotateDNS(); - continue; - } - - // The server closed a connection during the header get.. - default: - case ServerState::RUN_HEADERS_IO_ERROR: - { - FailCounter++; - _error->Discard(); - Server->Close(); - Server->Pipeline = false; - - if (FailCounter >= 2) - { - Fail(_("Connection failed"),true); - FailCounter = 0; - } - - RotateDNS(); - continue; - } - }; - - // Decide what to do. 
- FetchResult Res; - Res.Filename = Queue->DestFile; - switch (DealWithHeaders(Res,Server)) - { - // Ok, the file is Open - case FILE_IS_OPEN: - { - URIStart(Res); - - // Run the data - bool Result = true; - if (Server->HaveContent) - Result = Server->RunData(); - - /* If the server is sending back sizeless responses then fill in - the size now */ - if (Res.Size == 0) - Res.Size = File->Size(); - - // Close the file, destroy the FD object and timestamp it - FailFd = -1; - delete File; - File = 0; - - // Timestamp - struct utimbuf UBuf; - time(&UBuf.actime); - UBuf.actime = Server->Date; - UBuf.modtime = Server->Date; - utime(Queue->DestFile.c_str(),&UBuf); - - // Send status to APT - if (Result == true) - { - Res.TakeHashes(*Server->In.Hash); - URIDone(Res); - } - else - { - if (Server->ServerFd == -1) - { - FailCounter++; - _error->Discard(); - Server->Close(); - - if (FailCounter >= 2) - { - Fail(_("Connection failed"),true); - FailCounter = 0; - } - - QueueBack = Queue; - } - else - Fail(true); - } - break; - } - - // IMS hit - case IMS_HIT: - { - URIDone(Res); - break; - } - - // Hard server error, not found or something - case ERROR_UNRECOVERABLE: - { - Fail(); - break; - } - - // Hard internal error, kill the connection and fail - case ERROR_NOT_FROM_SERVER: - { - delete File; - File = 0; - - Fail(); - RotateDNS(); - Server->Close(); - break; - } - - // We need to flush the data, the header is like a 404 w/ error text - case ERROR_WITH_CONTENT_PAGE: - { - Fail(); - - // Send to content to dev/null - File = new FileFd("/dev/null",FileFd::WriteExists); - Server->RunData(); - delete File; - File = 0; - break; - } - - // Try again with a new URL - case TRY_AGAIN_OR_REDIRECT: - { - // Clear rest of response if there is content - if (Server->HaveContent) - { - File = new FileFd("/dev/null",FileFd::WriteExists); - Server->RunData(); - delete File; - File = 0; - } - - /* Detect redirect loops. No more redirects are allowed - after the same URI is seen twice in a queue item. 
*/ - StringVector &R = Redirected[Queue->DestFile]; - bool StopRedirects = false; - if (R.empty() == true) - R.push_back(Queue->Uri); - else if (R[0] == "STOP" || R.size() > 10) - StopRedirects = true; - else - { - for (StringVectorIterator I = R.begin(); I != R.end(); ++I) - if (Queue->Uri == *I) - { - R[0] = "STOP"; - break; - } - - R.push_back(Queue->Uri); - } - - if (StopRedirects == false) - Redirect(NextURI); - else - Fail(); - - break; - } - - default: - Fail(_("Internal error")); - break; - } - - FailCounter = 0; - } - - return 0; -} - /*}}}*/ // HttpMethod::AutoDetectProxy - auto detect proxy /*{{{*/ // --------------------------------------------------------------------- /* */ @@ -1450,5 +858,8 @@ bool HttpMethod::AutoDetectProxy() return true; } /*}}}*/ - - +ServerState * HttpMethod::CreateServerState(URI uri) /*{{{*/ +{ + return new HttpServerState(uri, this); +} + /*}}}*/ diff --git a/methods/http.h b/methods/http.h index 7446119cd..112ce171d 100644 --- a/methods/http.h +++ b/methods/http.h @@ -15,6 +15,8 @@ #include +#include "server.h" + using std::cout; using std::endl; @@ -31,7 +33,7 @@ class CircleBuf unsigned long long StrPos; unsigned long long MaxGet; struct timeval Start; - + static unsigned long long BwReadLimit; static unsigned long long BwTickReadData; static struct timeval BwReadTick; @@ -54,21 +56,20 @@ class CircleBuf return Sz; } void FillOut(); - + public: - Hashes *Hash; - + // Read data in bool Read(int Fd); bool Read(std::string Data); - + // Write data out bool Write(int Fd); bool WriteTillEl(std::string &Data,bool Single = false); - + // Control the write limit - void Limit(long long Max) {if (Max == -1) MaxGet = 0-1; else MaxGet = OutP + Max;} + void Limit(long long Max) {if (Max == -1) MaxGet = 0-1; else MaxGet = OutP + Max;} bool IsLimit() const {return MaxGet == OutP;}; void Print() const {cout << MaxGet << ',' << OutP << endl;}; @@ -84,114 +85,55 @@ class CircleBuf ~CircleBuf(); }; -struct ServerState +struct HttpServerState: public ServerState { - // This is the last parsed Header Line - unsigned int Major; - unsigned int Minor; - unsigned int Result; - char Code[360]; - - // These are some statistics from the last parsed header lines - unsigned long long Size; - signed long long StartPos; - time_t Date; - bool HaveContent; - enum {Chunked,Stream,Closes} Encoding; - enum {Header, Data} State; - bool Persistent; - std::string Location; - - // This is a Persistent attribute of the server itself. - bool Pipeline; - - HttpMethod *Owner; - // This is the connection itself. 
Output is data FROM the server CircleBuf In; CircleBuf Out; int ServerFd; - URI ServerName; - - bool HeaderLine(std::string Line); - bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;}; - void Reset() {Major = 0; Minor = 0; Result = 0; Code[0] = '\0'; Size = 0; - StartPos = 0; Encoding = Closes; time(&Date); HaveContent = false; - State = Header; Persistent = false; ServerFd = -1; - Pipeline = true;}; - - /** \brief Result of the header acquire */ - enum RunHeadersResult { - /** \brief Header ok */ - RUN_HEADERS_OK, - /** \brief IO error while retrieving */ - RUN_HEADERS_IO_ERROR, - /** \brief Parse error after retrieving */ - RUN_HEADERS_PARSE_ERROR, - }; - /** \brief Get the headers before the data */ - RunHeadersResult RunHeaders(); - /** \brief Transfer the data from the socket */ - bool RunData(); - - bool Open(); - bool Close(); - - ServerState(URI Srv,HttpMethod *Owner); - ~ServerState() {Close();}; + + protected: + virtual bool ReadHeaderLines(std::string &Data); + virtual bool LoadNextResponse(bool const ToFile, FileFd * const File); + virtual bool WriteResponse(std::string const &Data); + + public: + virtual void Reset() { ServerState::Reset(); ServerFd = -1; }; + + virtual bool RunData(FileFd * const File); + + virtual bool Open(); + virtual bool IsOpen(); + virtual bool Close(); + virtual bool InitHashes(FileFd &File); + virtual Hashes * GetHashes(); + virtual bool Die(FileFd &File); + virtual bool Flush(FileFd * const File); + virtual bool Go(bool ToFile, FileFd * const File); + + HttpServerState(URI Srv, HttpMethod *Owner); + virtual ~HttpServerState() {Close();}; }; -class HttpMethod : public pkgAcqMethod +class HttpMethod : public ServerMethod { - void SendReq(FetchItem *Itm,CircleBuf &Out); - bool Go(bool ToFile,ServerState *Srv); - bool Flush(ServerState *Srv); - bool ServerDie(ServerState *Srv); - - /** \brief Result of the header parsing */ - enum DealWithHeadersResult { - /** \brief The file is open and ready */ - FILE_IS_OPEN, - /** \brief We got a IMS hit, the file has not changed */ - IMS_HIT, - /** \brief The server reported a unrecoverable error */ - ERROR_UNRECOVERABLE, - /** \brief The server reported a error with a error content page */ - ERROR_WITH_CONTENT_PAGE, - /** \brief An error on the client side */ - ERROR_NOT_FROM_SERVER, - /** \brief A redirect or retry request */ - TRY_AGAIN_OR_REDIRECT - }; - /** \brief Handle the retrieved header data */ - DealWithHeadersResult DealWithHeaders(FetchResult &Res,ServerState *Srv); + public: + virtual void SendReq(FetchItem *Itm); /** \brief Try to AutoDetect the proxy */ bool AutoDetectProxy(); virtual bool Configuration(std::string Message); - - // In the event of a fatal signal this file will be closed and timestamped. 
- static std::string FailFile; - static int FailFd; - static time_t FailTime; - static void SigTerm(int); + + virtual ServerState * CreateServerState(URI uri); protected: - virtual bool Fetch(FetchItem *); - - std::string NextURI; std::string AutoDetectProxyCmd; public: - friend struct ServerState; - - FileFd *File; - ServerState *Server; - - int Loop(); - - HttpMethod() : pkgAcqMethod("1.2",Pipeline | SendConfig) + friend struct HttpServerState; + + HttpMethod() : ServerMethod("1.2",Pipeline | SendConfig) { File = 0; Server = 0; diff --git a/methods/makefile b/methods/makefile index 294c55d23..f8098de74 100644 --- a/methods/makefile +++ b/methods/makefile @@ -48,7 +48,7 @@ include $(PROGRAM_H) PROGRAM=http SLIBS = -lapt-pkg $(SOCKETLIBS) $(INTLLIBS) LIB_MAKES = apt-pkg/makefile -SOURCE = http.cc http_main.cc rfc2553emu.cc connect.cc +SOURCE = http.cc http_main.cc rfc2553emu.cc connect.cc server.cc include $(PROGRAM_H) # The https method @@ -83,7 +83,7 @@ include $(PROGRAM_H) PROGRAM=mirror SLIBS = -lapt-pkg $(SOCKETLIBS) LIB_MAKES = apt-pkg/makefile -SOURCE = mirror.cc http.cc rfc2553emu.cc connect.cc +SOURCE = mirror.cc http.cc rfc2553emu.cc connect.cc server.cc include $(PROGRAM_H) # SSH method symlink diff --git a/methods/server.cc b/methods/server.cc new file mode 100644 index 000000000..a2128441c --- /dev/null +++ b/methods/server.cc @@ -0,0 +1,665 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +/* ###################################################################### + + HTTP and HTTPS share a lot of common code and these classes are + exactly the dumping ground for this common code + + ##################################################################### */ + /*}}}*/ +// Include Files /*{{{*/ +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Internet stuff +#include + +#include "config.h" +#include "connect.h" +#include "rfc2553emu.h" +#include "http.h" + +#include + /*}}}*/ +using namespace std; + +string ServerMethod::FailFile; +int ServerMethod::FailFd = -1; +time_t ServerMethod::FailTime = 0; + +// ServerState::RunHeaders - Get the headers before the data /*{{{*/ +// --------------------------------------------------------------------- +/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header + parse error occurred */ +ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File) +{ + State = Header; + + Owner->Status(_("Waiting for headers")); + + Major = 0; + Minor = 0; + Result = 0; + Size = 0; + StartPos = 0; + Encoding = Closes; + HaveContent = false; + time(&Date); + + do + { + string Data; + if (ReadHeaderLines(Data) == false) + continue; + + if (Owner->Debug == true) + clog << Data; + + for (string::const_iterator I = Data.begin(); I < Data.end(); ++I) + { + string::const_iterator J = I; + for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J); + if (HeaderLine(string(I,J)) == false) + return RUN_HEADERS_PARSE_ERROR; + I = J; + } + + // 100 Continue is a Nop... + if (Result == 100) + continue; + + // Tidy up the connection persistance state. 
+ if (Encoding == Closes && HaveContent == true) + Persistent = false; + + return RUN_HEADERS_OK; + } + while (LoadNextResponse(false, File) == true); + + return RUN_HEADERS_IO_ERROR; +} + /*}}}*/ +// ServerState::HeaderLine - Process a header line /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool ServerState::HeaderLine(string Line) +{ + if (Line.empty() == true) + return true; + + string::size_type Pos = Line.find(' '); + if (Pos == string::npos || Pos+1 > Line.length()) + { + // Blah, some servers use "connection:closes", evil. + Pos = Line.find(':'); + if (Pos == string::npos || Pos + 2 > Line.length()) + return _error->Error(_("Bad header line")); + Pos++; + } + + // Parse off any trailing spaces between the : and the next word. + string::size_type Pos2 = Pos; + while (Pos2 < Line.length() && isspace(Line[Pos2]) != 0) + Pos2++; + + string Tag = string(Line,0,Pos); + string Val = string(Line,Pos2); + + if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0) + { + // Evil servers return no version + if (Line[4] == '/') + { + int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code); + if (elements == 3) + { + Code[0] = '\0'; + if (Owner->Debug == true) + clog << "HTTP server doesn't give Reason-Phrase for " << Result << std::endl; + } + else if (elements != 4) + return _error->Error(_("The HTTP server sent an invalid reply header")); + } + else + { + Major = 0; + Minor = 9; + if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2) + return _error->Error(_("The HTTP server sent an invalid reply header")); + } + + /* Check the HTTP response header to get the default persistance + state. */ + if (Major < 1) + Persistent = false; + else + { + if (Major == 1 && Minor == 0) + Persistent = false; + else + Persistent = true; + } + + return true; + } + + if (stringcasecmp(Tag,"Content-Length:") == 0) + { + if (Encoding == Closes) + Encoding = Stream; + HaveContent = true; + + // The length is already set from the Content-Range header + if (StartPos != 0) + return true; + + Size = strtoull(Val.c_str(), NULL, 10); + if (Size >= std::numeric_limits::max()) + return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header")); + else if (Size == 0) + HaveContent = false; + return true; + } + + if (stringcasecmp(Tag,"Content-Type:") == 0) + { + HaveContent = true; + return true; + } + + if (stringcasecmp(Tag,"Content-Range:") == 0) + { + HaveContent = true; + + // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416 + if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&Size) == 1) + { + StartPos = 1; // ignore Content-Length, it would override Size + HaveContent = false; + } + else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2) + return _error->Error(_("The HTTP server sent an invalid Content-Range header")); + if ((unsigned long long)StartPos > Size) + return _error->Error(_("This HTTP server has broken range support")); + return true; + } + + if (stringcasecmp(Tag,"Transfer-Encoding:") == 0) + { + HaveContent = true; + if (stringcasecmp(Val,"chunked") == 0) + Encoding = Chunked; + return true; + } + + if (stringcasecmp(Tag,"Connection:") == 0) + { + if (stringcasecmp(Val,"close") == 0) + Persistent = false; + if (stringcasecmp(Val,"keep-alive") == 0) + Persistent = true; + return true; + } + + if (stringcasecmp(Tag,"Last-Modified:") == 0) + { + if (RFC1123StrToTime(Val.c_str(), Date) == false) + return _error->Error(_("Unknown date 
format")); + return true; + } + + if (stringcasecmp(Tag,"Location:") == 0) + { + Location = Val; + return true; + } + + return true; +} + /*}}}*/ +// ServerState::ServerState - Constructor /*{{{*/ +ServerState::ServerState(URI Srv, ServerMethod *Owner) : ServerName(Srv), TimeOut(120), Owner(Owner) +{ + Reset(); +} + /*}}}*/ + +bool ServerMethod::Configuration(string Message) /*{{{*/ +{ + return pkgAcqMethod::Configuration(Message); +} + /*}}}*/ + +// ServerMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/ +// --------------------------------------------------------------------- +/* We look at the header data we got back from the server and decide what + to do. Returns DealWithHeadersResult (see http.h for details). + */ +ServerMethod::DealWithHeadersResult +ServerMethod::DealWithHeaders(FetchResult &Res) +{ + // Not Modified + if (Server->Result == 304) + { + unlink(Queue->DestFile.c_str()); + Res.IMSHit = true; + Res.LastModified = Queue->LastModified; + return IMS_HIT; + } + + /* Redirect + * + * Note that it is only OK for us to treat all redirection the same + * because we *always* use GET, not other HTTP methods. There are + * three redirection codes for which it is not appropriate that we + * redirect. Pass on those codes so the error handling kicks in. + */ + if (AllowRedirect + && (Server->Result > 300 && Server->Result < 400) + && (Server->Result != 300 // Multiple Choices + && Server->Result != 304 // Not Modified + && Server->Result != 306)) // (Not part of HTTP/1.1, reserved) + { + if (Server->Location.empty() == true); + else if (Server->Location[0] == '/' && Queue->Uri.empty() == false) + { + URI Uri = Queue->Uri; + if (Uri.Host.empty() == false) + NextURI = URI::SiteOnly(Uri); + else + NextURI.clear(); + NextURI.append(DeQuoteString(Server->Location)); + return TRY_AGAIN_OR_REDIRECT; + } + else + { + NextURI = DeQuoteString(Server->Location); + URI tmpURI = NextURI; + // Do not allow a redirection to switch protocol + if (tmpURI.Access == "http") + return TRY_AGAIN_OR_REDIRECT; + } + /* else pass through for error message */ + } + // retry after an invalid range response without partial data + else if (Server->Result == 416) + { + struct stat SBuf; + if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) + { + if ((unsigned long long)SBuf.st_size == Server->Size) + { + // the file is completely downloaded, but was not moved + Server->StartPos = Server->Size; + Server->Result = 200; + Server->HaveContent = false; + } + else if (unlink(Queue->DestFile.c_str()) == 0) + { + NextURI = Queue->Uri; + return TRY_AGAIN_OR_REDIRECT; + } + } + } + + /* We have a reply we dont handle. 
This should indicate a perm server + failure */ + if (Server->Result < 200 || Server->Result >= 300) + { + char err[255]; + snprintf(err,sizeof(err)-1,"HttpError%i",Server->Result); + SetFailReason(err); + _error->Error("%u %s",Server->Result,Server->Code); + if (Server->HaveContent == true) + return ERROR_WITH_CONTENT_PAGE; + return ERROR_UNRECOVERABLE; + } + + // This is some sort of 2xx 'data follows' reply + Res.LastModified = Server->Date; + Res.Size = Server->Size; + + // Open the file + delete File; + File = new FileFd(Queue->DestFile,FileFd::WriteAny); + if (_error->PendingError() == true) + return ERROR_NOT_FROM_SERVER; + + FailFile = Queue->DestFile; + FailFile.c_str(); // Make sure we dont do a malloc in the signal handler + FailFd = File->Fd(); + FailTime = Server->Date; + + if (Server->InitHashes(*File) == false) + { + _error->Errno("read",_("Problem hashing file")); + return ERROR_NOT_FROM_SERVER; + } + if (Server->StartPos > 0) + Res.ResumePoint = Server->StartPos; + + SetNonBlock(File->Fd(),true); + return FILE_IS_OPEN; +} + /*}}}*/ +// ServerMethod::SigTerm - Handle a fatal signal /*{{{*/ +// --------------------------------------------------------------------- +/* This closes and timestamps the open file. This is neccessary to get + resume behavoir on user abort */ +void ServerMethod::SigTerm(int) +{ + if (FailFd == -1) + _exit(100); + close(FailFd); + + // Timestamp + struct utimbuf UBuf; + UBuf.actime = FailTime; + UBuf.modtime = FailTime; + utime(FailFile.c_str(),&UBuf); + + _exit(100); +} + /*}}}*/ +// ServerMethod::Fetch - Fetch an item /*{{{*/ +// --------------------------------------------------------------------- +/* This adds an item to the pipeline. We keep the pipeline at a fixed + depth. */ +bool ServerMethod::Fetch(FetchItem *) +{ + if (Server == 0) + return true; + + // Queue the requests + int Depth = -1; + for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth; + I = I->Next, Depth++) + { + // If pipelining is disabled, we only queue 1 request + if (Server->Pipeline == false && Depth >= 0) + break; + + // Make sure we stick with the same server + if (Server->Comp(I->Uri) == false) + break; + if (QueueBack == I) + { + QueueBack = I->Next; + SendReq(I); + continue; + } + } + + return true; +}; + /*}}}*/ +// ServerMethod::Loop - Main loop /*{{{*/ +int ServerMethod::Loop() +{ + typedef vector StringVector; + typedef vector::iterator StringVectorIterator; + map Redirected; + + signal(SIGTERM,SigTerm); + signal(SIGINT,SigTerm); + + Server = 0; + + int FailCounter = 0; + while (1) + { + // We have no commands, wait for some to arrive + if (Queue == 0) + { + if (WaitFd(STDIN_FILENO) == false) + return 0; + } + + /* Run messages, we can accept 0 (no message) if we didn't + do a WaitFd above.. Otherwise the FD is closed. */ + int Result = Run(true); + if (Result != -1 && (Result != 0 || Queue == 0)) + { + if(FailReason.empty() == false || + _config->FindB("Acquire::http::DependOnSTDIN", true) == true) + return 100; + else + return 0; + } + + if (Queue == 0) + continue; + + // Connect to the server + if (Server == 0 || Server->Comp(Queue->Uri) == false) + { + delete Server; + Server = CreateServerState(Queue->Uri); + } + /* If the server has explicitly said this is the last connection + then we pre-emptively shut down the pipeline and tear down + the connection. 
This will speed up HTTP/1.0 servers a tad + since we don't have to wait for the close sequence to + complete */ + if (Server->Persistent == false) + Server->Close(); + + // Reset the pipeline + if (Server->IsOpen() == false) + QueueBack = Queue; + + // Connnect to the host + if (Server->Open() == false) + { + Fail(true); + delete Server; + Server = 0; + continue; + } + + // Fill the pipeline. + Fetch(0); + + // Fetch the next URL header data from the server. + switch (Server->RunHeaders(File)) + { + case ServerState::RUN_HEADERS_OK: + break; + + // The header data is bad + case ServerState::RUN_HEADERS_PARSE_ERROR: + { + _error->Error(_("Bad header data")); + Fail(true); + RotateDNS(); + continue; + } + + // The server closed a connection during the header get.. + default: + case ServerState::RUN_HEADERS_IO_ERROR: + { + FailCounter++; + _error->Discard(); + Server->Close(); + Server->Pipeline = false; + + if (FailCounter >= 2) + { + Fail(_("Connection failed"),true); + FailCounter = 0; + } + + RotateDNS(); + continue; + } + }; + + // Decide what to do. + FetchResult Res; + Res.Filename = Queue->DestFile; + switch (DealWithHeaders(Res)) + { + // Ok, the file is Open + case FILE_IS_OPEN: + { + URIStart(Res); + + // Run the data + bool Result = true; + if (Server->HaveContent) + Result = Server->RunData(File); + + /* If the server is sending back sizeless responses then fill in + the size now */ + if (Res.Size == 0) + Res.Size = File->Size(); + + // Close the file, destroy the FD object and timestamp it + FailFd = -1; + delete File; + File = 0; + + // Timestamp + struct utimbuf UBuf; + time(&UBuf.actime); + UBuf.actime = Server->Date; + UBuf.modtime = Server->Date; + utime(Queue->DestFile.c_str(),&UBuf); + + // Send status to APT + if (Result == true) + { + Res.TakeHashes(*Server->GetHashes()); + URIDone(Res); + } + else + { + if (Server->IsOpen() == false) + { + FailCounter++; + _error->Discard(); + Server->Close(); + + if (FailCounter >= 2) + { + Fail(_("Connection failed"),true); + FailCounter = 0; + } + + QueueBack = Queue; + } + else + Fail(true); + } + break; + } + + // IMS hit + case IMS_HIT: + { + URIDone(Res); + break; + } + + // Hard server error, not found or something + case ERROR_UNRECOVERABLE: + { + Fail(); + break; + } + + // Hard internal error, kill the connection and fail + case ERROR_NOT_FROM_SERVER: + { + delete File; + File = 0; + + Fail(); + RotateDNS(); + Server->Close(); + break; + } + + // We need to flush the data, the header is like a 404 w/ error text + case ERROR_WITH_CONTENT_PAGE: + { + Fail(); + + // Send to content to dev/null + File = new FileFd("/dev/null",FileFd::WriteExists); + Server->RunData(File); + delete File; + File = 0; + break; + } + + // Try again with a new URL + case TRY_AGAIN_OR_REDIRECT: + { + // Clear rest of response if there is content + if (Server->HaveContent) + { + File = new FileFd("/dev/null",FileFd::WriteExists); + Server->RunData(File); + delete File; + File = 0; + } + + /* Detect redirect loops. No more redirects are allowed + after the same URI is seen twice in a queue item. 
*/ + StringVector &R = Redirected[Queue->DestFile]; + bool StopRedirects = false; + if (R.empty() == true) + R.push_back(Queue->Uri); + else if (R[0] == "STOP" || R.size() > 10) + StopRedirects = true; + else + { + for (StringVectorIterator I = R.begin(); I != R.end(); ++I) + if (Queue->Uri == *I) + { + R[0] = "STOP"; + break; + } + + R.push_back(Queue->Uri); + } + + if (StopRedirects == false) + Redirect(NextURI); + else + Fail(); + + break; + } + + default: + Fail(_("Internal error")); + break; + } + + FailCounter = 0; + } + + return 0; +} + /*}}}*/ diff --git a/methods/server.h b/methods/server.h new file mode 100644 index 000000000..2d43b332f --- /dev/null +++ b/methods/server.h @@ -0,0 +1,142 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +/* ###################################################################### + + Classes dealing with the abstraction of talking to a end via a text + protocol like HTTP (which is used by the http and https methods) + + ##################################################################### */ + /*}}}*/ + +#ifndef APT_SERVER_H +#define APT_SERVER_H + +#include + +#include + +using std::cout; +using std::endl; + +class Hashes; +class ServerMethod; +class FileFd; + +struct ServerState +{ + // This is the last parsed Header Line + unsigned int Major; + unsigned int Minor; + unsigned int Result; + char Code[360]; + + // These are some statistics from the last parsed header lines + unsigned long long Size; + signed long long StartPos; + time_t Date; + bool HaveContent; + enum {Chunked,Stream,Closes} Encoding; + enum {Header, Data} State; + bool Persistent; + std::string Location; + + // This is a Persistent attribute of the server itself. + bool Pipeline; + URI ServerName; + URI Proxy; + unsigned long TimeOut; + + protected: + ServerMethod *Owner; + + bool HeaderLine(std::string Line); + virtual bool ReadHeaderLines(std::string &Data) = 0; + virtual bool LoadNextResponse(bool const ToFile, FileFd * const File) = 0; + + public: + /** \brief Result of the header acquire */ + enum RunHeadersResult { + /** \brief Header ok */ + RUN_HEADERS_OK, + /** \brief IO error while retrieving */ + RUN_HEADERS_IO_ERROR, + /** \brief Parse error after retrieving */ + RUN_HEADERS_PARSE_ERROR, + }; + /** \brief Get the headers before the data */ + RunHeadersResult RunHeaders(FileFd * const File); + + bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;}; + virtual void Reset() {Major = 0; Minor = 0; Result = 0; Code[0] = '\0'; Size = 0; + StartPos = 0; Encoding = Closes; time(&Date); HaveContent = false; + State = Header; Persistent = false; Pipeline = true;}; + virtual bool WriteResponse(std::string const &Data) = 0; + + /** \brief Transfer the data from the socket */ + virtual bool RunData(FileFd * const File) = 0; + + virtual bool Open() = 0; + virtual bool IsOpen() = 0; + virtual bool Close() = 0; + virtual bool InitHashes(FileFd &File) = 0; + virtual Hashes * GetHashes() = 0; + virtual bool Die(FileFd &File) = 0; + virtual bool Flush(FileFd * const File) = 0; + virtual bool Go(bool ToFile, FileFd * const File) = 0; + + ServerState(URI Srv, ServerMethod *Owner); + virtual ~ServerState() {}; +}; + +class ServerMethod : public pkgAcqMethod +{ + protected: + virtual bool Fetch(FetchItem *); + + ServerState *Server; + std::string NextURI; + FileFd *File; + + unsigned long PipelineDepth; + bool AllowRedirect; + + public: + bool Debug; + + /** \brief Result of the header parsing */ + enum DealWithHeadersResult { + /** 
\brief The file is open and ready */ + FILE_IS_OPEN, + /** \brief We got a IMS hit, the file has not changed */ + IMS_HIT, + /** \brief The server reported a unrecoverable error */ + ERROR_UNRECOVERABLE, + /** \brief The server reported a error with a error content page */ + ERROR_WITH_CONTENT_PAGE, + /** \brief An error on the client side */ + ERROR_NOT_FROM_SERVER, + /** \brief A redirect or retry request */ + TRY_AGAIN_OR_REDIRECT + }; + /** \brief Handle the retrieved header data */ + DealWithHeadersResult DealWithHeaders(FetchResult &Res); + + // In the event of a fatal signal this file will be closed and timestamped. + static std::string FailFile; + static int FailFd; + static time_t FailTime; + static void SigTerm(int); + + virtual bool Configuration(std::string Message); + virtual bool Flush() { return Server->Flush(File); }; + + int Loop(); + + virtual void SendReq(FetchItem *Itm) = 0; + virtual ServerState * CreateServerState(URI uri) = 0; + + ServerMethod(const char *Ver,unsigned long Flags = 0) : pkgAcqMethod(Ver, Flags), PipelineDepth(0), AllowRedirect(false), Debug(false) {}; + virtual ~ServerMethod() {}; +}; + +#endif -- cgit v1.2.3-70-g09d2 From 85050e764482197aad5daeeafd95ff6bf680afcb Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Mon, 30 Sep 2013 13:42:33 +0200 Subject: fix partial (206 and 416) support in https As lengthy discussed in lp:1157943 partial https support was utterly broken as a 206 response was handled as an (unhandled) error. This is the first part of fixing it by supporting a 206 response and starting to deal with 416. --- methods/https.cc | 100 +++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 67 insertions(+), 33 deletions(-) (limited to 'methods') diff --git a/methods/https.cc b/methods/https.cc index 84ce2d68f..4f00842ba 100644 --- a/methods/https.cc +++ b/methods/https.cc @@ -121,7 +121,6 @@ bool HttpsMethod::Fetch(FetchItem *Itm) struct stat SBuf; struct curl_slist *headers=NULL; char curl_errorstr[CURL_ERROR_SIZE]; - long curl_responsecode; URI Uri = Itm->Uri; string remotehost = Uri.Host; @@ -277,7 +276,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm) if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) { char Buf[1000]; - sprintf(Buf, "Range: bytes=%li-", (long) SBuf.st_size - 1); + sprintf(Buf, "Range: bytes=%li-", (long) SBuf.st_size); headers = curl_slist_append(headers, Buf); sprintf(Buf, "If-Range: %s", TimeRFC1123(SBuf.st_mtime).c_str()); headers = curl_slist_append(headers, Buf); @@ -291,75 +290,110 @@ bool HttpsMethod::Fetch(FetchItem *Itm) // go for it - if the file exists, append on it File = new FileFd(Itm->DestFile, FileFd::WriteAny); if (File->Size() > 0) - File->Seek(File->Size() - 1); - + File->Seek(File->Size()); + // keep apt updated Res.Filename = Itm->DestFile; // get it! 
CURLcode success = curl_easy_perform(curl); + long curl_responsecode; curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &curl_responsecode); - long curl_servdate; - curl_easy_getinfo(curl, CURLINFO_FILETIME, &curl_servdate); - // If the server returns 200 OK but the If-Modified-Since condition is not // met, CURLINFO_CONDITION_UNMET will be set to 1 long curl_condition_unmet = 0; curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet); File->Close(); + curl_slist_free_all(headers); // cleanup - if(success != 0 || (curl_responsecode != 200 && curl_responsecode != 304)) + if (success != 0) { _error->Error("%s", curl_errorstr); - // unlink, no need keep 401/404 page content in partial/ unlink(File->Name().c_str()); - Fail(); + return false; + } + + // server says file not modified + if (curl_responsecode == 304 || curl_condition_unmet == 1) + { + unlink(File->Name().c_str()); + Res.IMSHit = true; + Res.LastModified = Itm->LastModified; + Res.Size = 0; + URIDone(Res); return true; } - // Timestamp - struct utimbuf UBuf; - if (curl_servdate != -1) { - UBuf.actime = curl_servdate; - UBuf.modtime = curl_servdate; - utime(File->Name().c_str(),&UBuf); + if (curl_responsecode != 200 && // OK + curl_responsecode != 206 && // Partial + curl_responsecode != 416) // invalid Range + { + char err[255]; + snprintf(err, sizeof(err) - 1, "HttpError%ld", curl_responsecode); + SetFailReason(err); + _error->Error("%s", err); + // unlink, no need keep 401/404 page content in partial/ + unlink(File->Name().c_str()); + return false; + } + + struct stat resultStat; + if (unlikely(stat(File->Name().c_str(), &resultStat) != 0)) + { + _error->Errno("stat", "Unable to access file %s", File->Name().c_str()); + return false; + } + Res.Size = resultStat.st_size; + + // invalid range-request + if (curl_responsecode == 416) + { + unlink(File->Name().c_str()); + Res.Size = 0; + delete File; + Redirect(Itm->Uri); + return true; } // check the downloaded result - struct stat Buf; - if (stat(File->Name().c_str(),&Buf) == 0) + if (curl_responsecode == 304 || curl_condition_unmet) { - Res.Filename = File->Name(); - Res.LastModified = Buf.st_mtime; - Res.IMSHit = false; - if (curl_responsecode == 304 || curl_condition_unmet) - { - unlink(File->Name().c_str()); - Res.IMSHit = true; - Res.LastModified = Itm->LastModified; - Res.Size = 0; - URIDone(Res); - return true; - } - Res.Size = Buf.st_size; + unlink(File->Name().c_str()); + Res.IMSHit = true; + Res.LastModified = Itm->LastModified; + Res.Size = 0; + URIDone(Res); + return true; } + Res.IMSHit = false; + + // Timestamp + curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified); + if (Res.LastModified != -1) + { + struct utimbuf UBuf; + UBuf.actime = Res.LastModified; + UBuf.modtime = Res.LastModified; + utime(File->Name().c_str(),&UBuf); + } + else + Res.LastModified = resultStat.st_mtime; // take hashes Hashes Hash; FileFd Fd(Res.Filename, FileFd::ReadOnly); Hash.AddFD(Fd); Res.TakeHashes(Hash); - + // keep apt updated URIDone(Res); // cleanup Res.Size = 0; delete File; - curl_slist_free_all(headers); return true; }; -- cgit v1.2.3-70-g09d2 From fd46d30571eb240ec3aae792e7a56061ede50524 Mon Sep 17 00:00:00 2001 From: David Kalnischkies Date: Mon, 30 Sep 2013 16:41:16 +0200 Subject: handle complete responses to https range requests Servers might respond with a complete file either because they don't support Ranges at all or the If-Range condition isn't statisfied, so we have to parse the headers curl gets ourself to seek or truncate the file we have so far. 
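
As an illustration of that approach, here is a minimal stand-alone sketch — not the code added by this patch. The file name, URL and the small Transfer helper are invented for the example, and the sketch lets curl build the Range header via CURLOPT_RESUME_FROM_LARGE, whereas the patch constructs its own Range/If-Range headers and installs HttpsMethod::parse_header, which reuses ServerState::HeaderLine for the parsing. The idea shown is the same: register a header callback, read the status line ourselves, and drop the partial data before the body arrives whenever the server ignores the Range request and answers with a complete 200 response instead of a 206.

#include <curl/curl.h>
#include <cstdio>
#include <string>
#include <unistd.h>

struct Transfer {
   FILE *out;    // local partial file, opened in append mode
   long status;  // HTTP status code parsed from the status line
};

static size_t on_header(char *buffer, size_t size, size_t nitems, void *userp)
{
   Transfer *t = static_cast<Transfer *>(userp);
   size_t const len = size * nitems;
   std::string const line(buffer, len);

   if (line.compare(0, 5, "HTTP/") == 0)
      sscanf(line.c_str(), "HTTP/%*s %ld", &t->status);
   else if (line == "\r\n" || line == "\n")
   {
      // end of headers: a 200 means the server resends the whole file
      // (no Range support or If-Range not satisfied), so the partial
      // data on disk must not be kept – truncate before the body arrives
      if (t->status == 200)
      {
         fflush(t->out);
         if (ftruncate(fileno(t->out), 0) != 0)
            return 0; // abort the transfer on error
      }
   }
   return len;
}

int main()
{
   Transfer t = { fopen("partial.dat", "ab"), 0 };
   if (t.out == NULL)
      return 1;

   CURL *curl = curl_easy_init();
   if (curl == NULL)
      return 1;
   curl_easy_setopt(curl, CURLOPT_URL, "https://localhost:4433/testfile");
   // ask the server to resume where the partial file ends ("Range: bytes=N-")
   curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, (curl_off_t)ftell(t.out));
   curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, on_header);
   curl_easy_setopt(curl, CURLOPT_HEADERDATA, &t);
   // the default write callback appends the body to the FILE* given here
   curl_easy_setopt(curl, CURLOPT_WRITEDATA, t.out);
   CURLcode const res = curl_easy_perform(curl);

   fclose(t.out);
   curl_easy_cleanup(curl);
   return res == CURLE_OK ? 0 : 1;
}

The real change goes further than this sketch: on a 416 it compares the size reported by the server with the size of the local file to detect the "already completely downloaded" case, and it routes all header parsing through the shared ServerState so http and https behave the same.
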
This also finially adds the testcase testing a bunch of partial situations for both, http and https - which is now all green. Closes: 617643, 667699 LP: 1157943 --- methods/http.cc | 5 ++ methods/http.h | 1 + methods/https.cc | 76 +++++++++++++------- methods/https.h | 29 ++++++++ methods/makefile | 2 +- methods/server.h | 4 +- test/integration/apt.pem | 49 +++++++++++++ test/integration/framework | 92 +++++++++++++++++++------ test/integration/test-partial-file-support | 107 +++++++++++++++++++++++++++++ 9 files changed, 318 insertions(+), 47 deletions(-) create mode 100644 test/integration/apt.pem create mode 100755 test/integration/test-partial-file-support (limited to 'methods') diff --git a/methods/http.cc b/methods/http.cc index d2f084b04..71a02e53a 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -863,3 +863,8 @@ ServerState * HttpMethod::CreateServerState(URI uri) /*{{{*/ return new HttpServerState(uri, this); } /*}}}*/ +void HttpMethod::RotateDNS() /*{{{*/ +{ + ::RotateDNS(); +} + /*}}}*/ diff --git a/methods/http.h b/methods/http.h index 112ce171d..02c04e8ae 100644 --- a/methods/http.h +++ b/methods/http.h @@ -126,6 +126,7 @@ class HttpMethod : public ServerMethod virtual bool Configuration(std::string Message); virtual ServerState * CreateServerState(URI uri); + virtual void RotateDNS(); protected: std::string AutoDetectProxyCmd; diff --git a/methods/https.cc b/methods/https.cc index 4f00842ba..2a562434b 100644 --- a/methods/https.cc +++ b/methods/https.cc @@ -36,6 +36,41 @@ /*}}}*/ using namespace std; +size_t +HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp) +{ + size_t len = size * nmemb; + HttpsMethod *me = (HttpsMethod *)userp; + std::string line((char*) buffer, len); + for (--len; len > 0; --len) + if (isspace(line[len]) == 0) + { + ++len; + break; + } + line.erase(len); + + if (line.empty() == true) + { + if (me->Server->Result != 416 && me->Server->StartPos != 0) + ; + else if (me->Server->Result == 416 && me->Server->Size == me->File->FileSize()) + { + me->Server->Result = 200; + me->Server->StartPos = me->Server->Size; + } + else + me->Server->StartPos = 0; + + me->File->Truncate(me->Server->StartPos); + me->File->Seek(me->Server->StartPos); + } + else if (me->Server->HeaderLine(line) == false) + return 0; + + return size*nmemb; +} + size_t HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp) { @@ -59,6 +94,14 @@ HttpsMethod::progress_callback(void *clientp, double dltotal, double dlnow, return 0; } +// HttpsServerState::HttpsServerState - Constructor /*{{{*/ +HttpsServerState::HttpsServerState(URI Srv,HttpsMethod *Owner) : ServerState(Srv, NULL) +{ + TimeOut = _config->FindI("Acquire::https::Timeout",TimeOut); + Reset(); +} + /*}}}*/ + void HttpsMethod::SetupProxy() /*{{{*/ { URI ServerName = Queue->Uri; @@ -136,6 +179,8 @@ bool HttpsMethod::Fetch(FetchItem *Itm) // callbacks curl_easy_setopt(curl, CURLOPT_URL, static_cast(Uri).c_str()); + curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header); + curl_easy_setopt(curl, CURLOPT_WRITEHEADER, this); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); curl_easy_setopt(curl, CURLOPT_WRITEDATA, this); curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progress_callback); @@ -289,16 +334,13 @@ bool HttpsMethod::Fetch(FetchItem *Itm) // go for it - if the file exists, append on it File = new FileFd(Itm->DestFile, FileFd::WriteAny); - if (File->Size() > 0) - File->Seek(File->Size()); + Server = new HttpsServerState(Itm->Uri, this); // keep apt updated 
Res.Filename = Itm->DestFile; // get it! CURLcode success = curl_easy_perform(curl); - long curl_responsecode; - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &curl_responsecode); // If the server returns 200 OK but the If-Modified-Since condition is not // met, CURLINFO_CONDITION_UNMET will be set to 1 @@ -317,7 +359,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm) } // server says file not modified - if (curl_responsecode == 304 || curl_condition_unmet == 1) + if (Server->Result == 304 || curl_condition_unmet == 1) { unlink(File->Name().c_str()); Res.IMSHit = true; @@ -326,13 +368,14 @@ bool HttpsMethod::Fetch(FetchItem *Itm) URIDone(Res); return true; } + Res.IMSHit = false; - if (curl_responsecode != 200 && // OK - curl_responsecode != 206 && // Partial - curl_responsecode != 416) // invalid Range + if (Server->Result != 200 && // OK + Server->Result != 206 && // Partial + Server->Result != 416) // invalid Range { char err[255]; - snprintf(err, sizeof(err) - 1, "HttpError%ld", curl_responsecode); + snprintf(err, sizeof(err) - 1, "HttpError%i", Server->Result); SetFailReason(err); _error->Error("%s", err); // unlink, no need keep 401/404 page content in partial/ @@ -349,7 +392,7 @@ bool HttpsMethod::Fetch(FetchItem *Itm) Res.Size = resultStat.st_size; // invalid range-request - if (curl_responsecode == 416) + if (Server->Result == 416) { unlink(File->Name().c_str()); Res.Size = 0; @@ -358,18 +401,6 @@ bool HttpsMethod::Fetch(FetchItem *Itm) return true; } - // check the downloaded result - if (curl_responsecode == 304 || curl_condition_unmet) - { - unlink(File->Name().c_str()); - Res.IMSHit = true; - Res.LastModified = Itm->LastModified; - Res.Size = 0; - URIDone(Res); - return true; - } - Res.IMSHit = false; - // Timestamp curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified); if (Res.LastModified != -1) @@ -408,4 +439,3 @@ int main() return Mth.Run(); } - diff --git a/methods/https.h b/methods/https.h index 293e288e0..8632d6d02 100644 --- a/methods/https.h +++ b/methods/https.h @@ -14,24 +14,53 @@ #include #include +#include "server.h" + using std::cout; using std::endl; class HttpsMethod; class FileFd; +class HttpsServerState : public ServerState +{ + protected: + virtual bool ReadHeaderLines(std::string &Data) { return false; } + virtual bool LoadNextResponse(bool const ToFile, FileFd * const File) { return false; } + + public: + virtual bool WriteResponse(std::string const &Data) { return false; } + + /** \brief Transfer the data from the socket */ + virtual bool RunData(FileFd * const File) { return false; } + + virtual bool Open() { return false; } + virtual bool IsOpen() { return false; } + virtual bool Close() { return false; } + virtual bool InitHashes(FileFd &File) { return false; } + virtual Hashes * GetHashes() { return NULL; } + virtual bool Die(FileFd &File) { return false; } + virtual bool Flush(FileFd * const File) { return false; } + virtual bool Go(bool ToFile, FileFd * const File) { return false; } + + HttpsServerState(URI Srv, HttpsMethod *Owner); + virtual ~HttpsServerState() {Close();}; +}; + class HttpsMethod : public pkgAcqMethod { // minimum speed in bytes/se that triggers download timeout handling static const int DL_MIN_SPEED = 10; virtual bool Fetch(FetchItem *); + static size_t parse_header(void *buffer, size_t size, size_t nmemb, void *userp); static size_t write_data(void *buffer, size_t size, size_t nmemb, void *userp); static int progress_callback(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow); void SetupProxy(); CURL 
*curl; FetchResult Res; + HttpsServerState *Server; public: FileFd *File; diff --git a/methods/makefile b/methods/makefile index f8098de74..6b7781294 100644 --- a/methods/makefile +++ b/methods/makefile @@ -55,7 +55,7 @@ include $(PROGRAM_H) PROGRAM=https SLIBS = -lapt-pkg -lcurl $(INTLLIBS) LIB_MAKES = apt-pkg/makefile -SOURCE = https.cc +SOURCE = https.cc server.cc include $(PROGRAM_H) # The ftp method diff --git a/methods/server.h b/methods/server.h index 2d43b332f..4dc6a1f2f 100644 --- a/methods/server.h +++ b/methods/server.h @@ -49,11 +49,12 @@ struct ServerState protected: ServerMethod *Owner; - bool HeaderLine(std::string Line); virtual bool ReadHeaderLines(std::string &Data) = 0; virtual bool LoadNextResponse(bool const ToFile, FileFd * const File) = 0; public: + bool HeaderLine(std::string Line); + /** \brief Result of the header acquire */ enum RunHeadersResult { /** \brief Header ok */ @@ -134,6 +135,7 @@ class ServerMethod : public pkgAcqMethod virtual void SendReq(FetchItem *Itm) = 0; virtual ServerState * CreateServerState(URI uri) = 0; + virtual void RotateDNS() = 0; ServerMethod(const char *Ver,unsigned long Flags = 0) : pkgAcqMethod(Ver, Flags), PipelineDepth(0), AllowRedirect(false), Debug(false) {}; virtual ~ServerMethod() {}; diff --git a/test/integration/apt.pem b/test/integration/apt.pem new file mode 100644 index 000000000..f48df054d --- /dev/null +++ b/test/integration/apt.pem @@ -0,0 +1,49 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCt4R1Q2oYF6utL +19GBhnlHW8L2BI7PRFWge/ZpqIZWsaFcb30FV86Z6aXXZmgfEJ2814ZZYD1IKeCe +JsJpns7B4vYe1v64r995ZNBQAAKIYjICkKZOBgOphV+ChBfrCctVXgfLbMP6iBdH +J02wHzSCCdZm0sdVl9tB5l/OyJU8Mb4KB3btBhfZfY2M6lU+FOjcXs1LOduUrv7K +fZ+DEalvVGkomLtHtD0qb2vkqFrTjVCkziUVWhhxFFflt08oQ01Clxpl+uv7rOQo +jtkJ1LrMuv7iPfaZ/z3qLiFxZYG1BCGEwTOKCtJo6bgFzXiN3q7Q5FFlmv851x2J +Dn8C7Qm7AgMBAAECggEAE3q6vAofJZ6Ryadd8zLLd3ESQFl2XkX7icUZb/DPS/sO +ZrqeuPCDVr7UM3NnisNjyHoktPKRKvp2DYGuGgMOiq4QgJf5ZVten8zpgWze28SU +cbEe0HLgCifE8Ww2+b/ZJbEpEmMW+YQxh2khzO9SBJdxi4dliXM/vvw+E35pKZsB +s6glrz6VQAxxa9fY4fLnB2DafHy+pUvRVw8gC6PCM9jXN9tMYAqztsJu7aaanNyT +HX2UDWa8hxVx6t5UQZuxvst9N+RcEwmVCR2qlfZt/VRBRibBm62crEKbTD00mNHQ +4AIDn3g6Y3SXpDlgtNpjLyBL3fODPIwqwGdblaSKkQKBgQDYXecu0Eda7kbR5ciW +IAn8XOxsBIkkh8YVl2gRiiajRVoeiYBHaW9TyuQiaWrftiDQxB/N4G2focTXy/7O +VJn6e/SUoO/ZGRw2GbTxLUQptgvFsejYCcW9XpC8MCwE/y2swiY7JM0WR8cV2nCk +a/Cls6f1LjL13aFO0PAorEcahQKBgQDNuth6EHZVwfDgUuqhRw4HIIpfsfiA3UOd +b5k/NsfQIev1YUqnfucgInNPDq2Jf8eTQw3TKaszo2DCjDffCsEgM09Tym143Bd6 +AsMuqAStsE3IEC7pnmh95l29/7mh4OuG5cp5JUx0Pi5PkuJ6ywA8P1rM1MB9Zf52 +NGJCo1pnPwKBgQCx/n4i+uDYo1DLd/dN2UmdvGwaaJjR3ohMVuQcGcSzaGg82u0W +0lvtWOnYjVSIeXIBjHaFjW1hd1lSFdWms96AO9z3MHZf6NJWh0tdZNnAXqzMlBFz +OIbdxJ/Y0OBFtA9FIesFmL7G54GWLr+f49Ry3Jr9jmYJ8au0BRqsux07aQKBgC4q +CT2KyCMCO/z6XjAGc71hres/UlYIUI3ZZvfqYPfxRLNxO4FOVqq9UEajMomyJKSE +3WtO5F3YAXRmZnskPKXvHZPdzqbaLGJykD298h7PewSzrPM7WpM1yD9ETPFoOTGy +CrcYiYlkEpxEh5GqT8k1JjjkXLVG18zKgGoXocedAoGAQyU2DCNfxwzIJfFHKZEG +zpni72cR68Tu3AhW/38vMR2ZPca4KzXrUA52T+j7vkQC38LHm/mzNXNP7Vya0PJ3 +WoYOcLtg2uFPh0P/35ArEzuNooLsvulgg1jsamPbF8KAvJZKZHr30hlC/JGYSBbV +bnkzJTShsKzHIUiLtQ8Ja+E= +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIJAJ39xapQo0vLMA0GCSqGSIb3DQEBBQUAMFMxCzAJBgNV +BAYTAkRFMRMwEQYDVQQIDApTb21lLVN0YXRlMRswGQYDVQQKDBJBUFQgVGVzdGNh +c2VzIEdtYkgxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xMzA5MTYwODQ4MzVaGA80 +NzUxMDgxMzA4NDgzNVowUzELMAkGA1UEBhMCREUxEzARBgNVBAgMClNvbWUtU3Rh +dGUxGzAZBgNVBAoMEkFQVCBUZXN0Y2FzZXMgR21iSDESMBAGA1UEAwwJbG9jYWxo 
+b3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAreEdUNqGBerrS9fR +gYZ5R1vC9gSOz0RVoHv2aaiGVrGhXG99BVfOmeml12ZoHxCdvNeGWWA9SCngnibC +aZ7OweL2Htb+uK/feWTQUAACiGIyApCmTgYDqYVfgoQX6wnLVV4Hy2zD+ogXRydN +sB80ggnWZtLHVZfbQeZfzsiVPDG+Cgd27QYX2X2NjOpVPhTo3F7NSznblK7+yn2f +gxGpb1RpKJi7R7Q9Km9r5Kha041QpM4lFVoYcRRX5bdPKENNQpcaZfrr+6zkKI7Z +CdS6zLr+4j32mf896i4hcWWBtQQhhMEzigrSaOm4Bc14jd6u0ORRZZr/OdcdiQ5/ +Au0JuwIDAQABo1AwTjAdBgNVHQ4EFgQUhd26E7ykEYRTDbgMzkYtFtENhSkwHwYD +VR0jBBgwFoAUhd26E7ykEYRTDbgMzkYtFtENhSkwDAYDVR0TBAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAWcyMKi0Vc4beGV7w4Qft0/2P68jjMlQRdgkz+gGXbMVr +//KhqR3PbgFmHHpUsZ718AHeerNNdfFzOUptiAiOqH2muyAGdeWCxJ8KcU0sic8x +/h3TOzMYfEozhgMSJp9YW1z655uHcb15S7jb4zZwXwGyQzxwXT35SKj2mCqSbjIb +G987DGI+MtyoGRXhIwnBEsGTI1ck3NoeXBJ/tS/Ma8gUUC2xldMSprtHjeUHvZV2 +iz/HTqGlMLGW96AVeZiFNiC1fJ6pvref2XW5MkkvQm8tOi2cSrwJc9CgnCpCxkLp +liRsbwAduwkA26XzEomMR7yyYS5pm0Eu0cO9X39FKQ== +-----END CERTIFICATE----- diff --git a/test/integration/framework b/test/integration/framework index 4003d932c..a2bb871cc 100644 --- a/test/integration/framework +++ b/test/integration/framework @@ -795,6 +795,13 @@ signreleasefiles() { msgdone "info" } +rewritesourceslist() { + local APTARCHIVE="file://$(readlink -f "${TMPWORKINGDIRECTORY}/aptarchive")" + for LIST in $(find rootdir/etc/apt/sources.list.d/ -name 'apt-test-*.list'); do + sed -i $LIST -e "s#$APTARCHIVE#${1}#" -e "s#http://localhost:8080/#${1}#" -e "s#http://localhost:4433/#${1}#" + done +} + changetowebserver() { local LOG='/dev/null' if test -x ${BUILDDIRECTORY}/aptwebserver; then @@ -806,31 +813,32 @@ changetowebserver() { fi addtrap "kill $PID;" cd - > /dev/null - elif [ $# -gt 0 ]; then - msgdie 'Need the aptwebserver when passing arguments for the webserver' - elif which weborf > /dev/null; then - weborf -xb aptarchive/ >$LOG 2>&1 & - addtrap "kill $!;" - elif which gatling > /dev/null; then - cd aptarchive - gatling -p 8080 -F -S >$LOG 2>&1 & - addtrap "kill $!;" - cd - > /dev/null - elif which lighttpd > /dev/null; then - echo "server.document-root = \"$(readlink -f ./aptarchive)\" -server.port = 8080 -server.stat-cache-engine = \"disable\"" > lighttpd.conf - lighttpd -t -f lighttpd.conf >/dev/null || msgdie 'Can not change to webserver: our lighttpd config is invalid' - lighttpd -D -f lighttpd.conf >$LOG 2>&1 & - addtrap "kill $!;" else msgdie 'You have to build aptwerbserver or install a webserver' fi - local APTARCHIVE="file://$(readlink -f ./aptarchive)" - for LIST in $(find rootdir/etc/apt/sources.list.d/ -name 'apt-test-*.list'); do - sed -i $LIST -e "s#$APTARCHIVE#http://localhost:8080/#" - done - return 0 + if [ "$1" != '--no-rewrite' ]; then + rewritesourceslist 'http://localhost:8080/' + fi +} + +changetohttpswebserver() { + if ! which stunnel4 >/dev/null; then + msgdie 'You need to install stunnel4 for https testcases' + fi + if [ ! 
-e "${TMPWORKINGDIRECTORY}/aptarchive/aptwebserver.pid" ]; then + changetowebserver --no-rewrite + fi + echo "pid = ${TMPWORKINGDIRECTORY}/aptarchive/stunnel.pid +cert = ${TESTDIRECTORY}/apt.pem + +[https] +accept = 4433 +connect = 8080 +" > ${TMPWORKINGDIRECTORY}/stunnel.conf + stunnel4 "${TMPWORKINGDIRECTORY}/stunnel.conf" + local PID="$(cat ${TMPWORKINGDIRECTORY}/aptarchive/stunnel.pid)" + addtrap 'prefix' "kill ${PID};" + rewritesourceslist 'https://localhost:4433/' } changetocdrom() { @@ -848,6 +856,46 @@ changetocdrom() { find rootdir/etc/apt/sources.list.d/ -name 'apt-test-*.list' -delete } +downloadfile() { + PROTO="$(echo "$1" | cut -d':' -f 1)" + local DOWNLOG="${TMPWORKINGDIRECTORY}/download.log" + rm -f "$DOWNLOG" + touch "$DOWNLOG" + { + echo "601 Configuration +Config-Item: Acquire::https::CaInfo=${TESTDIR}/apt.pem +Config-Item: Debug::Acquire::${PROTO}=1 + +600 Acquire URI +URI: $1 +Filename: ${2} +" + # simple worker keeping stdin open until we are done (201) or error (400) + # and requesting new URIs on try-agains/redirects inbetween + { tail -n 999 -f "$DOWNLOG" & echo "TAILPID: $!"; } | while read f1 f2; do + if [ "$f1" = 'TAILPID:' ]; then + TAILPID="$f2" + elif [ "$f1" = 'New-URI:' ]; then + echo "600 Acquire URI +URI: $f2 +Filename: ${2} +" + elif [ "$f1" = '201' ] || [ "$f1" = '400' ]; then + # tail would only die on next read – which never happens + test -z "$TAILPID" || kill -s HUP "$TAILPID" + break + fi + done + } | LD_LIBRARY_PATH=${BUILDDIRECTORY} ${BUILDDIRECTORY}/methods/${PROTO} 2>&1 | tee "$DOWNLOG" + rm "$DOWNLOG" + # only if the file exists the download was successful + if [ -e "$2" ]; then + return 0 + else + return 1 + fi +} + checkdiff() { local DIFFTEXT="$($(which diff) -u $* | sed -e '/^---/ d' -e '/^+++/ d' -e '/^@@/ d')" if [ -n "$DIFFTEXT" ]; then diff --git a/test/integration/test-partial-file-support b/test/integration/test-partial-file-support new file mode 100755 index 000000000..8d1c51ae0 --- /dev/null +++ b/test/integration/test-partial-file-support @@ -0,0 +1,107 @@ +#!/bin/sh +set -e + +TESTDIR=$(readlink -f $(dirname $0)) +. $TESTDIR/framework +setupenvironment +configarchitecture 'amd64' + +changetowebserver + +copysource() { + dd if="$1" bs=1 count="$2" of="$3" 2>/dev/null + touch -d "$(stat --format '%y' "${TESTFILE}")" "$3" +} + +testdownloadfile() { + local DOWNLOG='download-testfile.log' + rm -f "$DOWNLOG" + msgtest "Testing download of file $2 with" "$1" + if ! 
downloadfile "$2" "$3" > "$DOWNLOG"; then + cat "$DOWNLOG" + msgfail + else + msgpass + fi + cat "$DOWNLOG" | while read field hash; do + local EXPECTED + case "$field" in + 'MD5Sum-Hash:') EXPECTED="$(md5sum "$TESTFILE" | cut -d' ' -f 1)";; + 'SHA1-Hash:') EXPECTED="$(sha1sum "$TESTFILE" | cut -d' ' -f 1)";; + 'SHA256-Hash:') EXPECTED="$(sha256sum "$TESTFILE" | cut -d' ' -f 1)";; + 'SHA512-Hash:') EXPECTED="$(sha512sum "$TESTFILE" | cut -d' ' -f 1)";; + *) continue;; + esac + if [ "$4" = '=' ]; then + msgtest 'Test downloaded file for correct' "$field" + else + msgtest 'Test downloaded file does not match in' "$field" + fi + if [ "$EXPECTED" "$4" "$hash" ]; then + msgpass + else + cat "$DOWNLOG" + msgfail "expected: $EXPECTED ; got: $hash" + fi + done +} + +testwebserverlaststatuscode() { + STATUS="$(mktemp)" + addtrap "rm $STATUS;" + msgtest 'Test last status code from the webserver was' "$1" + downloadfile "http://localhost:8080/_config/find/aptwebserver::last-status-code" "$STATUS" >/dev/null + if [ "$(cat "$STATUS")" = "$1" ]; then + msgpass + else + cat download-testfile.log + msgfail "Status was $(cat "$STATUS")" + fi +} + + +TESTFILE='aptarchive/testfile' +cp -a ${TESTDIR}/framework $TESTFILE + +testrun() { + downloadfile "$1/_config/set/aptwebserver::support::range/true" '/dev/null' >/dev/null + testwebserverlaststatuscode '200' + + copysource $TESTFILE 0 ./testfile + testdownloadfile 'no data' "${1}/testfile" './testfile' '=' + testwebserverlaststatuscode '200' + + copysource $TESTFILE 20 ./testfile + testdownloadfile 'valid partial data' "${1}/testfile" './testfile' '=' + testwebserverlaststatuscode '206' + + copysource /dev/zero 20 ./testfile + testdownloadfile 'invalid partial data' "${1}/testfile" './testfile' '!=' + testwebserverlaststatuscode '206' + + copysource $TESTFILE 1M ./testfile + testdownloadfile 'completely downloaded file' "${1}/testfile" './testfile' '=' + testwebserverlaststatuscode '416' + + copysource /dev/zero 1M ./testfile + testdownloadfile 'too-big partial file' "${1}/testfile" './testfile' '=' + testwebserverlaststatuscode '200' + + copysource /dev/zero 20 ./testfile + touch ./testfile + testdownloadfile 'old data' "${1}/testfile" './testfile' '=' + testwebserverlaststatuscode '200' + + downloadfile "$1/_config/set/aptwebserver::support::range/false" '/dev/null' >/dev/null + testwebserverlaststatuscode '200' + + copysource $TESTFILE 20 ./testfile + testdownloadfile 'no server support' "${1}/testfile" './testfile' '=' + testwebserverlaststatuscode '200' +} + +testrun 'http://localhost:8080' + +changetohttpswebserver + +testrun 'https://localhost:4433' -- cgit v1.2.3-70-g09d2