TerraSync: retry after socket failures
Assume socket failures are intermittent, up to some maximum count (currently configured as 16). Add a test case to cover this.
parent 9c530d6978
commit bd9f04d980
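The retry decision itself lives in the request onFail() handlers and in HTTPRepoPrivate::failedToUpdateChild() in the diff below. The following is an illustrative sketch only, condensing that flow; the helper names onRequestFailed and recordFailure are hypothetical and do not appear in the commit, while RequestFinish, failures, maxPermittedFailures and REPO_PARTIAL_UPDATE are the names the commit introduces.

    // Sketch (not the committed code): retry intermittent socket failures,
    // and cap the number of per-file failures before abandoning the sync.
    void HTTPRepoPrivate::onRequestFailed(const RepoRequestPtr &req,
                                          HTTPRepository::ResultCode code) {
        if (code == HTTPRepository::REPO_ERROR_SOCKET) {
            // assume the failure is intermittent: reset and re-queue the request
            finishedRequest(req, RequestFinish::Retry);
        } else {
            finishedRequest(req, RequestFinish::Done);
        }
    }

    void HTTPRepoPrivate::recordFailure(const SGPath &relativePath,
                                        HTTPRepository::ResultCode code) {
        failures.push_back({relativePath, code});
        if (failures.size() >= maxPermittedFailures) { // currently 16
            // too many per-file failures: stop and report a partial update
            status = HTTPRepository::REPO_PARTIAL_UPDATE;
            queuedRequests.clear();
        }
    }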
@@ -35,10 +35,12 @@ set(SOURCES
     sg_socket.cxx
     sg_socket_udp.cxx
     HTTPClient.cxx
+    HTTPTestApi_private.hxx
     HTTPFileRequest.cxx
     HTTPMemoryRequest.cxx
     HTTPRequest.cxx
     HTTPRepository.cxx
+    HTTPRepository_private.hxx
     untar.cxx
     )

@@ -81,6 +83,7 @@ add_test(binobj ${EXECUTABLE_OUTPUT_PATH}/test_binobj)

 add_executable(test_repository test_repository.cxx)
 target_link_libraries(test_repository ${TEST_LIBS})
+target_compile_definitions(test_repository PUBLIC BUILDING_TESTSUITE)
 add_test(http_repository ${EXECUTABLE_OUTPUT_PATH}/test_repository)

 add_executable(test_untar test_untar.cxx)
@@ -37,7 +37,6 @@

 #include <simgear/simgear_config.h>

-#include <curl/multi.h>

 #include <simgear/io/sg_netChat.hxx>

@@ -47,6 +46,9 @@
 #include <simgear/timing/timestamp.hxx>
 #include <simgear/structure/exception.hxx>

+#include "HTTPClient_private.hxx"
+#include "HTTPTestApi_private.hxx"
+
 #if defined( HAVE_VERSION_H ) && HAVE_VERSION_H
 #include "version.h"
 #else
@@ -64,50 +66,20 @@ namespace HTTP
 extern const int DEFAULT_HTTP_PORT = 80;
 const char* CONTENT_TYPE_URL_ENCODED = "application/x-www-form-urlencoded";

-class Connection;
-typedef std::multimap<std::string, Connection*> ConnectionDict;
-typedef std::list<Request_ptr> RequestList;
-
-class Client::ClientPrivate
-{
-public:
-    CURLM* curlMulti;
-
-    void createCurlMulti()
-    {
-        curlMulti = curl_multi_init();
-        // see https://curl.haxx.se/libcurl/c/CURLMOPT_PIPELINING.html
-        // we request HTTP 1.1 pipelining
-        curl_multi_setopt(curlMulti, CURLMOPT_PIPELINING, 1 /* aka CURLPIPE_HTTP1 */);
+void Client::ClientPrivate::createCurlMulti() {
+    curlMulti = curl_multi_init();
+    // see https://curl.haxx.se/libcurl/c/CURLMOPT_PIPELINING.html
+    // we request HTTP 1.1 pipelining
+    curl_multi_setopt(curlMulti, CURLMOPT_PIPELINING, 1 /* aka CURLPIPE_HTTP1 */);
 #if (LIBCURL_VERSION_MINOR >= 30)
-        curl_multi_setopt(curlMulti, CURLMOPT_MAX_TOTAL_CONNECTIONS, (long) maxConnections);
-        curl_multi_setopt(curlMulti, CURLMOPT_MAX_PIPELINE_LENGTH,
-                          (long) maxPipelineDepth);
-        curl_multi_setopt(curlMulti, CURLMOPT_MAX_HOST_CONNECTIONS,
-                          (long) maxHostConnections);
+    curl_multi_setopt(curlMulti, CURLMOPT_MAX_TOTAL_CONNECTIONS,
+                      (long)maxConnections);
+    curl_multi_setopt(curlMulti, CURLMOPT_MAX_PIPELINE_LENGTH,
+                      (long)maxPipelineDepth);
+    curl_multi_setopt(curlMulti, CURLMOPT_MAX_HOST_CONNECTIONS,
+                      (long)maxHostConnections);
 #endif
 }
-
-    typedef std::map<Request_ptr, CURL*> RequestCurlMap;
-    RequestCurlMap requests;
-
-    std::string userAgent;
-    std::string proxy;
-    int proxyPort;
-    std::string proxyAuth;
-    unsigned int maxConnections;
-    unsigned int maxHostConnections;
-    unsigned int maxPipelineDepth;
-
-    RequestList pendingRequests;
-
-    SGTimeStamp timeTransferSample;
-    unsigned int bytesTransferred;
-    unsigned int lastTransferRate;
-    uint64_t totalBytesDownloaded;
-
-    SGPath tlsCertificatePath;
-};
-
+
 Client::Client() :
     d(new ClientPrivate)
@@ -223,12 +195,23 @@ void Client::update(int waitTimeout)
         assert(it->second == e);
         d->requests.erase(it);

-        if (msg->data.result == 0) {
-            req->responseComplete();
-        } else {
-            SG_LOG(SG_IO, SG_WARN, "CURL Result:" << msg->data.result << " " << curl_easy_strerror(msg->data.result));
-            req->setFailure(msg->data.result, curl_easy_strerror(msg->data.result));
-        }
+        bool doProcess = true;
+        if (d->testsuiteResponseDoneCallback) {
+            doProcess =
+                !d->testsuiteResponseDoneCallback(msg->data.result, req);
+        }
+
+        if (doProcess) {
+            if (msg->data.result == 0) {
+                req->responseComplete();
+            } else {
+                SG_LOG(SG_IO, SG_WARN,
+                       "CURL Result:" << msg->data.result << " "
+                                      << curl_easy_strerror(msg->data.result));
+                req->setFailure(msg->data.result,
+                                curl_easy_strerror(msg->data.result));
+            }
+        }

         curl_multi_remove_handle(d->curlMulti, e);
         curl_easy_cleanup(e);
@@ -559,6 +542,17 @@ void Client::clearAllConnections()
     d->createCurlMulti();
 }

+/////////////////////////////////////////////////////////////////////
+
+void TestApi::setResponseDoneCallback(Client *cl, ResponseDoneCallback cb) {
+    cl->d->testsuiteResponseDoneCallback = cb;
+}
+
+void TestApi::markRequestAsFailed(Request_ptr req, int curlCode,
+                                  const std::string &message) {
+    req->setFailure(curlCode, message);
+}
+
 } // of namespace HTTP

 } // of namespace simgear
@@ -24,7 +24,8 @@
 #ifndef SG_HTTP_CLIENT_HXX
 #define SG_HTTP_CLIENT_HXX

-#include <memory> // for std::unique_ptr
+#include <functional>
+#include <memory>   // for std::unique_ptr
 #include <stdint.h> // for uint_64t

 #include <simgear/io/HTTPFileRequest.hxx>

@@ -125,6 +126,7 @@ private:

     friend class Connection;
     friend class Request;
+    friend class TestApi;

     class ClientPrivate;
     std::unique_ptr<ClientPrivate> d;
simgear/io/HTTPClient_private.hxx (new file, 68 lines)
@@ -0,0 +1,68 @@
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Library General Public
+// License as published by the Free Software Foundation; either
+// version 2 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+// You should have received a copy of the GNU Library General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+
+#pragma once
+
+#include <list>
+#include <map>
+
+#include "HTTPClient.hxx"
+#include "HTTPRequest.hxx"
+
+#include <simgear/timing/timestamp.hxx>
+
+#include <curl/multi.h>
+
+namespace simgear {
+namespace HTTP {
+
+typedef std::list<Request_ptr> RequestList;
+
+using ResponseDoneCallback =
+    std::function<bool(int curlResult, Request_ptr req)>;
+
+class Client::ClientPrivate {
+public:
+    CURLM *curlMulti;
+
+    void createCurlMulti();
+
+    typedef std::map<Request_ptr, CURL *> RequestCurlMap;
+    RequestCurlMap requests;
+
+    std::string userAgent;
+    std::string proxy;
+    int proxyPort;
+    std::string proxyAuth;
+    unsigned int maxConnections;
+    unsigned int maxHostConnections;
+    unsigned int maxPipelineDepth;
+
+    RequestList pendingRequests;
+
+    SGTimeStamp timeTransferSample;
+    unsigned int bytesTransferred;
+    unsigned int lastTransferRate;
+    uint64_t totalBytesDownloaded;
+
+    SGPath tlsCertificatePath;
+
+    // only used by unit-tests / test-api, but
+    // only costs us a pointer here to declare it.
+    ResponseDoneCallback testsuiteResponseDoneCallback;
+};
+
+} // namespace HTTP
+
+} // namespace simgear
@@ -45,132 +45,43 @@

 #include <simgear/misc/sg_hash.hxx>

+#include "HTTPRepository_private.hxx"
+
 namespace simgear
 {

-class HTTPDirectory;
-using HTTPDirectory_ptr = std::unique_ptr<HTTPDirectory>;
-
-class HTTPRepoGetRequest : public HTTP::Request
-{
-public:
-    HTTPRepoGetRequest(HTTPDirectory* d, const std::string& u) :
-        HTTP::Request(u),
-        _directory(d)
-    {
-    }
-
-    virtual void cancel();
-
-    size_t contentSize() const
-    {
-        return _contentSize;
-    }
-
-    void setContentSize(size_t sz)
-    {
-        _contentSize = sz;
-    }
-protected:
-    HTTPDirectory* _directory;
-    size_t _contentSize = 0;
-};
-
-typedef SGSharedPtr<HTTPRepoGetRequest> RepoRequestPtr;
-
-std::string innerResultCodeAsString(HTTPRepository::ResultCode code)
-{
-    switch (code) {
-    case HTTPRepository::REPO_NO_ERROR: return "no error";
-    case HTTPRepository::REPO_ERROR_NOT_FOUND: return "not found";
-    case HTTPRepository::REPO_ERROR_SOCKET: return "socket error";
-    case HTTPRepository::SVN_ERROR_XML: return "malformed XML";
-    case HTTPRepository::SVN_ERROR_TXDELTA: return "malformed XML";
-    case HTTPRepository::REPO_ERROR_IO: return "I/O error";
-    case HTTPRepository::REPO_ERROR_CHECKSUM: return "checksum verification error";
-    case HTTPRepository::REPO_ERROR_FILE_NOT_FOUND: return "file not found";
-    case HTTPRepository::REPO_ERROR_HTTP: return "HTTP-level error";
-    case HTTPRepository::REPO_ERROR_CANCELLED: return "cancelled";
-    case HTTPRepository::REPO_PARTIAL_UPDATE: return "partial update (incomplete)";
-    }
-
-    return "Unknown response code";
-}
-
-class HTTPRepoPrivate
-{
-public:
-    struct HashCacheEntry
-    {
-        std::string filePath;
-        time_t modTime;
-        size_t lengthBytes;
-        std::string hashHex;
-    };
-
-    typedef std::vector<HashCacheEntry> HashCache;
-    HashCache hashes;
-    int hashCacheDirty = 0;
-
-    struct Failure
-    {
-        SGPath path;
-        HTTPRepository::ResultCode error;
-    };
-
-    typedef std::vector<Failure> FailureList;
-    FailureList failures;
-
-    HTTPRepoPrivate(HTTPRepository* parent) :
-        p(parent),
-        isUpdating(false),
-        status(HTTPRepository::REPO_NO_ERROR),
-        totalDownloaded(0)
-    { ; }
-
-    ~HTTPRepoPrivate();
-
-    HTTPRepository* p; // link back to outer
-    HTTP::Client* http;
-    std::string baseUrl;
-    SGPath basePath;
-    bool isUpdating;
-    HTTPRepository::ResultCode status;
-    HTTPDirectory_ptr rootDir;
-    size_t totalDownloaded;
-    HTTPRepository::SyncPredicate syncPredicate;
-
-    HTTP::Request_ptr updateFile(HTTPDirectory* dir, const std::string& name,
-                                 size_t sz);
-    HTTP::Request_ptr updateDir(HTTPDirectory* dir, const std::string& hash,
-                                size_t sz);
-
-    std::string hashForPath(const SGPath& p);
-    void updatedFileContents(const SGPath& p, const std::string& newHash);
-    void parseHashCache();
-    std::string computeHashForPath(const SGPath& p);
-    void writeHashCache();
-
-    void failedToGetRootIndex(HTTPRepository::ResultCode st);
-    void failedToUpdateChild(const SGPath& relativePath,
-                             HTTPRepository::ResultCode fileStatus);
-
-    typedef std::vector<RepoRequestPtr> RequestVector;
-    RequestVector queuedRequests,
-        activeRequests;
-
-    void makeRequest(RepoRequestPtr req);
-    void finishedRequest(const RepoRequestPtr& req);
-
-    HTTPDirectory* getOrCreateDirectory(const std::string& path);
-    bool deleteDirectory(const std::string& relPath, const SGPath& absPath);
-
-    typedef std::vector<HTTPDirectory_ptr> DirectoryVector;
-    DirectoryVector directories;
-
-    SGPath installedCopyPath;
-};
-
+namespace {
+
+std::string innerResultCodeAsString(HTTPRepository::ResultCode code) {
+    switch (code) {
+    case HTTPRepository::REPO_NO_ERROR:
+        return "no error";
+    case HTTPRepository::REPO_ERROR_NOT_FOUND:
+        return "not found";
+    case HTTPRepository::REPO_ERROR_SOCKET:
+        return "socket error";
+    case HTTPRepository::SVN_ERROR_XML:
+        return "malformed XML";
+    case HTTPRepository::SVN_ERROR_TXDELTA:
+        return "malformed XML";
+    case HTTPRepository::REPO_ERROR_IO:
+        return "I/O error";
+    case HTTPRepository::REPO_ERROR_CHECKSUM:
+        return "checksum verification error";
+    case HTTPRepository::REPO_ERROR_FILE_NOT_FOUND:
+        return "file not found";
+    case HTTPRepository::REPO_ERROR_HTTP:
+        return "HTTP-level error";
+    case HTTPRepository::REPO_ERROR_CANCELLED:
+        return "cancelled";
+    case HTTPRepository::REPO_PARTIAL_UPDATE:
+        return "partial update (incomplete)";
+    }
+
+    return "Unknown response code";
+}
+
+} // namespace
+
 class HTTPDirectory
 {
@@ -243,6 +154,8 @@ public:
         children.clear();
         parseDirIndex(children);
         std::sort(children.begin(), children.end());
+
+        _repository->updatedChildSuccessfully(_relativePath);
     }

     void failedToUpdate(HTTPRepository::ResultCode status)
@@ -470,9 +383,14 @@ public:
             SG_LOG(SG_TERRASYNC, SG_WARN, "Checksum error for " << absolutePath() << "/" << file << " " << it->hash << " " << hash);
             // we don't erase the file on a hash mismatch, because if we're syncing during the
             // middle of a server-side update, the downloaded file may actually become valid.
-            _repository->failedToUpdateChild(_relativePath, HTTPRepository::REPO_ERROR_CHECKSUM);
+            _repository->failedToUpdateChild(
+                _relativePath + "/" + file,
+                HTTPRepository::REPO_ERROR_CHECKSUM);
         } else {
             _repository->updatedFileContents(it->path, hash);
+            _repository->updatedChildSuccessfully(_relativePath + "/" +
+                                                  file);
+
             _repository->totalDownloaded += sz;
             SGPath p = SGPath(absolutePath(), file);

@@ -779,6 +697,10 @@ std::string HTTPRepository::resultCodeAsString(ResultCode code)
     return innerResultCodeAsString(code);
 }

+HTTPRepository::FailureVec HTTPRepository::failures() const {
+    return _d->failures;
+}
+
 void HTTPRepository::setFilter(SyncPredicate sp) { _d->syncPredicate = sp; }

 HTTPRepository::ResultCode
@@ -809,61 +731,80 @@ HTTPRepository::failure() const
 }

 protected:
-    virtual void gotBodyData(const char* s, int n)
-    {
-        if (!file.get()) {
-            file.reset(new SGBinaryFile(pathInRepo));
-            if (!file->open(SG_IO_OUT)) {
-                SG_LOG(SG_TERRASYNC, SG_WARN, "unable to create file " << pathInRepo);
-                _directory->repository()->http->cancelRequest(this, "Unable to create output file:" + pathInRepo.utf8Str());
-            }
-
-            sha1_init(&hashContext);
-        }
-
-        sha1_write(&hashContext, s, n);
-        file->write(s, n);
-    }
+    void gotBodyData(const char *s, int n) override {
+        if (!file.get()) {
+            file.reset(new SGBinaryFile(pathInRepo));
+            if (!file->open(SG_IO_OUT)) {
+                SG_LOG(SG_TERRASYNC, SG_WARN,
+                       "unable to create file " << pathInRepo);
+                _directory->repository()->http->cancelRequest(
+                    this, "Unable to create output file:" + pathInRepo.utf8Str());
+            }
+
+            sha1_init(&hashContext);
+        }
+
+        sha1_write(&hashContext, s, n);
+        file->write(s, n);
+    }

-    virtual void onDone()
-    {
-        file->close();
-        if (responseCode() == 200) {
-            std::string hash = strutils::encodeHex(sha1_result(&hashContext), HASH_LENGTH);
-            _directory->didUpdateFile(fileName, hash, contentSize());
-        } else if (responseCode() == 404) {
-            SG_LOG(SG_TERRASYNC, SG_WARN, "terrasync file not found on server: " << fileName << " for " << _directory->absolutePath());
-            _directory->didFailToUpdateFile(fileName, HTTPRepository::REPO_ERROR_FILE_NOT_FOUND);
-        } else {
-            SG_LOG(SG_TERRASYNC, SG_WARN, "terrasync file download error on server: " << fileName << " for " << _directory->absolutePath() <<
-                   "\n\tserver responded: " << responseCode() << "/" << responseReason());
-            _directory->didFailToUpdateFile(fileName, HTTPRepository::REPO_ERROR_HTTP);
-        }
-
-        _directory->repository()->finishedRequest(this);
-    }
+    void onDone() override {
+        file->close();
+        if (responseCode() == 200) {
+            std::string hash =
+                strutils::encodeHex(sha1_result(&hashContext), HASH_LENGTH);
+            _directory->didUpdateFile(fileName, hash, contentSize());
+        } else if (responseCode() == 404) {
+            SG_LOG(SG_TERRASYNC, SG_WARN,
+                   "terrasync file not found on server: "
+                       << fileName << " for " << _directory->absolutePath());
+            _directory->didFailToUpdateFile(
+                fileName, HTTPRepository::REPO_ERROR_FILE_NOT_FOUND);
+        } else {
+            SG_LOG(SG_TERRASYNC, SG_WARN,
+                   "terrasync file download error on server: "
+                       << fileName << " for " << _directory->absolutePath()
+                       << "\n\tserver responded: " << responseCode() << "/"
+                       << responseReason());
+            _directory->didFailToUpdateFile(fileName,
+                                            HTTPRepository::REPO_ERROR_HTTP);
+            // should we ever retry here?
+        }
+
+        _directory->repository()->finishedRequest(
+            this, HTTPRepoPrivate::RequestFinish::Done);
+    }

-    virtual void onFail()
-    {
-        HTTPRepository::ResultCode code = HTTPRepository::REPO_ERROR_SOCKET;
-        if (responseCode() == -1) {
-            code = HTTPRepository::REPO_ERROR_CANCELLED;
-        }
-
-        if (file) {
-            file->close();
-        }
-
-        file.reset();
-        if (pathInRepo.exists()) {
-            pathInRepo.remove();
-        }
-
-        if (_directory) {
-            _directory->didFailToUpdateFile(fileName, code);
-            _directory->repository()->finishedRequest(this);
-        }
-    }
+    void onFail() override {
+        HTTPRepository::ResultCode code = HTTPRepository::REPO_ERROR_SOCKET;
+        if (responseCode() == -1) {
+            code = HTTPRepository::REPO_ERROR_CANCELLED;
+        }
+
+        if (file) {
+            file->close();
+        }
+
+        file.reset();
+        if (pathInRepo.exists()) {
+            pathInRepo.remove();
+        }
+
+        if (_directory) {
+            _directory->didFailToUpdateFile(fileName, code);
+
+            const auto doRetry = code == HTTPRepository::REPO_ERROR_SOCKET
+                                     ? HTTPRepoPrivate::RequestFinish::Retry
+                                     : HTTPRepoPrivate::RequestFinish::Done;
+            _directory->repository()->finishedRequest(this, doRetry);
+        }
+    }
+
+    void prepareForRetry() override {
+        HTTP::Request::prepareForRetry();
+        file.reset();
+    }

 private:
     static std::string makeUrl(HTTPDirectory* d, const std::string& file)
     {
@@ -897,80 +838,106 @@ HTTPRepository::failure() const
         return _isRootDir;
     }

+    void prepareForRetry() override {
+        body.clear();
+        sha1_init(&hashContext);
+        HTTP::Request::prepareForRetry();
+    }
+
 protected:
-    virtual void gotBodyData(const char* s, int n)
-    {
-        body += std::string(s, n);
-        sha1_write(&hashContext, s, n);
-    }
+    void gotBodyData(const char *s, int n) override {
+        body += std::string(s, n);
+        sha1_write(&hashContext, s, n);
+    }

-    virtual void onDone()
-    {
-        if (responseCode() == 200) {
-            std::string hash = strutils::encodeHex(sha1_result(&hashContext), HASH_LENGTH);
-            if (!_targetHash.empty() && (hash != _targetHash)) {
-                SG_LOG(SG_TERRASYNC, SG_WARN,
-                       "Checksum error getting dirIndex for:"
-                           << _directory->relativePath() << "; expected "
-                           << _targetHash << " but received " << hash);
-
-                _directory->failedToUpdate(
-                    HTTPRepository::REPO_ERROR_CHECKSUM);
-                _directory->repository()->finishedRequest(this);
-                return;
-            }
-
-            std::string curHash = _directory->repository()->hashForPath(path());
-            if (hash != curHash) {
-                simgear::Dir d(_directory->absolutePath());
-                if (!d.exists()) {
-                    if (!d.create(0700)) {
-                        throw sg_io_exception("Unable to create directory", d.path());
-                    }
-                }
-
-                // dir index data has changed, so write to disk and update
-                // the hash accordingly
-                sg_ofstream of(pathInRepo(), std::ios::trunc | std::ios::out | std::ios::binary);
-                if (!of.is_open()) {
-                    throw sg_io_exception("Failed to open directory index file for writing", pathInRepo());
-                }
-
-                of.write(body.data(), body.size());
-                of.close();
-                _directory->dirIndexUpdated(hash);
-
-                //SG_LOG(SG_TERRASYNC, SG_INFO, "updated dir index " << _directory->absolutePath());
-            }
-
-            _directory->repository()->totalDownloaded += contentSize();
-
-            try {
-                // either way we've confirmed the index is valid so update
-                // children now
-                SGTimeStamp st;
-                st.stamp();
-                _directory->updateChildrenBasedOnHash();
-                SG_LOG(SG_TERRASYNC, SG_DEBUG, "after update of:" << _directory->absolutePath() << " child update took:" << st.elapsedMSec());
-            } catch (sg_exception& ) {
-                _directory->failedToUpdate(HTTPRepository::REPO_ERROR_IO);
-            }
-        } else if (responseCode() == 404) {
-            _directory->failedToUpdate(HTTPRepository::REPO_ERROR_FILE_NOT_FOUND);
-        } else {
-            _directory->failedToUpdate(HTTPRepository::REPO_ERROR_HTTP);
-        }
-
-        _directory->repository()->finishedRequest(this);
-    }
+    void onDone() override {
+        if (responseCode() == 200) {
+            std::string hash =
+                strutils::encodeHex(sha1_result(&hashContext), HASH_LENGTH);
+            if (!_targetHash.empty() && (hash != _targetHash)) {
+                SG_LOG(SG_TERRASYNC, SG_WARN,
+                       "Checksum error getting dirIndex for:"
+                           << _directory->relativePath() << "; expected "
+                           << _targetHash << " but received " << hash);
+
+                _directory->failedToUpdate(HTTPRepository::REPO_ERROR_CHECKSUM);
+
+                // don't retry checksum failures
+                _directory->repository()->finishedRequest(
+                    this, HTTPRepoPrivate::RequestFinish::Done);
+                return;
+            }
+
+            std::string curHash = _directory->repository()->hashForPath(path());
+            if (hash != curHash) {
+                simgear::Dir d(_directory->absolutePath());
+                if (!d.exists()) {
+                    if (!d.create(0700)) {
+                        throw sg_io_exception("Unable to create directory", d.path());
+                    }
+                }
+
+                // dir index data has changed, so write to disk and update
+                // the hash accordingly
+                sg_ofstream of(pathInRepo(), std::ios::trunc | std::ios::out |
+                                                 std::ios::binary);
+                if (!of.is_open()) {
+                    throw sg_io_exception(
+                        "Failed to open directory index file for writing",
+                        pathInRepo());
+                }
+
+                of.write(body.data(), body.size());
+                of.close();
+                _directory->dirIndexUpdated(hash);
+
+                // SG_LOG(SG_TERRASYNC, SG_INFO, "updated dir index " <<
+                // _directory->absolutePath());
+            }
+
+            _directory->repository()->totalDownloaded += contentSize();
+
+            try {
+                // either way we've confirmed the index is valid so update
+                // children now
+                SGTimeStamp st;
+                st.stamp();
+                _directory->updateChildrenBasedOnHash();
+                SG_LOG(SG_TERRASYNC, SG_DEBUG,
+                       "after update of:" << _directory->absolutePath()
+                                          << " child update took:"
+                                          << st.elapsedMSec());
+            } catch (sg_exception &) {
+                _directory->failedToUpdate(HTTPRepository::REPO_ERROR_IO);
+            }
+        } else if (responseCode() == 404) {
+            _directory->failedToUpdate(
+                HTTPRepository::REPO_ERROR_FILE_NOT_FOUND);
+        } else {
+            _directory->failedToUpdate(HTTPRepository::REPO_ERROR_HTTP);
+        }
+
+        _directory->repository()->finishedRequest(
+            this, HTTPRepoPrivate::RequestFinish::Done);
+    }

-    virtual void onFail()
-    {
-        if (_directory) {
-            _directory->failedToUpdate(HTTPRepository::REPO_ERROR_SOCKET);
-            _directory->repository()->finishedRequest(this);
-        }
-    }
+    void onFail() override {
+        HTTPRepository::ResultCode code = HTTPRepository::REPO_ERROR_SOCKET;
+        if (responseCode() == -1) {
+            code = HTTPRepository::REPO_ERROR_CANCELLED;
+        }
+
+        SG_LOG(SG_TERRASYNC, SG_WARN,
+               "Socket failure getting directory: " << url());
+        if (_directory) {
+            _directory->failedToUpdate(code);
+            const auto doRetry = code == HTTPRepository::REPO_ERROR_SOCKET
+                                     ? HTTPRepoPrivate::RequestFinish::Retry
+                                     : HTTPRepoPrivate::RequestFinish::Done;
+            _directory->repository()->finishedRequest(this, doRetry);
+        }
+    }

 private:
     static std::string makeUrl(HTTPDirectory* d)
     {
@@ -1194,10 +1161,9 @@ HTTPRepository::failure() const
         DirectoryWithPath p(relPath);
         auto it = std::find_if(directories.begin(), directories.end(), p);
         if (it != directories.end()) {
-            HTTPDirectory* d = it->get();
-            assert(d->absolutePath() == absPath);
+            assert((*it)->absolutePath() == absPath);
             directories.erase(it);
         } else {
             // we encounter this code path when deleting an orphaned directory
         }
@@ -1213,40 +1179,46 @@
 void HTTPRepoPrivate::makeRequest(RepoRequestPtr req)
 {
     if (activeRequests.size() > 4) {
         queuedRequests.push_back(req);
     } else {
         activeRequests.push_back(req);
         http->makeRequest(req);
     }
 }

-void HTTPRepoPrivate::finishedRequest(const RepoRequestPtr& req)
-{
-    RequestVector::iterator it = std::find(activeRequests.begin(), activeRequests.end(), req);
-    // in some cases, for example a checksum failure, we clear the active
+void HTTPRepoPrivate::finishedRequest(const RepoRequestPtr &req,
+                                      RequestFinish retryRequest) {
+    auto it = std::find(activeRequests.begin(), activeRequests.end(), req);
+    // in some cases, we clear the active
     // and queued request vectors, so the ::find above can fail
     if (it != activeRequests.end()) {
         activeRequests.erase(it);
     }

+    if (retryRequest == HTTPRepoPrivate::RequestFinish::Retry) {
+        SG_LOG(SG_TERRASYNC, SG_INFO, "Retrying request for:" << req->url());
+        req->prepareForRetry();
+        queuedRequests.push_back(req);
+    }
+
     if (!queuedRequests.empty()) {
         RepoRequestPtr rr = queuedRequests.front();
         queuedRequests.erase(queuedRequests.begin());
         activeRequests.push_back(rr);
         http->makeRequest(rr);
     }

     // rate limit how often we write this, since otherwise
     // it dominates the time on Windows. 256 seems about right,
     // causes a write a few times a minute.
     if (hashCacheDirty > 256) {
         writeHashCache();
     }

     if (activeRequests.empty() && queuedRequests.empty()) {
         isUpdating = false;
         writeHashCache();
     }
 }

 void HTTPRepoPrivate::failedToGetRootIndex(HTTPRepository::ResultCode st)
@@ -1262,36 +1234,49 @@ HTTPRepository::failure() const
 void HTTPRepoPrivate::failedToUpdateChild(const SGPath& relativePath,
                                           HTTPRepository::ResultCode fileStatus)
 {
-    if (fileStatus == HTTPRepository::REPO_ERROR_CHECKSUM) {
-        // stop updating, and mark repository as failed, becuase this
-        // usually indicates we need to start a fresh update from the
-        // root.
-        // (we could issue a retry here, but we leave that to higher layers)
-        status = fileStatus;
-
-        queuedRequests.clear();
-
-        RequestVector copyOfActive(activeRequests);
-        RequestVector::iterator rq;
-        for (rq = copyOfActive.begin(); rq != copyOfActive.end(); ++rq) {
-            http->cancelRequest(*rq, "Repository updated failed due to checksum error");
-        }
-
-        SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update repository:" << baseUrl
-               << "\n\tchecksum failure for: " << relativePath
-               << "\n\tthis typically indicates the remote repository is corrupt or was being updated during the sync");
-    } else if (fileStatus == HTTPRepository::REPO_ERROR_CANCELLED) {
+    if (fileStatus == HTTPRepository::REPO_ERROR_CANCELLED) {
         // if we were cancelled, don't report or log
         return;
     } else {
-        SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update entry:" << relativePath << " status/code: "
-               << innerResultCodeAsString(fileStatus) << "/" << fileStatus);
+        SG_LOG(SG_TERRASYNC, SG_WARN,
+               "failed to update entry:" << relativePath << " status/code: "
+                                         << innerResultCodeAsString(fileStatus)
+                                         << "/" << fileStatus);
     }

-    Failure f;
+    HTTPRepository::Failure f;
     f.path = relativePath;
     f.error = fileStatus;
     failures.push_back(f);
+
+    if (failures.size() >= maxPermittedFailures) {
+        SG_LOG(SG_TERRASYNC, SG_WARN,
+               "Repo:" << baseUrl << " exceeded failure count ("
+                       << failures.size() << "), abandoning");
+
+        status = HTTPRepository::REPO_PARTIAL_UPDATE;
+
+        queuedRequests.clear();
+        auto copyOfActiveRequests = activeRequests;
+        for (auto rq : copyOfActiveRequests) {
+            http->cancelRequest(rq,
+                                "Abandoning repo sync due to multiple failures");
+        }
+    }
+}
+
+void HTTPRepoPrivate::updatedChildSuccessfully(const SGPath &relativePath) {
+    if (failures.empty()) {
+        return;
+    }
+
+    // find and remove any existing failures for that path
+    failures.erase(
+        std::remove_if(failures.begin(), failures.end(),
+                       [relativePath](const HTTPRepository::Failure &f) {
+                           return f.path == relativePath;
+                       }),
+        failures.end());
 }

 } // of namespace simgear
@@ -33,64 +33,77 @@ class HTTPRepoPrivate;
 class HTTPRepository
 {
 public:
     enum ResultCode {
         REPO_NO_ERROR = 0,
         REPO_ERROR_NOT_FOUND,
         REPO_ERROR_SOCKET,
         SVN_ERROR_XML,
         SVN_ERROR_TXDELTA,
         REPO_ERROR_IO,
         REPO_ERROR_CHECKSUM,
         REPO_ERROR_FILE_NOT_FOUND,
         REPO_ERROR_HTTP,
         REPO_ERROR_CANCELLED,
-        REPO_PARTIAL_UPDATE
+        REPO_PARTIAL_UPDATE ///< repository is working, but file-level failures
+                            ///< occurred
     };

-    HTTPRepository(const SGPath& root, HTTP::Client* cl);
+    HTTPRepository(const SGPath &root, HTTP::Client *cl);
     virtual ~HTTPRepository();

     virtual SGPath fsBase() const;

-    virtual void setBaseUrl(const std::string& url);
+    virtual void setBaseUrl(const std::string &url);
     virtual std::string baseUrl() const;

-    virtual HTTP::Client* http() const;
+    virtual HTTP::Client *http() const;

     virtual void update();

     virtual bool isDoingSync() const;

     virtual ResultCode failure() const;

     virtual size_t bytesToDownload() const;

     virtual size_t bytesDownloaded() const;

     /**
      * optionally provide the location of an installer copy of this
      * repository. When a file is missing it will be copied from this tree.
      */
-    void setInstalledCopyPath(const SGPath& copyPath);
+    void setInstalledCopyPath(const SGPath &copyPath);

     static std::string resultCodeAsString(ResultCode code);

     enum class SyncAction { Add, Update, Delete, UpToDate };

     enum EntryType { FileType, DirectoryType, TarballType };

     struct SyncItem {
         const std::string directory; // relative path in the repository
         const EntryType type;
         const std::string filename;
         const SyncAction action;
         const SGPath pathOnDisk; // path the entry does / will have
     };

     using SyncPredicate = std::function<bool(const SyncItem &item)>;

     void setFilter(SyncPredicate sp);

+    struct Failure {
+        SGPath path;
+        ResultCode error;
+    };
+
+    using FailureVec = std::vector<Failure>;
+
+    /**
+     * @brief return file-level failures
+     */
+    FailureVec failures() const;
+
 private:
     bool isBare() const;

simgear/io/HTTPRepository_private.hxx (new file, 122 lines)
@@ -0,0 +1,122 @@
+// HTTPRepository_private.hxx -- plain HTTP TerraSync remote client
+//
+// Copyright (C) 2016 James Turner <zakalawe@mac.com>
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include <simgear/io/HTTPClient.hxx>
+#include <simgear/misc/sg_path.hxx>
+
+#include "HTTPRepository.hxx"
+
+namespace simgear {
+
+class HTTPDirectory;
+using HTTPDirectory_ptr = std::unique_ptr<HTTPDirectory>;
+
+class HTTPRepoGetRequest : public HTTP::Request {
+public:
+    HTTPRepoGetRequest(HTTPDirectory *d, const std::string &u)
+        : HTTP::Request(u), _directory(d) {}
+
+    virtual void cancel();
+
+    size_t contentSize() const { return _contentSize; }
+
+    void setContentSize(size_t sz) { _contentSize = sz; }
+
+protected:
+    HTTPDirectory *_directory;
+    size_t _contentSize = 0;
+};
+
+typedef SGSharedPtr<HTTPRepoGetRequest> RepoRequestPtr;
+
+class HTTPRepoPrivate {
+public:
+    struct HashCacheEntry {
+        std::string filePath;
+        time_t modTime;
+        size_t lengthBytes;
+        std::string hashHex;
+    };
+
+    typedef std::vector<HashCacheEntry> HashCache;
+    HashCache hashes;
+    int hashCacheDirty = 0;
+
+    HTTPRepository::FailureVec failures;
+    int maxPermittedFailures = 16;
+
+    HTTPRepoPrivate(HTTPRepository *parent)
+        : p(parent), isUpdating(false), status(HTTPRepository::REPO_NO_ERROR),
+          totalDownloaded(0) {
+        ;
+    }
+
+    ~HTTPRepoPrivate();
+
+    HTTPRepository *p; // link back to outer
+    HTTP::Client *http;
+    std::string baseUrl;
+    SGPath basePath;
+    bool isUpdating;
+    HTTPRepository::ResultCode status;
+    HTTPDirectory_ptr rootDir;
+    size_t totalDownloaded;
+    HTTPRepository::SyncPredicate syncPredicate;
+
+    HTTP::Request_ptr updateFile(HTTPDirectory *dir, const std::string &name,
+                                 size_t sz);
+    HTTP::Request_ptr updateDir(HTTPDirectory *dir, const std::string &hash,
+                                size_t sz);
+
+    std::string hashForPath(const SGPath &p);
+    void updatedFileContents(const SGPath &p, const std::string &newHash);
+    void parseHashCache();
+    std::string computeHashForPath(const SGPath &p);
+    void writeHashCache();
+
+    void failedToGetRootIndex(HTTPRepository::ResultCode st);
+    void failedToUpdateChild(const SGPath &relativePath,
+                             HTTPRepository::ResultCode fileStatus);
+
+    void updatedChildSuccessfully(const SGPath &relativePath);
+
+    typedef std::vector<RepoRequestPtr> RequestVector;
+    RequestVector queuedRequests, activeRequests;
+
+    void makeRequest(RepoRequestPtr req);
+
+    enum class RequestFinish { Done, Retry };
+
+    void finishedRequest(const RepoRequestPtr &req, RequestFinish retryRequest);
+
+    HTTPDirectory *getOrCreateDirectory(const std::string &path);
+    bool deleteDirectory(const std::string &relPath, const SGPath &absPath);
+
+    typedef std::vector<HTTPDirectory_ptr> DirectoryVector;
+    DirectoryVector directories;
+
+    SGPath installedCopyPath;
+};
+
+} // namespace simgear
@@ -56,6 +56,15 @@ Request::~Request()

 }

+void Request::prepareForRetry() {
+    setReadyState(UNSENT);
+    _willClose = false;
+    _connectionCloseHeader = false;
+    _responseStatus = 0;
+    _responseLength = 0;
+    _receivedBodyBytes = 0;
+}
+
 //------------------------------------------------------------------------------
 Request* Request::done(const Callback& cb)
 {
@@ -208,7 +208,9 @@ public:
     */
     bool serverSupportsPipelining() const;

+    virtual void prepareForRetry();
+
 protected:
     Request(const std::string& url, const std::string method = "GET");

     virtual void requestStart();

@@ -222,12 +224,14 @@ protected:
     virtual void onFail();
     virtual void onAlways();

-    void setFailure(int code, const std::string& reason);
     void setSuccess(int code);
+    void setFailure(int code, const std::string &reason);
+
 private:
     friend class Client;
     friend class Connection;
     friend class ContentDecoder;
+    friend class TestApi;

     Request(const Request&); // = delete;
     Request& operator=(const Request&); // = delete;
simgear/io/HTTPTestApi_private.hxx (new file, 45 lines)
@@ -0,0 +1,45 @@
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Library General Public
+// License as published by the Free Software Foundation; either
+// version 2 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+// You should have received a copy of the GNU Library General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+
+#pragma once
+
+#include <functional>
+
+#include "HTTPRequest.hxx"
+
+namespace simgear {
+namespace HTTP {
+
+class Client;
+
+using ResponseDoneCallback =
+    std::function<bool(int curlResult, Request_ptr req)>;
+
+/**
+ * @brief this API is for unit-testing HTTP code.
+ * Don't use it for anything else. It's for unit-testing.
+ */
+class TestApi {
+public:
+    // allow the test suite to manipulate requests to simulate network errors;
+    // without this, it's hard to provoke certain failures in a loop-back
+    // network situation.
+    static void setResponseDoneCallback(Client *cl, ResponseDoneCallback cb);
+
+    static void markRequestAsFailed(Request_ptr req, int curlCode,
+                                    const std::string &message);
+};
+
+} // namespace HTTP
+} // namespace simgear
@@ -1,16 +1,18 @@
 #include <cassert>
 #include <cstdlib>
+#include <errno.h>
+#include <fcntl.h>
+#include <functional>
 #include <iostream>
 #include <map>
 #include <sstream>
-#include <errno.h>
-#include <fcntl.h>

 #include <simgear/simgear_config.h>

-#include "test_HTTP.hxx"
-#include "HTTPRepository.hxx"
 #include "HTTPClient.hxx"
+#include "HTTPRepository.hxx"
+#include "HTTPTestApi_private.hxx"
+#include "test_HTTP.hxx"

 #include <simgear/misc/strutils.hxx>
 #include <simgear/misc/sg_hash.hxx>
@@ -25,6 +27,8 @@

 using namespace simgear;

+using TestApi = simgear::HTTP::TestApi;
+
 std::string dataForFile(const std::string& parentName, const std::string& name, int revision)
 {
     std::ostringstream os;
@@ -45,6 +49,9 @@ std::string hashForData(const std::string& d)
     return strutils::encodeHex(sha1_result(&info), HASH_LENGTH);
 }

+class TestRepoEntry;
+using AccessCallback = std::function<void(TestRepoEntry &entry)>;
+
 class TestRepoEntry
 {
 public:
@@ -70,7 +77,8 @@ public:
     int requestCount;
     bool getWillFail;
     bool returnCorruptData;
-    std::unique_ptr<SGCallback> accessCallback;
+
+    AccessCallback accessCallback;

     void clearRequestCounts();

@@ -270,8 +278,8 @@ public:
             return;
         }

-        if (entry->accessCallback.get()) {
-            (*entry->accessCallback)();
+        if (entry->accessCallback) {
+            entry->accessCallback(*entry);
         }

         if (entry->getWillFail) {
@@ -282,20 +290,29 @@ public:
             entry->requestCount++;

             std::string content;
+            bool closeSocket = false;
+            size_t contentSize = 0;
+
             if (entry->returnCorruptData) {
                 content = dataForFile("!$£$!" + entry->parent->name,
                                       "corrupt_" + entry->name,
                                       entry->revision);
+                contentSize = content.size();
             } else {
                 content = entry->data();
+                contentSize = content.size();
             }

             std::stringstream d;
             d << "HTTP/1.1 " << 200 << " " << reasonForCode(200) << "\r\n";
-            d << "Content-Length:" << content.size() << "\r\n";
+            d << "Content-Length:" << contentSize << "\r\n";
             d << "\r\n"; // final CRLF to terminate the headers
             d << content;
             push(d.str().c_str());
+
+            if (closeSocket) {
+                closeWhenDone();
+            }
         } else {
             sendErrorResponse(404, false, "");
         }
@@ -401,6 +418,15 @@ void waitForUpdateComplete(HTTP::Client* cl, HTTPRepository* repo)
     std::cerr << "timed out" << std::endl;
 }

+void runForTime(HTTP::Client *cl, HTTPRepository *repo, int msec = 15) {
+    SGTimeStamp start(SGTimeStamp::now());
+    while (start.elapsedMSec() < msec) {
+        cl->update();
+        testServer.poll();
+        SGTimeStamp::sleepForMSec(1);
+    }
+}
+
 void testBasicClone(HTTP::Client* cl)
 {
     std::unique_ptr<HTTPRepository> repo;
@@ -618,9 +644,19 @@ void testAbandonCorruptFiles(HTTP::Client* cl)
     repo->setBaseUrl("http://localhost:2000/repo");
     repo->update();
     waitForUpdateComplete(cl, repo.get());
-    if (repo->failure() != HTTPRepository::REPO_ERROR_CHECKSUM) {
+    if (repo->failure() != HTTPRepository::REPO_PARTIAL_UPDATE) {
         std::cerr << "Got failure state:" << repo->failure() << std::endl;
         throw sg_exception("Bad result from corrupt files test");
+    }
+
+    auto failedFiles = repo->failures();
+    if (failedFiles.size() != 1) {
+        throw sg_exception("Bad result from corrupt files test");
+    }
+
+    if (failedFiles.front().path.utf8Str() != "dirB/subdirG/fileBGA") {
+        throw sg_exception("Bad path from corrupt files test:" +
+                           failedFiles.front().path.utf8Str());
     }

     repo.reset();
@@ -657,15 +693,21 @@ void testServerModifyDuringSync(HTTP::Client* cl)
     repo.reset(new HTTPRepository(p, cl));
     repo->setBaseUrl("http://localhost:2000/repo");

-    global_repo->findEntry("dirA/fileAA")->accessCallback.reset(make_callback(&modifyBTree));
+    global_repo->findEntry("dirA/fileAA")->accessCallback =
+        [](const TestRepoEntry &r) {
+            std::cout << "Modifying sub-tree" << std::endl;
+            global_repo->findEntry("dirB/subdirA/fileBAC")->revision++;
+            global_repo->defineFile("dirB/subdirZ/fileBZA");
+            global_repo->findEntry("dirB/subdirB/fileBBB")->revision++;
+        };
+
     repo->update();
     waitForUpdateComplete(cl, repo.get());

-    global_repo->findEntry("dirA/fileAA")->accessCallback.reset();
+    global_repo->findEntry("dirA/fileAA")->accessCallback = AccessCallback{};

-    if (repo->failure() != HTTPRepository::REPO_ERROR_CHECKSUM) {
+    if (repo->failure() != HTTPRepository::REPO_PARTIAL_UPDATE) {
         throw sg_exception("Bad result from modify during sync test");
     }

     std::cout << "Passed test modify server during sync" << std::endl;
@@ -755,6 +797,103 @@ void testCopyInstalledChildren(HTTP::Client* cl)
     std::cout << "passed Copy installed children" << std::endl;
 }

+void testRetryAfterSocketFailure(HTTP::Client *cl) {
+    global_repo->clearRequestCounts();
+    global_repo->clearFailFlags();
+
+    std::unique_ptr<HTTPRepository> repo;
+    SGPath p(simgear::Dir::current().path());
+    p.append("http_repo_retry_after_socket_fail");
+    simgear::Dir pd(p);
+    if (pd.exists()) {
+        pd.removeChildren();
+    }
+
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+
+    int aaFailsRemaining = 2;
+    int subdirBAFailsRemaining = 2;
+    TestApi::setResponseDoneCallback(
+        cl, [&aaFailsRemaining, &subdirBAFailsRemaining](int curlResult,
+                                                         HTTP::Request_ptr req) {
+            if (req->url() == "http://localhost:2000/repo/dirA/fileAA") {
+                if (aaFailsRemaining == 0)
+                    return false;
+
+                --aaFailsRemaining;
+                TestApi::markRequestAsFailed(req, 56, "Simulated socket failure");
+                return true;
+            } else if (req->url() ==
+                       "http://localhost:2000/repo/dirB/subdirA/.dirindex") {
+                if (subdirBAFailsRemaining == 0)
+                    return false;
+
+                --subdirBAFailsRemaining;
+                TestApi::markRequestAsFailed(req, 56, "Simulated socket failure");
+                return true;
+            } else {
+                return false;
+            }
+        });
+
+    repo->update();
+    waitForUpdateComplete(cl, repo.get());
+
+    if (repo->failure() != HTTPRepository::REPO_NO_ERROR) {
+        throw sg_exception("Bad result from retry socket failure test");
+    }
+
+    verifyFileState(p, "dirA/fileAA");
+    verifyFileState(p, "dirB/subdirA/fileBAA");
+    verifyFileState(p, "dirB/subdirA/fileBAC");
+
+    verifyRequestCount("dirA/fileAA", 3);
+    verifyRequestCount("dirB/subdirA", 3);
+    verifyRequestCount("dirB/subdirA/fileBAC", 1);
+}
+
+void testPersistentSocketFailure(HTTP::Client *cl) {
+    global_repo->clearRequestCounts();
+    global_repo->clearFailFlags();
+
+    std::unique_ptr<HTTPRepository> repo;
+    SGPath p(simgear::Dir::current().path());
+    p.append("http_repo_persistent_socket_fail");
+    simgear::Dir pd(p);
+    if (pd.exists()) {
+        pd.removeChildren();
+    }
+
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+
+    TestApi::setResponseDoneCallback(
+        cl, [](int curlResult, HTTP::Request_ptr req) {
+            const auto url = req->url();
+            if (url.find("http://localhost:2000/repo/dirB") == 0) {
+                TestApi::markRequestAsFailed(req, 56, "Simulated socket failure");
+                return true;
+            }
+
+            return false;
+        });
+
+    repo->update();
+    waitForUpdateComplete(cl, repo.get());
+
+    if (repo->failure() != HTTPRepository::REPO_PARTIAL_UPDATE) {
+        throw sg_exception("Bad result from retry socket failure test");
+    }
+
+    verifyFileState(p, "dirA/fileAA");
+    verifyRequestCount("dirA/fileAA", 1);
+
+    verifyRequestCount("dirD/fileDA", 1);
+    verifyRequestCount("dirD/subdirDA/fileDAA", 1);
+    verifyRequestCount("dirD/subdirDB/fileDBA", 1);
+}
+
 int main(int argc, char* argv[])
 {
     sglog().setLogLevels( SG_ALL, SG_INFO );
@@ -800,6 +939,8 @@ int main(int argc, char* argv[])
     cl.clearAllConnections();

     testCopyInstalledChildren(&cl);
+    testRetryAfterSocketFailure(&cl);
+    testPersistentSocketFailure(&cl);

     std::cout << "all tests passed ok" << std::endl;
     return 0;
@@ -744,6 +744,7 @@ void SGTerraSync::WorkerThread::fail(SyncItem failedItem)
     _state._fail_count++;
     failedItem._status = SyncItem::Failed;
     _freshTiles.push_back(failedItem);
+    // note we also end up here for partial syncs
     SG_LOG(SG_TERRASYNC,SG_INFO,
            "Failed to sync'" << failedItem._dir << "'");
     _completedTiles[ failedItem._dir ] = now + UpdateInterval::FailedAttempt;