Merge branch 'master' into pbap

Patrick Ohly 2012-03-07 08:22:41 +01:00
commit 90ee964b90
27 changed files with 1885 additions and 524 deletions

View file

@ -119,7 +119,12 @@ TYPE_rpm = -R
# Dependency calculation is intentionally incomplete:
# - don't force dependency on specific EDS libs via backends, their versions change too much (handled via --enable-evolution-compatibility and dynamic loading of the backends)
# - ignore client-test dependencies (because users typically don't run it)
REQUIRES_deb = --requires="'$(shell set -x; cd dist; LD_LIBRARY_PATH=$(distdir)/usr/lib:$(distdir)/usr/lib/syncevolution dpkg-shlibdeps -L$(srcdir)/src/shlibs.local --ignore-missing-info -O $$(for i in $$(find $(distdir) -type f -perm /u+x | grep -v -e client-test -e lib/syncevolution/sync); do if file $$i | grep ELF >/dev/null; then echo $$i; fi; done) | sed -e 's/[^=]*=//')$(REQUIRES_deb_neon)'"
# - be more flexible about kdelibs5 than dpkg-shlibdeps: it is found as the package
# for libkdeui.so.5 and libkdecore.so.5 on Ubuntu Lucid, but after Debian
# Squeeze the package was replaced by individual library packages. On such
# distros, libkdeui5 is what we need.
# - same for kdepimlibs5 -> libakonadi-kde4
REQUIRES_deb = --requires="'$(shell set -x; cd dist; LD_LIBRARY_PATH=$(distdir)/usr/lib:$(distdir)/usr/lib/syncevolution dpkg-shlibdeps -L$(srcdir)/src/shlibs.local --ignore-missing-info -O $$(for i in $$(find $(distdir) -type f -perm /u+x | grep -v -e client-test -e lib/syncevolution/sync); do if file $$i | grep ELF >/dev/null; then echo $$i; fi; done) | sed -e 's/kdelibs5 ([^,]*),/kdelibs5 | libkdeui5,/g' -e 's/kdepimlibs5 ([^,]*),/kdepimlibs5 | libakonadi-kde4,/g' -e 's/[^=]*=//')$(REQUIRES_deb_neon)'"
if NEON_COMPATIBILITY
# --enable-neon-compatibility in src/backends/webdav:
# replace dependencies from linking with hard-coded dlopen() dependencies

View file

@ -8,7 +8,7 @@ dnl Invoke autogen.sh to produce a configure script.
#
# Starting with the 1.1 release cycle, the rpm-style
# .99 pseudo-version number is used to mark a pre-release.
AC_INIT([syncevolution], [m4_esyscmd([build/gen-git-version.sh 1.2.2])])
AC_INIT([syncevolution], [m4_esyscmd([build/gen-git-version.sh 1.2.99])])
# STABLE_VERSION=1.0.1+
AC_SUBST(STABLE_VERSION)
@ -498,10 +498,10 @@ then
then
KDEKWALLETFOUND=yes
if ! test "$KDE_KWALLET_CFLAGS"; then
KDE_KWALLET_CFLAGS="-I`$KDE4_CONFIG --path include` -I`$KDE4_CONFIG --path include`/KDE -I`$QMAKE -query QT_INSTALL_HEADERS` `pkg-config --cflags QtDBus`"
KDE_KWALLET_CFLAGS="-I`$KDE4_CONFIG --path include` -I`$KDE4_CONFIG --path include`/KDE `pkg-config --cflags QtDBus QtCore`"
fi
if ! test "$KDE_KWALLET_LIBS"; then
KDE_KWALLET_LIBS="-lkdeui -lkdecore `pkg-config --libs QtDBus`"
KDE_KWALLET_LIBS="-lkdeui -lkdecore -L`kde4-config --install lib` `pkg-config --libs QtDBus QtCore`"
fi
AC_LANG_PUSH(C++)
old_CPPFLAGS="$CPPFLAGS"

View file

@ -38,6 +38,9 @@ ActiveSyncCalendarSource::ActiveSyncCalendarSource(const SyncSourceParams &param
void ActiveSyncCalendarSource::beginSync(const std::string &lastToken, const std::string &resumeToken)
{
// erase content which might have been set in a previous call
reset();
// claim item node for our change tracking
m_trackingNode.swap(m_itemNode);

View file

@ -110,6 +110,7 @@ void CalDAVSource::listAllSubItems(SubRevisionMap_t &revisions)
boost::ref(revisions),
_1, _2, boost::ref(data)));
m_cache.clear();
m_cache.m_initialized = false;
parser.pushHandler(boost::bind(Neon::XMLParser::accept, "urn:ietf:params:xml:ns:caldav", "calendar-data", _2, _3),
boost::bind(Neon::XMLParser::append, boost::ref(data), _2, _3));
Neon::Request report(*getSession(), "REPORT", getCalendar().m_path, query, parser);
@ -180,6 +181,7 @@ void CalDAVSource::updateAllSubItems(SubRevisionMap_t &revisions)
// build list of new or updated entries,
// copy others to cache
m_cache.clear();
m_cache.m_initialized = false;
std::list<std::string> mustRead;
BOOST_FOREACH(const StringPair &item, items) {
SubRevisionMap_t::iterator it = revisions.find(item.first);
@ -188,6 +190,24 @@ void CalDAVSource::updateAllSubItems(SubRevisionMap_t &revisions)
// read current information below
SE_LOG_DEBUG(NULL, NULL, "updateAllSubItems(): read new or modified item %s", item.first.c_str());
mustRead.push_back(item.first);
// The server told us that the item exists. We still need
// to deal with the situation that the server might fail
// to deliver the item data when we ask for it below.
//
// There are two reasons why this can happen: either an
// item was removed in the meantime or the server is
// confused. The latter started to happen reliably with
// the Google Calendar server sometime in January/February
// 2012.
//
// In both cases, let's assume that the item is really gone
// (and not just unreadable due to that other Google Calendar
// bug, see loadItem()+REPORT workaround), and therefore let's
// remove the entry from the revisions.
if (it != revisions.end()) {
revisions.erase(it);
}
m_cache.erase(item.first);
} else {
// copy still relevant information
SE_LOG_DEBUG(NULL, NULL, "updateAllSubItems(): unmodified item %s", it->first.c_str());
@ -203,8 +223,14 @@ void CalDAVSource::updateAllSubItems(SubRevisionMap_t &revisions)
// have problems with providing all of its data via GET or the
// multiget REPORT below. It returns a 404 error for items that a
// calendar-query includes (see loadItem()). Such items are
// ignored it and thus will be silently skipped. This is not
// ignored and thus will be silently skipped. This is not
// perfect, but better than failing the sync.
//
// Unfortunately there are other servers (Radicale, I'm looking at
// you) which simply return neither data nor errors for the
// requested hrefs. To handle that we try the multiget first,
// record retrieved or failed responses, then follow up with
// individual requests for anything that wasn't mentioned.
if (!mustRead.empty()) {
std::stringstream buffer;
buffer << "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"
@ -214,17 +240,19 @@ void CalDAVSource::updateAllSubItems(SubRevisionMap_t &revisions)
" <D:getetag/>\n"
" <C:calendar-data/>\n"
"</D:prop>\n";
BOOST_FOREACH(const std::string &href, mustRead) {
buffer << "<D:href>" << luid2path(href) << "</D:href>\n";
BOOST_FOREACH(const std::string &luid, mustRead) {
buffer << "<D:href>" << luid2path(luid) << "</D:href>\n";
}
buffer << "</C:calendar-multiget>";
std::string query = buffer.str();
std::set<std::string> results; // LUIDs of all hrefs returned by report
getSession()->startOperation("updateAllSubItems REPORT 'multiget new/updated items'", deadline);
while (true) {
string data;
Neon::XMLParser parser;
parser.initReportParser(boost::bind(&CalDAVSource::appendItem, this,
parser.initReportParser(boost::bind(&CalDAVSource::appendMultigetResult, this,
boost::ref(revisions),
boost::ref(results),
_1, _2, boost::ref(data)));
parser.pushHandler(boost::bind(Neon::XMLParser::accept, "urn:ietf:params:xml:ns:caldav", "calendar-data", _2, _3),
boost::bind(Neon::XMLParser::append, boost::ref(data), _2, _3));
@ -236,7 +264,41 @@ void CalDAVSource::updateAllSubItems(SubRevisionMap_t &revisions)
break;
}
}
// Workaround for Radicale 0.6.4: it simply returns nothing (no error, no data).
// Fall back to GET of items with no response.
BOOST_FOREACH(const std::string &luid, mustRead) {
if (results.find(luid) == results.end()) {
getSession()->startOperation(StringPrintf("GET item %s not returned by 'multiget new/updated items'", luid.c_str()),
deadline);
std::string path = luid2path(luid);
std::string data;
std::string etag;
while (true) {
data.clear();
Neon::Request req(*getSession(), "GET", path,
"", data);
req.addHeader("Accept", contentType());
if (req.run()) {
etag = getETag(req);
break;
}
}
appendItem(revisions, path, etag, data);
}
}
}
}
int CalDAVSource::appendMultigetResult(SubRevisionMap_t &revisions,
std::set<std::string> &luids,
const std::string &href,
const std::string &etag,
std::string &data)
{
// record which items were seen in the response...
luids.insert(path2luid(href));
// and store information about them
return appendItem(revisions, href, etag, data);
}
int CalDAVSource::appendItem(SubRevisionMap_t &revisions,
@ -283,6 +345,7 @@ int CalDAVSource::appendItem(SubRevisionMap_t &revisions,
if (entry.m_subids.empty()) {
SE_LOG_DEBUG(NULL, NULL, "ignoring broken item %s (is empty)", davLUID.c_str());
revisions.erase(davLUID);
m_cache.erase(davLUID);
data.clear();
return 0;
}

View file

@ -177,6 +177,14 @@ class CalDAVSource : public WebDAVSource,
std::string getSubDescription(Event &event, const string &subid);
/** callback for multiget: same as appendItem, but also records the luids of all responses */
int appendMultigetResult(SubRevisionMap_t &revisions,
std::set<std::string> &luids,
const std::string &href,
const std::string &etag,
std::string &data);
/** callback for listAllSubItems: parse and add new item */
int appendItem(SubRevisionMap_t &revisions,
const std::string &href,

View file

@ -48,11 +48,11 @@ std::string features()
return boost::join(res, ", ");
}
URI URI::parse(const std::string &url)
URI URI::parse(const std::string &url, bool collection)
{
ne_uri uri;
int error = ne_uri_parse(url.c_str(), &uri);
URI res = fromNeon(uri);
URI res = fromNeon(uri, collection);
if (!res.m_port) {
res.m_port = ne_uri_defaultport(res.m_scheme.c_str());
}
@ -66,14 +66,14 @@ URI URI::parse(const std::string &url)
return res;
}
URI URI::fromNeon(const ne_uri &uri)
URI URI::fromNeon(const ne_uri &uri, bool collection)
{
URI res;
if (uri.scheme) { res.m_scheme = uri.scheme; }
if (uri.host) { res.m_host = uri.host; }
if (uri.userinfo) { res.m_userinfo = uri.userinfo; }
if (uri.path) { res.m_path = normalizePath(uri.path, false); }
if (uri.path) { res.m_path = normalizePath(uri.path, collection); }
if (uri.query) { res.m_query = uri.query; }
if (uri.fragment) { res.m_fragment = uri.fragment; }
res.m_port = uri.port;
@ -139,10 +139,17 @@ std::string URI::normalizePath(const std::string &path, bool collection)
std::string res;
res.reserve(path.size() * 150 / 100);
// always start with one leading slash
res = "/";
typedef boost::split_iterator<string::const_iterator> string_split_iterator;
string_split_iterator it =
boost::make_split_iterator(path, boost::first_finder("/", boost::is_iequal()));
while (!it.eof()) {
if (it->begin() == it->end()) {
// avoid adding empty path components
++it;
} else {
std::string split(it->begin(), it->end());
// Let's have an exception here for "%u", since we use that to replace the
// actual username into the path. It's safe to ignore "%u" because it
@ -159,6 +166,7 @@ std::string URI::normalizePath(const std::string &path, bool collection)
res += '/';
}
}
}
if (collection && !boost::ends_with(res, "/")) {
res += '/';
}

View file

@ -141,10 +141,13 @@ struct URI {
* Split URL into parts. Throws TransportAgentException on
* invalid url. Port will be set to default for scheme if not set
* in URL. Path is normalized.
*
* @param collection set to true if the normalized path is for
* a collection and shall have a trailing slash
*/
static URI parse(const std::string &url);
static URI parse(const std::string &url, bool collection=false);
static URI fromNeon(const ne_uri &other);
static URI fromNeon(const ne_uri &other, bool collection=false);
/**
* produce new URI from current path and new one (may be absolute
@ -164,7 +167,9 @@ struct URI {
/**
* Removes differences caused by escaping different characters.
* Appends slash if path is a collection (or meant to be one) and
* doesn't have a trailing slash.
* doesn't have a trailing slash. Removes double slashes.
*
* @param path an absolute path (leading slash)
*/
static std::string normalizePath(const std::string &path, bool collection);
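
For illustration, a minimal sketch of what the new collection flag changes, based only on the documentation above (the URL and the expected paths are assumptions for the example, not output taken from the code):

    // Hypothetical usage of URI::parse() with the collection flag.
    // Expected paths follow from the documented normalization rules
    // (double slashes removed, trailing slash appended for collections).
    Neon::URI item = Neon::URI::parse("https://caldav.example.com/dav//calendars/work");
    // item.m_path should come out as "/dav/calendars/work"
    Neon::URI coll = Neon::URI::parse("https://caldav.example.com/dav//calendars/work", true);
    // coll.m_path should come out as "/dav/calendars/work/"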

View file

@ -333,7 +333,7 @@ void WebDAVSource::contactServer()
std::string database = getDatabaseID();
if (!database.empty() &&
m_contextSettings) {
m_calendar = Neon::URI::parse(database);
m_calendar = Neon::URI::parse(database, true);
// m_contextSettings = m_settings, so this sets m_settings->getURL()
m_contextSettings->setURL(database);
// start talking to host defined by m_settings->getURL()
@ -596,11 +596,15 @@ bool WebDAVSource::findCollections(const boost::function<bool (const std::string
// First dump WebDAV "allprops" properties (does not contain
// properties which must be asked for explicitly!). Only
// relevant for debugging.
try {
SE_LOG_DEBUG(NULL, NULL, "debugging: read all WebDAV properties of %s", path.c_str());
Neon::Session::PropfindPropCallback_t callback =
boost::bind(&WebDAVSource::openPropCallback,
this, _1, _2, _3, _4);
m_session->propfindProp(path, 0, NULL, callback, deadline);
m_session->propfindProp(path, 0, NULL, callback, Timespec());
} catch (...) {
handleException();
}
}
// Now ask for some specific properties of interest for us.
@ -666,7 +670,7 @@ bool WebDAVSource::findCollections(const boost::function<bool (const std::string
success = true;
} catch (const Neon::RedirectException &ex) {
// follow to new location
Neon::URI next = Neon::URI::parse(ex.getLocation());
Neon::URI next = Neon::URI::parse(ex.getLocation(), true);
Neon::URI old = m_session->getURI();
// keep old host + scheme + port if not set in next location
if (next.m_scheme.empty()) {
@ -744,7 +748,8 @@ bool WebDAVSource::findCollections(const boost::function<bool (const std::string
// which returns information about "/dav" when asked about "/".
// Move to that path.
if (!m_davProps.empty()) {
string newpath = m_davProps.begin()->first;
pathProps = m_davProps.begin();
string newpath = pathProps->first;
SE_LOG_DEBUG(NULL, NULL, "use properties for '%s' instead of '%s'",
newpath.c_str(), path.c_str());
path = newpath;
@ -1161,11 +1166,15 @@ void WebDAVSource::listAllItemsCallback(const Neon::URI &uri,
std::string WebDAVSource::path2luid(const std::string &path)
{
if (boost::starts_with(path, m_calendar.m_path)) {
return Neon::URI::unescape(path.substr(m_calendar.m_path.size()));
// m_calendar.m_path is normalized, path is not.
// Before comparing, normalize it.
std::string res = Neon::URI::normalizePath(path, false);
if (boost::starts_with(res, m_calendar.m_path)) {
res = Neon::URI::unescape(res.substr(m_calendar.m_path.size()));
} else {
return path;
// keep full, absolute path as LUID
}
return res;
}
std::string WebDAVSource::luid2path(const std::string &luid)

View file

@ -226,6 +226,15 @@ class WebDAVSource : public TrackingSyncSource, private boost::noncopyable
op(oldBackup, dryrun, report);
}
/**
* return true if the resource with the given properties is one
* of those collections which is guaranteed to not contain
* other, unrelated collections (a CalDAV collection must not
* contain a CardDAV collection, for example)
*/
bool ignoreCollection(const StringMap &props) const;
protected:
/**
* Extracts ETag from response header, empty if not found.
*/
@ -235,14 +244,6 @@ class WebDAVSource : public TrackingSyncSource, private boost::noncopyable
* Extracts new LUID from response header, empty if not found.
*/
std::string getLUID(Neon::Request &req);
/**
* return true if the resource with the given properties is one
* of those collections which is guaranteed to not contain
* other, unrelated collections (a CalDAV collection must not
* contain a CardDAV collection, for example)
*/
bool ignoreCollection(const StringMap &props) const;
};
SE_END_CXX

View file

@ -1272,7 +1272,7 @@ bool Cmdline::run() {
source->throwError("reading items not supported");
}
err = ops.m_startDataRead("", "");
err = ops.m_startDataRead(*source, "", "");
CHECK_ERROR("reading items");
list<string> luids;
readLUIDs(source, luids);
@ -1292,7 +1292,7 @@ bool Cmdline::run() {
}
list<string> luids;
bool deleteAll = std::find(m_luids.begin(), m_luids.end(), "*") != m_luids.end();
err = ops.m_startDataRead("", "");
err = ops.m_startDataRead(*source, "", "");
CHECK_ERROR("reading items");
if (deleteAll) {
readLUIDs(source, luids);
@ -1300,21 +1300,21 @@ bool Cmdline::run() {
luids = m_luids;
}
if (ops.m_endDataRead) {
err = ops.m_endDataRead();
err = ops.m_endDataRead(*source);
CHECK_ERROR("stop reading items");
}
if (ops.m_startDataWrite) {
err = ops.m_startDataWrite();
err = ops.m_startDataWrite(*source);
CHECK_ERROR("writing items");
}
BOOST_FOREACH(const string &luid, luids) {
sysync::ItemIDType id;
id.item = (char *)luid.c_str();
err = ops.m_deleteItem(&id);
err = ops.m_deleteItem(*source, &id);
CHECK_ERROR("deleting item");
}
char *token;
err = ops.m_endDataWrite(true, &token);
err = ops.m_endDataWrite(*source, true, &token);
if (token) {
free(token);
}
@ -1325,14 +1325,14 @@ bool Cmdline::run() {
source->throwError("reading/writing items directly not supported");
}
if (m_import || m_update) {
err = ops.m_startDataRead("", "");
err = ops.m_startDataRead(*source, "", "");
CHECK_ERROR("reading items");
if (ops.m_endDataRead) {
err = ops.m_endDataRead();
err = ops.m_endDataRead(*source);
CHECK_ERROR("stop reading items");
}
if (ops.m_startDataWrite) {
err = ops.m_startDataWrite();
err = ops.m_startDataWrite(*source);
CHECK_ERROR("writing items");
}
@ -1412,13 +1412,13 @@ bool Cmdline::run() {
}
}
char *token = NULL;
err = ops.m_endDataWrite(true, &token);
err = ops.m_endDataWrite(*source, true, &token);
if (token) {
free(token);
}
CHECK_ERROR("stop writing items");
} else if (m_export) {
err = ops.m_startDataRead("", "");
err = ops.m_startDataRead(*source, "", "");
CHECK_ERROR("reading items");
ostream *out = NULL;
@ -1606,13 +1606,13 @@ void Cmdline::readLUIDs(SyncSource *source, list<string> &luids)
const SyncSource::Operations &ops = source->getOperations();
sysync::ItemIDType id;
sysync::sInt32 status;
sysync::TSyError err = ops.m_readNextItem(&id, &status, true);
sysync::TSyError err = ops.m_readNextItem(*source, &id, &status, true);
CHECK_ERROR("next item");
while (status != sysync::ReadNextItem_EOF) {
luids.push_back(id.item);
StrDispose(id.item);
StrDispose(id.parent);
err = ops.m_readNextItem(&id, &status, false);
err = ops.m_readNextItem(*source, &id, &status, false);
CHECK_ERROR("next item");
}
}

View file

@ -39,11 +39,16 @@ static std::vector<LoggerBase *> &loggers()
LoggerBase &LoggerBase::instance()
{
static LoggerStdout DefaultLogger;
// prevent destructing this instance as part of the executable's
// shutdown by allocating it dynamically, because it may be
// needed by other instances that get destructed later
// (order of global instance construction/destruction is
// undefined)
static LoggerStdout *DefaultLogger = new LoggerStdout;
if (!loggers().empty()) {
return *loggers()[loggers().size() - 1];
} else {
return DefaultLogger;
return *DefaultLogger;
}
}

View file

@ -71,6 +71,9 @@ bool MapSyncSource::serverModeEnabled() const
void MapSyncSource::detectChanges(SyncSourceRevisions::ChangeMode mode)
{
// erase content which might have been set in a previous call
reset();
// read old list from node (matches endSync() code)
m_revisions.clear();
ConfigProps props;

View file

@ -1059,6 +1059,9 @@ private:
}
public:
/** allow iterating over sources */
const inherited *getSourceSet() const { return this; }
LogLevel getLogLevel() const { return m_logLevel; }
void setLogLevel(LogLevel logLevel) { m_logLevel = logLevel; }
@ -1277,21 +1280,28 @@ public:
// call when all sync sources are ready to dump
// pre-sync databases
// @param excludeSource when non-empty, limit preparation to that source
void syncPrepare(const string &excludeSource = "") {
// @param sourceName limit preparation to that source
void syncPrepare(const string &sourceName) {
if (m_prepared.find(sourceName) != m_prepared.end()) {
// data dump was already done (can happen when running multiple
// SyncML sessions)
return;
}
if (m_logdir.getLogfile().size() &&
m_doLogging &&
(m_client.getDumpData() || m_client.getPrintChanges())) {
// dump initial databases
SE_LOG_INFO(NULL, NULL, "creating complete data backup before sync (%s)",
SE_LOG_INFO(NULL, NULL, "creating complete data backup of source %s before sync (%s)",
sourceName.c_str(),
(m_client.getDumpData() && m_client.getPrintChanges()) ? "enabled with dumpData and needed for printChanges" :
m_client.getDumpData() ? "because it was enabled with dumpData" :
m_client.getPrintChanges() ? "needed for printChanges" :
"???");
dumpDatabases("before", &SyncSourceReport::m_backupBefore, excludeSource);
dumpDatabases("before", &SyncSourceReport::m_backupBefore, sourceName);
if (m_client.getPrintChanges()) {
// compare against the old "after" database dump
dumpLocalChanges("", "after", "before", excludeSource,
dumpLocalChanges("", "after", "before", sourceName,
StringPrintf("%s data changes to be applied during synchronization:\n",
m_client.isLocalSync() ? m_client.getContextName().c_str() : "Local"));
}
@ -1463,6 +1473,27 @@ string SyncContext::askPassword(const string &passwordName, const string &descr,
}
}
void SyncContext::requestAnotherSync()
{
if (m_activeContext &&
m_activeContext->m_engine.get() &&
m_activeContext->m_session) {
SharedKey sessionKey =
m_activeContext->m_engine.OpenSessionKey(m_activeContext->m_session);
m_activeContext->m_engine.SetInt32Value(sessionKey,
"restartsync",
true);
}
}
const std::vector<SyncSource *> *SyncContext::getSources() const
{
return m_sourceListPtr ?
m_sourceListPtr->getSourceSet() :
NULL;
}
void SyncContext::readStdin(string &content)
{
if (!ReadFile(cin, content)) {
@ -1677,9 +1708,18 @@ void SyncContext::displaySourceProgress(sysync::TProgressEventEnum type,
}
break;
}
if (source.getFinalSyncMode() == SYNC_NONE) {
source.recordFinalSyncMode(SyncMode(mode));
source.recordFirstSync(extra1 == 2);
source.recordResumeSync(extra2 == 1);
} else if (SyncMode(mode) != SYNC_NONE) {
// may happen when the source is used in multiple
// SyncML sessions; only remember the initial sync
// mode in that case and count all following syncs
// (they should only finish the work of the initial
// one)
source.recordRestart();
}
} else {
SE_LOG_INFO(NULL, NULL, "%s: restore from backup", source.getDisplayName().c_str());
source.recordFinalSyncMode(SYNC_RESTORE_FROM_BACKUP);
@ -2428,6 +2468,7 @@ void SyncContext::getConfigXML(string &xml, string &configname)
debug << "<xmltranslate>" << (loglevel >= 4 ? "yes" : "no") << "</xmltranslate>\n";
if (loglevel >= 3) {
debug <<
" <sourcelink>doxygen</sourcelink>\n"
" <enable option=\"all\"/>\n"
" <enable option=\"userdata\"/>\n"
" <enable option=\"scripts\"/>\n"
@ -3009,7 +3050,7 @@ SyncMLStatus SyncContext::sync(SyncReport *report)
}
// request callback when starting to use source
source->addCallback(boost::bind(&SyncContext::startSourceAccess, this, source), &SyncSource::Operations::m_startAccess);
source->getOperations().m_startDataRead.getPreSignal().connect(boost::bind(&SyncContext::startSourceAccess, this, source));
}
// ready to go

View file

@ -455,6 +455,23 @@ class SyncContext : public SyncConfig, public ConfigUserInterface {
bool getRemoteInitiated() {return m_remoteInitiated;}
void setRemoteInitiated(bool remote) {m_remoteInitiated = remote;}
/**
* If called while a sync session runs,
* the engine will finish the session and then
* immediately try to run another one with
* the same sources.
*
* Does nothing when called at the wrong time.
* There's no guarantee either that restarting is
* possible.
*/
static void requestAnotherSync();
/**
* access to current set of sync sources, NULL if not instantiated yet
*/
const std::vector<SyncSource *> *getSources() const;
/**
* Read from stdin until end of stream.
*
@ -730,7 +747,7 @@ class SyncContext : public SyncConfig, public ConfigUserInterface {
void initLocalSync(const string &config);
/**
* called by SynthesDBPlugin in SyncEvolution_StartDataRead()
* called via pre-signal of m_startDataRead
*/
void startSourceAccess(SyncSource *source);

View file

@ -809,7 +809,11 @@ void SyncReport::prettyPrint(std::ostream &out, int flags) const
SyncSourceReport::ITEM_ANY,
SyncSourceReport::ITEM_RECEIVED_BYTES)) {
line <<
PrettyPrintSyncMode(source.getFinalSyncMode()) << ", " <<
PrettyPrintSyncMode(source.getFinalSyncMode()) << ", ";
if (source.getRestarts()) {
line << source.getRestarts() + 1 << " cycles, ";
}
line <<
source.getItemStat(SyncSourceReport::ITEM_LOCAL,
SyncSourceReport::ITEM_ANY,
SyncSourceReport::ITEM_SENT_BYTES) / 1024 <<
@ -980,6 +984,10 @@ ConfigNode &operator << (ConfigNode &node, const SyncReport &report)
string key;
key = prefix + "-mode";
node.setProperty(key, PrettyPrintSyncMode(source.getFinalSyncMode()));
if (source.getRestarts()) {
key = prefix + "-restarts";
node.setProperty(key, source.getRestarts());
}
key = prefix + "-first";
node.setProperty(key, source.isFirstSync());
key = prefix + "-resume";
@ -1066,6 +1074,11 @@ ConfigNode &operator >> (ConfigNode &node, SyncReport &report)
source.setItemStat(location, state, result, intval);
} else if (key == "mode") {
source.recordFinalSyncMode(StringToSyncMode(prop.second));
} else if (key == "restarts") {
int value;
if (node.getProperty(prop.first, value)) {
source.setRestarts(value);
}
} else if (key == "first") {
bool value;
if (node.getProperty(prop.first, value)) {

View file

@ -265,6 +265,7 @@ class SyncSourceReport {
m_resume = false;
m_mode = SYNC_NONE;
m_status = STATUS_OK;
m_restarts = 0;
}
enum ItemLocation {
@ -330,6 +331,14 @@ class SyncSourceReport {
void recordFinalSyncMode(SyncMode mode) { m_mode = mode; }
SyncMode getFinalSyncMode() const { return m_mode; }
void recordRestart() { m_restarts++; }
void setRestarts(int restarts) { m_restarts = restarts; }
/**
* number of times that the sync session was restarted
* involving the source, usually zero
*/
int getRestarts() const { return m_restarts; }
void recordFirstSync(bool isFirstSync) { m_first = isFirstSync; }
bool isFirstSync() const { return m_first; }
@ -354,6 +363,7 @@ class SyncSourceReport {
int m_stat[ITEM_LOCATION_MAX + 1][ITEM_STATE_MAX + 1][ITEM_RESULT_MAX + 1];
SyncMode m_mode;
int m_restarts;
bool m_first;
bool m_resume;
SyncMLStatus m_status;

View file

@ -36,6 +36,7 @@
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/lambda/lambda.hpp>
#include <ctype.h>
#include <errno.h>
@ -113,17 +114,18 @@ void SyncSourceBase::getDatastoreXML(string &xml, XMLConfigFragments &fragments)
xmlstream <<
" <plugin_earlystartdataread>yes</plugin_earlystartdataread>\n";
}
if (info.m_readOnly) {
xmlstream <<
" <!-- if this is set to 'yes', SyncML clients can only read\n"
" from the database, but make no modifications -->\n"
" <readonly>yes</readonly>\n";
}
xmlstream <<
" <plugin_datastoreadmin>" <<
(serverModeEnabled() ? "yes" : "no") <<
"</plugin_datastoreadmin>\n"
" <fromremoteonlysupport> yes </fromremoteonlysupport>\n"
"\n"
" <!-- General datastore settings for all DB types -->\n"
"\n"
" <!-- if this is set to 'yes', SyncML clients can only read\n"
" from the database, but make no modifications -->\n"
" <readonly>no</readonly>\n"
" <canrestart>yes</canrestart>\n"
"\n"
" <!-- conflict strategy: Newer item wins\n"
" You can set 'server-wins' or 'client-wins' as well\n"
@ -344,6 +346,17 @@ string SyncSource::backendsDebug() {
return scannedModules.debug.str();
}
void SyncSource::requestAnotherSync()
{
// At the moment the per-source request to restart cannot be
// stored; instead only a per-session request is set. That's okay
// for now because restarting is limited to sessions with only
// one source active (intentional simplification).
SE_LOG_DEBUG(this, NULL, "requesting another sync");
SyncContext::requestAnotherSync();
}
SyncSource *SyncSource::createSource(const SyncSourceParams &params, bool error, SyncConfig *config)
{
SourceType sourceType = getSourceType(params.m_nodes);
@ -489,6 +502,8 @@ SyncSource::Databases VirtualSyncSource::getDatabases()
void SyncSourceSession::init(SyncSource::Operations &ops)
{
ops.m_startDataRead = boost::bind(&SyncSourceSession::startDataRead, this, _1, _2);
ops.m_endDataRead = boost::lambda::constant(sysync::LOCERR_OK);
ops.m_startDataWrite = boost::lambda::constant(sysync::LOCERR_OK);
ops.m_endDataWrite = boost::bind(&SyncSourceSession::endDataWrite, this, _1, _2);
}
@ -522,6 +537,19 @@ bool SyncSourceChanges::addItem(const string &luid, State state)
return res.second;
}
bool SyncSourceChanges::reset()
{
bool removed = false;
for (int i = 0; i < MAX; i++) {
if (!m_items[i].empty()) {
m_items[i].clear();
removed = true;
}
}
m_first = true;
return removed;
}
sysync::TSyError SyncSourceChanges::iterate(sysync::ItemID aID,
sysync::sInt32 *aStatus,
bool aFirst)
@ -850,6 +878,8 @@ void ItemCache::finalize(BackupReport &report)
void SyncSourceRevisions::initRevisions()
{
if (!m_revisionsSet) {
// might still be filled with garbage from previous run
m_revisions.clear();
listAllItems(m_revisions);
m_revisionsSet = true;
}
@ -983,6 +1013,17 @@ void SyncSourceRevisions::restoreData(const SyncSource::Operations::ConstBackupI
void SyncSourceRevisions::detectChanges(ConfigNode &trackingNode, ChangeMode mode)
{
// erase content which might have been set in a previous call
reset();
if (!m_firstCycle) {
// detectChanges() must have been called before;
// don't trust our cached revisions in that case (not updated during sync!)
// TODO: keep the revision map up-to-date as part of a sync and reuse it
m_revisionsSet = false;
} else {
m_firstCycle = false;
}
if (mode == CHANGES_NONE) {
// shortcut because nothing changed: just copy our known item list
ConfigProps props;
@ -1120,6 +1161,7 @@ void SyncSourceRevisions::init(SyncSourceRaw *raw,
m_modTimeStamp = 0;
m_revisionAccuracySeconds = granularity;
m_revisionsSet = false;
m_firstCycle = false;
if (raw) {
ops.m_backupData = boost::bind(&SyncSourceRevisions::backupData,
this, _1, _2, _3);
@ -1128,7 +1170,7 @@ void SyncSourceRevisions::init(SyncSourceRaw *raw,
ops.m_restoreData = boost::bind(&SyncSourceRevisions::restoreData,
this, _1, _2, _3);
}
ops.m_endSession.push_back(boost::bind(&SyncSourceRevisions::sleepSinceModification,
ops.m_endDataWrite.getPostSignal().connect(boost::bind(&SyncSourceRevisions::sleepSinceModification,
this));
}
@ -1162,46 +1204,31 @@ std::string SyncSourceLogging::getDescription(const string &luid)
return "";
}
sysync::TSyError SyncSourceLogging::insertItemAsKey(sysync::KeyH aItemKey, sysync::ItemID newID, const boost::function<SyncSource::Operations::InsertItemAsKey_t> &parent)
void SyncSourceLogging::insertItemAsKey(sysync::KeyH aItemKey, sysync::ItemID newID)
{
std::string description = getDescription(aItemKey);
SE_LOG_INFO(this, NULL,
description.empty() ? "%s <%s>" : "%s \"%s\"",
"adding",
!description.empty() ? description.c_str() : "???");
if (parent) {
return parent(aItemKey, newID);
} else {
return sysync::LOCERR_NOTIMP;
}
}
sysync::TSyError SyncSourceLogging::updateItemAsKey(sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID newID, const boost::function<SyncSource::Operations::UpdateItemAsKey_t> &parent)
void SyncSourceLogging::updateItemAsKey(sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID newID)
{
std::string description = getDescription(aItemKey);
SE_LOG_INFO(this, NULL,
description.empty() ? "%s <%s>" : "%s \"%s\"",
"updating",
!description.empty() ? description.c_str() : aID ? aID->item : "???");
if (parent) {
return parent(aItemKey, aID, newID);
} else {
return sysync::LOCERR_NOTIMP;
}
}
sysync::TSyError SyncSourceLogging::deleteItem(sysync::cItemID aID, const boost::function<SyncSource::Operations::DeleteItem_t> &parent)
void SyncSourceLogging::deleteItem(sysync::cItemID aID)
{
std::string description = getDescription(aID->item);
SE_LOG_INFO(this, NULL,
description.empty() ? "%s <%s>" : "%s \"%s\"",
"deleting",
!description.empty() ? description.c_str() : aID->item);
if (parent) {
return parent(aID);
} else {
return sysync::LOCERR_NOTIMP;
}
}
void SyncSourceLogging::init(const std::list<std::string> &fields,
@ -1211,12 +1238,12 @@ void SyncSourceLogging::init(const std::list<std::string> &fields,
m_fields = fields;
m_sep = sep;
ops.m_insertItemAsKey = boost::bind(&SyncSourceLogging::insertItemAsKey,
this, _1, _2, ops.m_insertItemAsKey);
ops.m_updateItemAsKey = boost::bind(&SyncSourceLogging::updateItemAsKey,
this, _1, _2, _3, ops.m_updateItemAsKey);
ops.m_deleteItem = boost::bind(&SyncSourceLogging::deleteItem,
this, _1, ops.m_deleteItem);
ops.m_insertItemAsKey.getPreSignal().connect(boost::bind(&SyncSourceLogging::insertItemAsKey,
this, _2, _3));
ops.m_updateItemAsKey.getPreSignal().connect(boost::bind(&SyncSourceLogging::updateItemAsKey,
this, _2, _3, _4));
ops.m_deleteItem.getPreSignal().connect(boost::bind(&SyncSourceLogging::deleteItem,
this, _2));
}
sysync::TSyError SyncSourceAdmin::loadAdminData(const char *aLocDB,
@ -1390,8 +1417,7 @@ void SyncSourceAdmin::init(SyncSource::Operations &ops,
this, _1);
ops.m_deleteMapItem = boost::bind(&SyncSourceAdmin::deleteMapItem,
this, _1);
ops.m_endSession.push_back(boost::bind(&SyncSourceAdmin::flush,
this));
ops.m_endDataWrite.getPostSignal().connect(boost::bind(&SyncSourceAdmin::flush, this));
}
void SyncSourceAdmin::init(SyncSource::Operations &ops,

View file

@ -30,6 +30,7 @@
#include <synthesis/blobs.h>
#include <boost/function.hpp>
#include <boost/signals2.hpp>
#include <syncevo/declarations.h>
SE_BEGIN_CXX
@ -621,6 +622,327 @@ struct XMLConfigFragments {
m_remoterules;
};
/**
* used in SyncSource::Operations post-operation signal
*/
enum OperationExecution {
OPERATION_SKIPPED, /**< operation was skipped because pre-operation slot threw an exception */
OPERATION_EXCEPTION, /**< operation itself failed with an exception (may also return error code) */
OPERATION_FINISHED, /**< operation finished normally (but might have returned an error code) */
OPERATION_EMPTY /**< operation not implemented */
};
/**
* Implements the "call all slots, error if any failed" semantic of
* the pre- and post-signals described below.
*/
class OperationSlotInvoker {
public:
typedef sysync::TSyError result_type;
template<typename InputIterator>
result_type operator() (InputIterator first, InputIterator last) const
{
result_type res = sysync::LOCERR_OK;
while (first != last) {
try {
*first;
} catch (...) {
SyncMLStatus status = Exception::handle();
if (res == sysync::LOCERR_OK) {
res = static_cast<result_type>(status);
}
}
++first;
}
return res;
}
};
/**
* helper class, needs to be specialized based on number of parameters
*/
template<class F, int arity> class OperationWrapperSwitch;
/** one parameter */
template<class F> class OperationWrapperSwitch<F, 0>
{
public:
typedef sysync::TSyError result_type;
typedef boost::function<F> OperationType;
/**
* The pre-signal is invoked with the same parameters as
* the operations, plus a reference to the sync source as
* initial parameter. Slots may throw exceptions, which
* will skip the actual implementation. However, all slots
* will be invoked exactly once even if one of them throws
* an exception. The result of the operation then is the
* error code extracted from the first exception (see
* OperationSlotInvoker).
*/
typedef boost::signals2::signal<void (SyncSource &),
OperationSlotInvoker> PreSignal;
/**
* The post-signal is invoked exactly once, regardless
* whether the implementation was skipped, executed or
* doesn't exist at all. This information is passed as the
* second parameter, followed by the result of the
* operation or the pre-signals, followed by the
* parameters of the operation.
*
* As with the pre-signal, any slot may throw an exception
* to override the final result, but this won't interrupt
* calling the rest of the slots.
*/
typedef boost::signals2::signal<void (SyncSource &, OperationExecution, sysync::TSyError),
OperationSlotInvoker> PostSignal;
/**
* invokes signals and implementation of operation,
* combines all return codes into one
*/
sysync::TSyError operator () (SyncSource &source) const throw ()
{
sysync::TSyError res;
OperationExecution exec;
res = m_pre(source);
if (res != sysync::LOCERR_OK) {
exec = OPERATION_SKIPPED;
} else {
if (m_operation) {
try {
res = m_operation();
exec = OPERATION_FINISHED;
} catch (...) {
res = Exception::handle(/* source */);
exec = OPERATION_EXCEPTION;
}
} else {
res = sysync::LOCERR_NOTIMP;
exec = OPERATION_EMPTY;
}
}
sysync::TSyError newres = m_post(source, exec, res);
if (newres != sysync::LOCERR_OK) {
res = newres;
}
return res == STATUS_FATAL ? STATUS_DATASTORE_FAILURE : res;
}
/**
* Anyone may connect code to the signals via
* getOperations().getPre/PostSignal(), although strictly
* speaking this modifies the behavior of the
* implementation.
*/
PreSignal &getPreSignal() const { return const_cast<OperationWrapperSwitch<F, 0> *>(this)->m_pre; }
PostSignal &getPostSignal() const { return const_cast<OperationWrapperSwitch<F, 0> *>(this)->m_post; }
protected:
OperationType m_operation;
private:
PreSignal m_pre;
PostSignal m_post;
};
template<class F> class OperationWrapperSwitch<F, 1>
{
public:
typedef sysync::TSyError result_type;
typedef boost::function<F> OperationType;
typedef typename boost::function<F>::arg1_type arg1_type;
typedef boost::signals2::signal<void (SyncSource &, arg1_type a1),
OperationSlotInvoker> PreSignal;
typedef boost::signals2::signal<void (SyncSource &, OperationExecution, sysync::TSyError,
arg1_type a1),
OperationSlotInvoker> PostSignal;
sysync::TSyError operator () (SyncSource &source,
arg1_type a1) const throw ()
{
sysync::TSyError res;
OperationExecution exec;
res = m_pre(source, a1);
if (res != sysync::LOCERR_OK) {
exec = OPERATION_SKIPPED;
} else {
if (m_operation) {
try {
res = m_operation(a1);
exec = OPERATION_FINISHED;
} catch (...) {
res = Exception::handle(/* source */);
exec = OPERATION_EXCEPTION;
}
} else {
res = sysync::LOCERR_NOTIMP;
exec = OPERATION_EMPTY;
}
}
sysync::TSyError newres = m_post(source, exec, res, a1);
if (newres != sysync::LOCERR_OK) {
res = newres;
}
return res == STATUS_FATAL ? STATUS_DATASTORE_FAILURE : res;
}
PreSignal &getPreSignal() const { return const_cast<OperationWrapperSwitch<F, 1> *>(this)->m_pre; }
PostSignal &getPostSignal() const { return const_cast<OperationWrapperSwitch<F, 1> *>(this)->m_post; }
protected:
OperationType m_operation;
private:
PreSignal m_pre;
PostSignal m_post;
};
template<class F> class OperationWrapperSwitch<F, 2>
{
public:
typedef sysync::TSyError result_type;
typedef boost::function<F> OperationType;
typedef typename boost::function<F>::arg1_type arg1_type;
typedef typename boost::function<F>::arg2_type arg2_type;
typedef boost::signals2::signal<void (SyncSource &, arg1_type a1, arg2_type a2),
OperationSlotInvoker> PreSignal;
typedef boost::signals2::signal<void (SyncSource &, OperationExecution, sysync::TSyError,
arg1_type a1, arg2_type a2),
OperationSlotInvoker> PostSignal;
sysync::TSyError operator () (SyncSource &source,
arg1_type a1, arg2_type a2) const throw ()
{
sysync::TSyError res;
OperationExecution exec;
res = m_pre(source, a1, a2);
if (res != sysync::LOCERR_OK) {
exec = OPERATION_SKIPPED;
} else {
if (m_operation) {
try {
res = m_operation(a1, a2);
exec = OPERATION_FINISHED;
} catch (...) {
res = Exception::handle(/* source */);
exec = OPERATION_EXCEPTION;
}
} else {
res = sysync::LOCERR_NOTIMP;
exec = OPERATION_EMPTY;
}
}
sysync::TSyError newres = m_post(source, exec, res, a1, a2);
if (newres != sysync::LOCERR_OK) {
res = newres;
}
return res == STATUS_FATAL ? STATUS_DATASTORE_FAILURE : res;
}
PreSignal &getPreSignal() const { return const_cast<OperationWrapperSwitch<F, 2> *>(this)->m_pre; }
PostSignal &getPostSignal() const { return const_cast<OperationWrapperSwitch<F, 2> *>(this)->m_post; }
protected:
OperationType m_operation;
private:
PreSignal m_pre;
PostSignal m_post;
};
template<class F> class OperationWrapperSwitch<F, 3>
{
public:
typedef sysync::TSyError result_type;
typedef boost::function<F> OperationType;
typedef typename boost::function<F>::arg1_type arg1_type;
typedef typename boost::function<F>::arg2_type arg2_type;
typedef typename boost::function<F>::arg3_type arg3_type;
typedef boost::signals2::signal<void (SyncSource &, arg1_type a1, arg2_type a2, arg3_type a3),
OperationSlotInvoker> PreSignal;
typedef boost::signals2::signal<void (SyncSource &, OperationExecution, sysync::TSyError,
arg1_type a1, arg2_type a2, arg3_type a3),
OperationSlotInvoker> PostSignal;
sysync::TSyError operator () (SyncSource &source,
arg1_type a1, arg2_type a2, arg3_type a3) const throw ()
{
sysync::TSyError res;
OperationExecution exec;
res = m_pre(source, a1, a2, a3);
if (res != sysync::LOCERR_OK) {
exec = OPERATION_SKIPPED;
} else {
if (m_operation) {
try {
res = m_operation(a1, a2, a3);
exec = OPERATION_FINISHED;
} catch (...) {
res = Exception::handle(/* source */);
exec = OPERATION_EXCEPTION;
}
} else {
res = sysync::LOCERR_NOTIMP;
exec = OPERATION_EMPTY;
}
}
sysync::TSyError newres = m_post(source, exec, res, a1, a2, a3);
if (newres != sysync::LOCERR_OK) {
res = newres;
}
return res == STATUS_FATAL ? STATUS_DATASTORE_FAILURE : res;
}
PreSignal &getPreSignal() const { return const_cast<OperationWrapperSwitch<F, 3> *>(this)->m_pre; }
PostSignal &getPostSignal() const { return const_cast<OperationWrapperSwitch<F, 3> *>(this)->m_post; }
protected:
OperationType m_operation;
private:
PreSignal m_pre;
PostSignal m_post;
};
/**
* This mimics a boost::function with the same signature. The function
* signature F must have a sysync::TSyError error return code, as in most
* of the Synthesis DB API.
*
* Specializations of this class for operations with different number
* of parameters provide a call operator which invokes a pre- and
* post-signal around the actual implementation. See
* OperationWrapperSwitch<F, 0> for details.
*
* Additional operations could be wrapped by providing more
* specializations (different return code, more parameters). The
* number of parameters in the operation cannot exceed six, because
* adding three more parameters in the post-signal would push the
* total number of parameters in that signal beyond the limit of nine
* supported arguments in boost::signals2/boost::function.
*/
template<class F> class OperationWrapper :
public OperationWrapperSwitch<F, boost::function<F>::arity>
{
typedef OperationWrapperSwitch<F, boost::function<F>::arity> inherited;
public:
/** operation implemented? */
operator bool () const { return inherited::m_operation; }
/**
* Only usable by derived classes via read/write m_operations:
* sets the actual implementation of the operation.
*/
void operator = (const boost::function<F> &operation)
{
inherited::m_operation = operation;
}
};
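
To make the intended use more concrete, here is a minimal sketch (the operation, slot functions and helper names are made up for illustration; only OperationWrapper, OperationExecution, the getPreSignal()/getPostSignal() accessors and the sysync error codes come from the surrounding code):

    // Hypothetical wrapped operation with one parameter.
    typedef OperationWrapper<sysync::TSyError (const char *luid)> RemoveItem_t;

    sysync::TSyError doRemoveItem(const char *luid);          // assumed implementation
    void logRemove(SyncSource &source, const char *luid);     // pre-slot, may throw to skip the operation
    void checkRemove(SyncSource &source, OperationExecution exec,
                     sysync::TSyError res, const char *luid); // post-slot, may throw to override the result

    void example(SyncSource &source)
    {
        RemoveItem_t removeItem;
        removeItem = &doRemoveItem;                        // a derived class sets the implementation
        removeItem.getPreSignal().connect(&logRemove);     // observers attach without replacing it
        removeItem.getPostSignal().connect(&checkRemove);

        // runs pre-slots, then the implementation (if set), then post-slots,
        // combining the error codes as described above
        sysync::TSyError res = removeItem(source, "luid-1234");
        (void)res;
    }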
/**
* abstract base class for SyncSource with some common functionality
* and no data
@ -780,6 +1102,9 @@ class SyncSourceBase : public Logger {
* operations, the bridge code in SynthesisDBPlugin code catches
* exceptions, logs them and translates them into Synthesis error
* codes, which are returned to the Synthesis engine.
*
* Monitoring of most DB operations is possible via the pre- and
* post-signals managed by OperationWrapper.
*/
struct Operations {
/**
@ -894,51 +1219,35 @@ class SyncSourceBase : public Logger {
* to be part of a sync session.
*/
/**@{*/
typedef void (Callback_t)();
typedef boost::function<Callback_t> CallbackFunctor_t;
typedef std::list<CallbackFunctor_t> Callbacks_t;
typedef OperationWrapper<sysync::TSyError (const char *, const char *)> StartDataRead_t;
StartDataRead_t m_startDataRead;
/** all of these functions will be called before accessing
the source's data for the first time, i.e., before m_startDataRead */
Callbacks_t m_startAccess;
typedef OperationWrapper<sysync::TSyError ()> EndDataRead_t;
EndDataRead_t m_endDataRead;
typedef sysync::TSyError (StartDataRead_t)(const char *lastToken, const char *resumeToken);
boost::function<StartDataRead_t> m_startDataRead;
typedef OperationWrapper<sysync::TSyError ()> StartDataWrite_t;
StartDataWrite_t m_startDataWrite;
/** all of these functions will be called directly after
m_startDataRead() returned successfully */
Callbacks_t m_startSession;
typedef sysync::TSyError (EndDataRead_t)();
boost::function<EndDataRead_t> m_endDataRead;
typedef sysync::TSyError (StartDataWrite_t)();
boost::function<StartDataWrite_t> m_startDataWrite;
/** all of these functions will be called right
before m_endDataWrite() */
std::list<CallbackFunctor_t> m_endSession;
typedef sysync::TSyError (EndDataWrite_t)(bool success, char **newToken);
boost::function<EndDataWrite_t> m_endDataWrite;
typedef OperationWrapper<sysync::TSyError (bool success, char **newToken)> EndDataWrite_t;
EndDataWrite_t m_endDataWrite;
/** the SynthesisDBPlugin is configured so that this operation
doesn't have to (and cannot) return the item data */
typedef sysync::TSyError (ReadNextItem_t)(sysync::ItemID aID,
sysync::sInt32 *aStatus, bool aFirst);
boost::function<ReadNextItem_t> m_readNextItem;
typedef OperationWrapper<sysync::TSyError (sysync::ItemID aID,
sysync::sInt32 *aStatus, bool aFirst)> ReadNextItem_t;
ReadNextItem_t m_readNextItem;
typedef sysync::TSyError (ReadItemAsKey_t)(sysync::cItemID aID, sysync::KeyH aItemKey);
boost::function<ReadItemAsKey_t> m_readItemAsKey;
typedef OperationWrapper<sysync::TSyError (sysync::cItemID aID, sysync::KeyH aItemKey)> ReadItemAsKey_t;
ReadItemAsKey_t m_readItemAsKey;
typedef sysync::TSyError (InsertItemAsKey_t)(sysync::KeyH aItemKey, sysync::ItemID newID);
boost::function<InsertItemAsKey_t> m_insertItemAsKey;
typedef OperationWrapper<sysync::TSyError (sysync::KeyH aItemKey, sysync::ItemID newID)> InsertItemAsKey_t;
InsertItemAsKey_t m_insertItemAsKey;
typedef sysync::TSyError (UpdateItemAsKey_t)(sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID updID);
boost::function<UpdateItemAsKey_t> m_updateItemAsKey;
typedef OperationWrapper<sysync::TSyError (sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID updID)> UpdateItemAsKey_t;
UpdateItemAsKey_t m_updateItemAsKey;
typedef sysync::TSyError (DeleteItem_t)(sysync::cItemID aID);
boost::function<DeleteItem_t> m_deleteItem;
typedef OperationWrapper<sysync::TSyError (sysync::cItemID aID)> DeleteItem_t;
DeleteItem_t m_deleteItem;
/**@}*/
@ -955,47 +1264,50 @@ class SyncSourceBase : public Logger {
* implementation, like the one from SyncSourceAdmin.
*/
/**@{*/
typedef sysync::TSyError (LoadAdminData_t)(const char *aLocDB,
typedef OperationWrapper<sysync::TSyError (const char *aLocDB,
const char *aRemDB,
char **adminData);
boost::function<LoadAdminData_t> m_loadAdminData;
char **adminData)> LoadAdminData_t;
LoadAdminData_t m_loadAdminData;
typedef sysync::TSyError (SaveAdminData_t)(const char *adminData);
boost::function<SaveAdminData_t> m_saveAdminData;
typedef OperationWrapper<sysync::TSyError (const char *adminData)> SaveAdminData_t;
SaveAdminData_t m_saveAdminData;
// not currently wrapped because it has a different return type;
// templates could be adapted to handle that
typedef bool (ReadNextMapItem_t)(sysync::MapID mID, bool aFirst);
boost::function<ReadNextMapItem_t> m_readNextMapItem;
typedef sysync::TSyError (InsertMapItem_t)(sysync::cMapID mID);
boost::function<InsertMapItem_t> m_insertMapItem;
typedef OperationWrapper<sysync::TSyError (sysync::cMapID mID)> InsertMapItem_t;
InsertMapItem_t m_insertMapItem;
typedef sysync::TSyError (UpdateMapItem_t)(sysync::cMapID mID);
boost::function<UpdateMapItem_t> m_updateMapItem;
typedef OperationWrapper<sysync::TSyError (sysync::cMapID mID)> UpdateMapItem_t;
UpdateMapItem_t m_updateMapItem;
typedef sysync::TSyError (DeleteMapItem_t)(sysync::cMapID mID);
boost::function<DeleteMapItem_t> m_deleteMapItem;
typedef OperationWrapper<sysync::TSyError (sysync::cMapID mID)> DeleteMapItem_t;
DeleteMapItem_t m_deleteMapItem;
typedef sysync::TSyError (ReadBlob_t)(sysync::cItemID aID, const char *aBlobID,
// not wrapped, too many parameters
typedef boost::function<sysync::TSyError (sysync::cItemID aID, const char *aBlobID,
void **aBlkPtr, size_t *aBlkSize,
size_t *aTotSize,
bool aFirst, bool *aLast);
boost::function<ReadBlob_t> m_readBlob;
bool aFirst, bool *aLast)> ReadBlob_t;
ReadBlob_t m_readBlob;
typedef sysync::TSyError (WriteBlob_t)(sysync::cItemID aID, const char *aBlobID,
typedef boost::function<sysync::TSyError (sysync::cItemID aID, const char *aBlobID,
void *aBlkPtr, size_t aBlkSize,
size_t aTotSize,
bool aFirst, bool aLast);
boost::function<WriteBlob_t> m_writeBlob;
bool aFirst, bool aLast)> WriteBlob_t;
WriteBlob_t m_writeBlob;
typedef sysync::TSyError (DeleteBlob_t)(sysync::cItemID aID, const char *aBlobID);
boost::function<DeleteBlob_t> m_deleteBlob;
typedef OperationWrapper<sysync::TSyError (sysync::cItemID aID, const char *aBlobID)> DeleteBlob_t;
DeleteBlob_t m_deleteBlob;
/**@}*/
};
/**
* Read-only access to operations.
*/
virtual const Operations &getOperations() = 0;
virtual const Operations &getOperations() const = 0;
protected:
struct SynthesisInfo {
@ -1061,6 +1373,14 @@ class SyncSourceBase : public Logger {
* See SyncSourceSession::beginSync for further comments.
*/
Bool m_earlyStartDataRead;
/**
* If true, then the storage is considered read-only by the
* engine. All write requests by the peer will be silently
* discarded. This is necessary for slow syncs, where the peer
* might send back modified items.
*/
Bool m_readOnly;
};
/**
@ -1156,13 +1476,7 @@ class SyncSource : virtual public SyncSourceBase, public SyncSourceConfig, publi
* Read-only access to operations. Derived classes can modify
* them via m_operations.
*/
virtual const Operations &getOperations() { return m_operations; }
/**
* outside users of the source are only allowed to add callbacks,
* not overwrite arbitrary operations
*/
void addCallback(Operations::CallbackFunctor_t callback, Operations::Callbacks_t Operations::* where) { (m_operations.*where).push_back(callback); }
virtual const Operations &getOperations() const { return m_operations; }
/**
* closes the data source so that it can be reopened
@ -1190,6 +1504,30 @@ class SyncSource : virtual public SyncSourceBase, public SyncSourceConfig, publi
*/
void popSynthesisAPI();
/**
* If called while a sync session runs (i.e. after m_startDataRead
* (aka beginSync()) and before m_endDataWrite (aka endSync())),
* the engine will finish the session and then immediately try to
* run another session where any source in which requestAnotherSync()
* was called is active again. There is no guarantee that this
* will be possible.
*
* The source must be prepared to correctly handle another sync
* session. m_endDataWrite will be called and then the sequence
* of calls starts again at m_startDataRead.
*
* The sync mode will switch to an incremental sync in the same
* direction as the initial sync (one-way to client or server,
* two-way).
*
* Does nothing when called at the wrong time. There's no
* guarantee either that restarting is possible.
*
* Currently only supported when a single source is active in
* the initial sync.
*/
void requestAnotherSync();
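
A minimal sketch of the intended call site, with a hypothetical helper standing in for backend-specific logic (only requestAnotherSync() itself is part of this API):

    // Hypothetical snippet inside a backend's sync-session code: after
    // noticing that more local changes are queued than could be reported
    // in this cycle, ask the engine for one more cycle.
    if (haveMorePendingChanges()) {   // helper assumed for illustration
        requestAnotherSync();         // no guarantee that the engine can restart
    }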
/**
* factory function for a SyncSource that provides the
* source type specified in the params.m_nodes.m_configNode
@ -1438,6 +1776,11 @@ class SyncSourceChanges : virtual public SyncSourceBase {
*/
bool addItem(const string &luid, State state = ANY);
/**
* Wipe out all added items, returning true if any were found.
*/
bool reset();
typedef std::set<std::string> Items_t;
const Items_t &getItems(State state) { return m_items[state]; }
const Items_t &getAllItems() const { return m_items[ANY]; }
@ -1926,6 +2269,7 @@ class SyncSourceRevisions : virtual public SyncSourceChanges, virtual public Syn
/** buffers the result of the initial listAllItems() call */
RevisionMap_t m_revisions;
bool m_revisionsSet;
bool m_firstCycle;
void initRevisions();
/**
@ -2005,12 +2349,9 @@ class SyncSourceLogging : public virtual SyncSourceBase
std::list<std::string> m_fields;
std::string m_sep;
sysync::TSyError insertItemAsKey(sysync::KeyH aItemKey, sysync::ItemID newID,
const boost::function<SyncSource::Operations::InsertItemAsKey_t> &parent);
sysync::TSyError updateItemAsKey(sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID newID,
const boost::function<SyncSource::Operations::UpdateItemAsKey_t> &parent);
sysync::TSyError deleteItem(sysync::cItemID aID,
const boost::function<SyncSource::Operations::DeleteItem_t> &parent);
void insertItemAsKey(sysync::KeyH aItemKey, sysync::ItemID newID);
void updateItemAsKey(sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID newID);
void deleteItem(sysync::cItemID aID);
};
/**

View file

@ -442,15 +442,7 @@ TSyError SyncEvolution_LoadAdminData( CContext aContext, cAppCharP aLocDB,
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = DB_Forbidden;
try {
if (source->getOperations().m_loadAdminData) {
res = source->getOperations().m_loadAdminData(aLocDB, aRemDB, adminData);
}
} catch (...) {
res = source->handleException();
}
TSyError res = source->getOperations().m_loadAdminData(*source, aLocDB, aRemDB, adminData);
SE_LOG_DEBUG(source, NULL, "LoadAdminData '%s' '%s', '%s' res=%d",
aLocDB, aRemDB, *adminData ? *adminData : "", res);
return res;
@ -465,15 +457,7 @@ TSyError SyncEvolution_SaveAdminData( CContext aContext, cAppCharP adminData )
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = DB_Forbidden;
try {
if (source->getOperations().m_saveAdminData) {
res = source->getOperations().m_saveAdminData(adminData);
}
} catch (...) {
res = source->handleException();
}
TSyError res = source->getOperations().m_saveAdminData(*source, adminData);
SE_LOG_DEBUG(source, NULL, "SaveAdminData '%s' res=%d", adminData, res);
return res;
} /* SaveAdminData */
@ -521,15 +505,7 @@ TSyError SyncEvolution_InsertMapItem( CContext aContext, cMapID mID )
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = DB_Forbidden;
try {
if (source->getOperations().m_insertMapItem) {
res = source->getOperations().m_insertMapItem(mID);
}
} catch (...) {
res = source->handleException();
}
TSyError res = source->getOperations().m_insertMapItem(*source, mID);
SE_LOG_DEBUG(source, NULL, "InsertMapItem '%s' + %x = '%s' + %x res=%d",
NullPtrCheck(mID->localID), mID->ident,
NullPtrCheck(mID->remoteID), mID->flags,
@ -546,15 +522,7 @@ TSyError SyncEvolution_UpdateMapItem( CContext aContext, cMapID mID )
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = DB_Forbidden;
try {
if (source->getOperations().m_updateMapItem) {
res = source->getOperations().m_updateMapItem(mID);
}
} catch (...) {
res = source->handleException();
}
TSyError res = source->getOperations().m_updateMapItem(*source, mID);
SE_LOG_DEBUG(source, NULL, "UpdateMapItem '%s' + %x = '%s' + %x, res=%d",
mID->localID, mID->ident,
mID->remoteID, mID->flags,
@ -572,15 +540,7 @@ TSyError SyncEvolution_DeleteMapItem( CContext aContext, cMapID mID )
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = DB_Forbidden;
try {
if (source->getOperations().m_deleteMapItem) {
res = source->getOperations().m_deleteMapItem(mID);
}
} catch (...) {
res = source->handleException();
}
TSyError res = source->getOperations().m_deleteMapItem(*source, mID);
SE_LOG_DEBUG(source, NULL, "DeleteMapItem '%s' + %x = '%s' + %x res=%d",
mID->localID, mID->ident,
mID->remoteID, mID->flags,
@ -655,23 +615,7 @@ TSyError SyncEvolution_StartDataRead( CContext aContext, cAppCharP lastToken,
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
try {
BOOST_FOREACH(const SyncSource::Operations::CallbackFunctor_t &callback,
source->getOperations().m_startAccess) {
callback();
}
if (source->getOperations().m_startDataRead) {
res = source->getOperations().m_startDataRead(lastToken, resumeToken);
}
BOOST_FOREACH(const SyncSource::Operations::CallbackFunctor_t &callback,
source->getOperations().m_startSession) {
callback();
}
} catch (...) {
res = source->handleException();
}
TSyError res = source->getOperations().m_startDataRead(*source, lastToken, resumeToken);
SE_LOG_DEBUG(source, NULL, "StartDataRead last='%s' resume='%s' res=%d",
lastToken, resumeToken, res);
return res;
@ -687,17 +631,9 @@ TSyError SyncEvolution_ReadNextItemAsKey( CContext aContext, ItemID aID, KeyH aI
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
*aStatus = 0;
memset(aID, 0, sizeof(*aID));
if (source->getOperations().m_readNextItem) {
try {
res = source->getOperations().m_readNextItem(aID, aStatus, aFirst);
} catch (...) {
res = source->handleException();
}
}
TSyError res = source->getOperations().m_readNextItem(*source, aID, aStatus, aFirst);
SE_LOG_DEBUG(source, NULL, "ReadNextItemAsKey aStatus=%d aID=(%s,%s) res=%d",
*aStatus, aID->item, aID->parent, res);
return res;
@ -710,15 +646,7 @@ TSyError SyncEvolution_ReadItemAsKey( CContext aContext, cItemID aID, KeyH aItem
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
if (source->getOperations().m_readItemAsKey) {
try {
res = source->getOperations().m_readItemAsKey(aID, aItemKey);
} catch (...) {
res = source->handleException();
}
}
TSyError res = source->getOperations().m_readItemAsKey(*source, aID, aItemKey);
SE_LOG_DEBUG(source, NULL, "ReadItemAsKey aID=(%s,%s) res=%d",
aID->item, aID->parent, res);
return res;
@ -772,15 +700,7 @@ TSyError SyncEvolution_EndDataRead( CContext aContext )
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
if (source->getOperations().m_endDataRead) {
try {
res = source->getOperations().m_endDataRead();
} catch (...) {
res = source->handleException();
}
}
TSyError res = source->getOperations().m_endDataRead(*source);
SE_LOG_DEBUG(source, NULL, "EndDataRead res=%d", res);
return res;
}
@ -808,15 +728,7 @@ TSyError SyncEvolution_InsertItemAsKey( CContext aContext, KeyH aItemKey, ItemID
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
if (source->getOperations().m_insertItemAsKey) {
try {
res = source->getOperations().m_insertItemAsKey(aItemKey, newID);
} catch (...) {
res = source->handleException();
}
}
TSyError res = source->getOperations().m_insertItemAsKey(*source, aItemKey, newID);
SE_LOG_DEBUG(source, NULL, "InsertItemAsKey res=%d\n", res);
return res;
}
@ -830,16 +742,7 @@ TSyError SyncEvolution_UpdateItemAsKey( CContext aContext, KeyH aItemKey, cItemI
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
if (source->getOperations().m_updateItemAsKey) {
try {
res = source->getOperations().m_updateItemAsKey(aItemKey, aID, updID);
} catch (...) {
res = source->handleException();
}
}
TSyError res = source->getOperations().m_updateItemAsKey(*source, aItemKey, aID, updID);
SE_LOG_DEBUG(source, NULL, "aID=(%s,%s) res=%d",
aID->item,aID->parent, res);
return res;
@ -868,15 +771,7 @@ TSyError SyncEvolution_DeleteItem( CContext aContext, cItemID aID )
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
if (source->getOperations().m_deleteItem) {
try {
res = source->getOperations().m_deleteItem (aID);
} catch (...) {
res = source->handleException(HANDLE_EXCEPTION_404_IS_OKAY);
}
}
TSyError res = source->getOperations().m_deleteItem(*source, aID);
SE_LOG_DEBUG(source, NULL, "DeleteItem aID=(%s,%s) res=%d",
aID->item, aID->parent, res);
return res;
@ -947,17 +842,7 @@ TSyError SyncEvolution_DeleteBlob( CContext aContext, cItemID aID, cAppCharP aBl
return LOCERR_WRONGUSAGE;
}
TSyError res;
if (source->getOperations().m_deleteBlob) {
try {
res = source->getOperations().m_deleteBlob(aID, aBlobID);
} catch (...) {
res = source->handleException();
}
} else {
res = LOCERR_NOTIMP;
}
TSyError res = source->getOperations().m_deleteBlob(*source, aID, aBlobID);
SE_LOG_DEBUG(source, NULL, "DeleteBlob aID=(%s,%s) aBlobID=(%s) res=%d",
aID->item,aID->parent, aBlobID, res);
return res;
@ -970,19 +855,7 @@ TSyError SyncEvolution_EndDataWrite( CContext aContext, bool success, appCharP *
if (!source) {
return LOCERR_WRONGUSAGE;
}
TSyError res = LOCERR_OK;
try {
BOOST_FOREACH(const SyncSource::Operations::CallbackFunctor_t &callback,
source->getOperations().m_endSession) {
callback();
}
if (source->getOperations().m_endDataWrite) {
res = source->getOperations().m_endDataWrite(success, newToken);
}
} catch (...) {
res = source->handleException();
}
TSyError res = source->getOperations().m_endDataWrite(*source, success, newToken);
SE_LOG_DEBUG(source, NULL, "EndDataWrite %s '%s' res=%d",
success ? "COMMIT":"ROLLBACK", *newToken, res);
return res;

View file

@ -15,5 +15,12 @@
client we'll have not much such changes. -->
<updateclientinslowsync>yes</updateclientinslowsync>
<updateserverinslowsync>yes</updateserverinslowsync>
<!--
unconditionally disable the standard-compliant "skip map phase"
behavior in favor of letting the client choose to restart the
sync session if it has more changes
-->
<completefromclientonly>yes</completefromclientonly>
</remoterule>

File diff suppressed because it is too large

View file

@ -73,15 +73,15 @@ class CheckSyncReport {
serverAdded(srAdded),
serverUpdated(srUpdated),
serverDeleted(srDeleted),
restarts(0),
mustSucceed(mstSucceed),
syncMode(mode),
m_report(NULL)
{}
virtual ~CheckSyncReport() {}
int clientAdded, clientUpdated, clientDeleted,
serverAdded, serverUpdated, serverDeleted;
int restarts;
bool mustSucceed;
SyncMode syncMode;
@ -90,7 +90,7 @@ class CheckSyncReport {
CheckSyncReport &setMode(SyncMode mode) { syncMode = mode; return *this; }
CheckSyncReport &setReport(SyncReport *report) { m_report = report; return *this; }
CheckSyncReport &setRestarts(int r) { restarts = r; return *this; }
/**
* checks that the sync completed as expected and throws
@ -99,7 +99,12 @@ class CheckSyncReport {
* @param status return code from SyncClient::sync()
* @param report the sync report stored in the SyncClient
*/
virtual void check(SyncMLStatus status, SyncReport &report) const;
void check(SyncMLStatus status, SyncReport &report) const;
/**
* checks that the source report matches the expectations
*/
void check(const std::string &name, const SyncSourceReport &report) const;
};
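The new restarts counter, setRestarts() and the per-source check() overload are meant to be driven from the test cases. A hypothetical usage sketch follows; the constructor argument order is assumed (only part of the initializer list is visible above), SYNC_TWO_WAY is assumed to be the two-way value of the test framework's SyncMode enum, and status/report/sourceReport stand for values produced by an actual sync run.

    // Expect one client-side addition, nothing else, and exactly one
    // restart, i.e. two cycles inside the same SyncML session.
    CheckSyncReport expected(1, 0, 0,   // client added/updated/deleted (assumed order)
                             0, 0, 0);  // server added/updated/deleted
    expected.setMode(SYNC_TWO_WAY)
            .setRestarts(1);
    // after running the sync:
    //   expected.check(status, report);             // whole session
    //   expected.check("eds_event", sourceReport);  // single source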
/**
@ -135,13 +140,13 @@ struct SyncOptions {
bool m_isAborted;
typedef boost::function<bool (SyncContext &,
SyncOptions &)> Callback_t;
/**
* Callback to be invoked after setting up local sources, but
* before running the engine. May throw an exception to indicate
* an error, or return true to stop the sync without an error.
*/
typedef boost::function<bool (SyncContext &,
SyncOptions &)> Callback_t;
Callback_t m_startCallback;
/**
@ -555,6 +560,26 @@ public:
virtual std::list<std::string> insertManyItems(CreateSource createSource, int startIndex = 1, int numItems = 0, int size = -1);
virtual std::list<std::string> insertManyItems(TestingSyncSource *source, int startIndex = 1, int numItems = 0, int size = -1);
/**
* Update existing items. Must match a corresponding previous call to
* insertManyItems().
*
* @param revision revision number, used to distinguish different generations of each item
* @param luids result from corresponding insertManyItems() call
* @param offset skip that many items at the start of luids before updating the following ones
*/
void updateManyItems(CreateSource createSource, int startIndex, int numItems, int size,
int revision,
std::list<std::string> &luids,
int offset);
/**
* Delete items. Skips offset items in luids before deleting numItems.
*/
void removeManyItems(CreateSource createSource, int numItems,
std::list<std::string> &luids,
int offset);
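A hypothetical sketch of how these helpers combine with insertManyItems() in an incremental change test; createSource stands for a suitable CreateSource instance, and the exact interplay of startIndex and offset is an assumption derived from the parameter documentation above.

    // Assumed usage inside a LocalTests-style test: create items 1..10,
    // then touch two disjoint halves of them.
    std::list<std::string> luids =
        insertManyItems(createSource, /* startIndex */ 1, /* numItems */ 10);

    // bump items 6..10 to revision 2, skipping the first five luids
    updateManyItems(createSource, /* startIndex */ 6, /* numItems */ 5, /* size */ -1,
                    /* revision */ 2, luids, /* offset */ 5);

    // delete items 1..5, i.e. the first five luids
    removeManyItems(createSource, /* numItems */ 5, luids, /* offset */ 0);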
/**
* update every single item, using config.update
*/
@ -586,7 +611,9 @@ public:
virtual void testLocalDeleteAll();
virtual void testComplexInsert();
virtual void testLocalUpdate();
void doChanges(bool restart);
virtual void testChanges();
virtual void testChangesMultiCycles();
virtual void testImport();
virtual void testImportDelete();
virtual void testRemoveProperties();
@ -615,7 +642,6 @@ public:
ClientTestConfig::LinkedItems_t getParentChildData();
};
int countItemsOfType(TestingSyncSource *source, int state);
std::list<std::string> listItemsOfType(TestingSyncSource *source, int state);
/**
@ -719,6 +745,15 @@ protected:
virtual void testRefreshFromServerSemantic();
virtual void testRefreshStatus();
void doRestartSync(SyncMode mode);
void testTwoWayRestart();
void testSlowRestart();
void testRefreshFromLocalRestart();
void testOneWayFromLocalRestart();
void testRefreshFromRemoteRestart();
void testOneWayFromRemoteRestart();
void testManyRestarts();
void testCopy();
virtual void testUpdate();
@ -846,6 +881,20 @@ protected:
CT_WRAP_ASSERT(file, line, doSync(options));
}
virtual void postSync(int res, const std::string &logname);
private:
void allSourcesInsert();
void allSourcesUpdate();
void allSourcesDeleteAll();
void allSourcesInsertMany(int startIndex, int numItems,
std::map<int, std::list<std::string> > &luids);
void allSourcesUpdateMany(int startIndex, int numItems,
int revision,
std::map<int, std::list<std::string> > &luids,
int offset);
void allSourcesRemoveMany(int numItems,
std::map<int, std::list<std::string> > &luids,
int offset);
};
/*

View file

@ -267,7 +267,8 @@ def step2(resultdir, result, servers, indents, srcdir, shellprefix, backenddir):
# then get added at the end.
for source in ('file_task', 'file_event', 'file_contact', 'eds_contact', 'eds_event'):
os.chdir (srcdir)
fout,fin=popen2.popen2(shellprefix + " env LD_LIBRARY_PATH=build-synthesis/src/.libs SYNCEVOLUTION_BACKEND_DIR="+backenddir +" CLIENT_TEST_SOURCES="+source+" ./client-test -h")
cmd = shellprefix + " env LD_LIBRARY_PATH=build-synthesis/src/.libs SYNCEVOLUTION_BACKEND_DIR="+backenddir +" CLIENT_TEST_PEER_CAN_RESTART=1 CLIENT_TEST_SOURCES="+source+" ./client-test -h"
fout,fin=popen2.popen2(cmd)
os.chdir(oldpath)
for line in fout:
l = line.partition('Client::Sync::'+source+'::')[2].rpartition('\n')[0]

View file

@ -854,7 +854,7 @@ class SyncEvolutionCheckout(GitCheckout):
name, context.workdir,
# parameter to autogen.sh in SyncEvolution: also
# check for clean Synthesis source
"env SYNTHESISSRC=../libsynthesis %s" % options.shell,
"SYNTHESISSRC=../libsynthesis %s" % options.shell,
"git@gitorious.org:meego-middleware/syncevolution.git",
revision)
@ -922,7 +922,7 @@ if options.sourcedir:
else:
sync = GitCopy("syncevolution",
options.workdir,
"env SYNTHESISSRC=%s %s" % (libsynthesis.basedir, options.shell),
"SYNTHESISSRC=%s %s" % (libsynthesis.basedir, options.shell),
options.sourcedir,
options.syncevotag)
else:
@ -1167,7 +1167,7 @@ context.add(test)
test = SyncEvolutionTest("syncevohttp",
compile,
"", options.shell,
"Client::Sync::eds_event Client::Sync::eds_contact",
"Client::Sync::eds_event Client::Sync::eds_contact Client::Sync::eds_event_eds_contact",
[ "eds_event", "eds_contact" ],
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_LOG=syncevohttp.log "
@ -1178,6 +1178,8 @@ test = SyncEvolutionTest("syncevohttp",
# server supports refresh-from-client, use it for
# more efficient test setup
"CLIENT_TEST_DELETE_REFRESH=1 "
# server supports multiple cycles inside the same session
"CLIENT_TEST_PEER_CAN_RESTART=1 "
"CLIENT_TEST_SKIP="
# server does not detect duplicates (uses file backend), detecting on the
# client breaks syncing (see '[SyncEvolution] 409 "item merged" in client')

View file

@ -88,6 +88,7 @@ my $yahoo = $server =~ /yahoo/;
my $davical = $server =~ /davical/;
my $apple = $server =~ /apple/;
my $oracle = $server =~ /oracle/;
my $radicale = $server =~ /radicale/;
my $evolution = $client =~ /evolution/;
my $addressbook = $client =~ /addressbook/;
@ -412,6 +413,11 @@ sub NormalizeItem {
s/^ATTENDEE([^\n:]*);LANGUAGE=([^\n;:]*)/ATTENDEE$1/mg;
}
if ($radicale) {
# remove extensions added by server
s/^(X-RADICALE-NAME)(;[^:;\n]*)*:.*\r?\n?//gm;
}
if ($google || $yahoo) {
# default status is CONFIRMED
s/^STATUS:CONFIRMED\r?\n?//gm;

View file

@ -149,6 +149,7 @@ class SyncMLSession:
def reply(self, data, type, meta, final, session):
'''sent reply to HTTP client and/or close down normally'''
logger.debug("reply session %s final %s data len %d %s", session, final, len(data), meta)
self.logMessage("outgoing", self.request, data, type)
# When the D-Bus server sends an empty array, Python binding
# puts the four chars in 'None' into the data array?!
if data and len(data) > 0 and data != 'None':
@ -183,6 +184,9 @@ class SyncMLSession:
def start(self, request, config, url):
'''start a new session based on the incoming message'''
data = request.content.read()
type = request.getHeader('content-type')
self.logMessage("incoming", request, data, type)
logger.debug("requesting new session")
self.object = Context.getDBusServer()
self.request = request
@ -216,12 +220,13 @@ class SyncMLSession:
# feed new data into SyncEvolution and wait for reply
request.content.seek(0, 0)
self.connection.Process(request.content.read(),
request.getHeader('content-type'))
self.connection.Process(data, type)
SyncMLSession.sessions.append(self)
def process(self, request, data):
'''process next message by client in running session'''
type = request.getHeader('content-type')
self.logMessage("incoming", request, data, type)
if self.request:
# message resend?! Ignore old request.
logger.debug("message resend?!")
@ -231,8 +236,13 @@ class SyncMLSession:
deferred.addCallback(self.done)
deferred.addErrback(self.done)
self.request = request
self.connection.Process(data,
request.getHeader('content-type'))
self.connection.Process(data, type)
def logMessage(self, direction, request, data, type):
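'''log one SyncML message for debugging: plain-text and XML payloads are dumped verbatim, other (binary, e.g. WBXML) payloads only with their type and length'''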
if 'plain' in type or "+xml" in type:
logger.debug("processing %s message of type %s and length %d:\n%s" % (direction, type, len(data), data))
else:
logger.debug("processing %s message of type %s and length %d, binary data" % (direction, type, len(data)))
class SyncMLPost(resource.Resource):
isLeaf = True

View file

@ -28,8 +28,8 @@
- 20060406T163000
-DTSTART;TZID=/softwarestudio.org/Olson_20011030_5/Europe/Berlin:
- 20060406T160000
+DTEND:20060406T143000Z
+DTSTART:20060406T140000Z
+DTEND:20060406T163000
+DTSTART:20060406T160000
UID:20060406T211449Z-4562-727-1-63@gollum
DTSTAMP:20060406T211449Z
LAST-MODIFIED:20060416T203532Z
@ -124,6 +124,17 @@
CREATED:20060416T204808Z
LAST-MODIFIED:20060416T204808Z
END:VEVENT
@@ -196,8 +176,8 @@
BEGIN:VEVENT
UID:20060416T204136Z-4272-727-1-247@gollum
DTSTAMP:20060416T204136Z
-DTSTART:20060406T190000Z
-DTEND:20060406T193000Z
+DTSTART:20060406T210000
+DTEND:20060406T213000
TRANSP:TRANSPARENT
SEQUENCE:4
SUMMARY:all fields
@@ -211,7 +191,7 @@
characters:\na-umlaut ä\nexclamation mark !\nampersand disabled\nhash
#\nleft angle bracket disabled\nright angle bracket disabled\nleft square
@ -133,8 +144,14 @@
^\npercent %\ntilde ~\ntick `\nbacktick `\ndouble quotation - not tested
because Evolution encodes it incorrectly\nsingle quotation '\ncolon :\n
semicolon \;\ncomma \,\n
@@ -234,7 +214,7 @@
DTEND:20060406T200000Z
@@ -230,11 +210,11 @@
BEGIN:VEVENT
UID:20060416T204922Z-4272-727-1-250@gollum
DTSTAMP:20060416T204922Z
-DTSTART:20060406T193000Z
-DTEND:20060406T200000Z
+DTSTART:20060406T213000
+DTEND:20060406T220000
TRANSP:OPAQUE
SEQUENCE:2
-CATEGORIES:BUSINESS,MEETING
@ -162,7 +179,17 @@
CREATED:20080407T193241Z
LAST-MODIFIED:20080407T193241
END:VEVENT
@@ -280,7 +262,7 @@
@@ -272,15 +254,15 @@
BEGIN:VEVENT
UID:20080407T193125Z-19554-727-1-50@gollum
DTSTAMP:20080407T193125Z
-DTSTART:20080413T090000Z
-DTEND:20080413T093000Z
+DTSTART:20080413T110000
+DTEND:20080413T113000
TRANSP:OPAQUE
SEQUENCE:7
SUMMARY:Recurring: Modified
CLASS:PUBLIC
CREATED:20080407T193241Z
LAST-MODIFIED:20080407T193647
@ -171,7 +198,17 @@
DESCRIPTION:second instance modified
END:VEVENT
END:VCALENDAR
@@ -299,7 +281,7 @@
@@ -291,15 +273,15 @@
BEGIN:VEVENT
UID:20080407T193125Z-19554-727-1-50@gollum
DTSTAMP:20080407T193125Z
-DTSTART:20080420T100000Z
-DTEND:20080420T103000Z
+DTSTART:20080420T120000
+DTEND:20080420T123000
TRANSP:OPAQUE
SEQUENCE:7
SUMMARY:Recurring: Modified II
CLASS:PUBLIC
CREATED:20080407T193241Z
LAST-MODIFIED:20080407T193647
@ -199,6 +236,39 @@
CREATED:20080407T193241Z
LAST-MODIFIED:20080407T193241
END:VEVENT
@@ -329,8 +312,8 @@
BEGIN:VEVENT
UID:20080407T193125Z-19554-727-1-50-XX@gollum
DTSTAMP:20080407T193125Z
-DTSTART:20080413T090000Z
-DTEND:20080413T093000Z
+DTSTART:20080413T110000
+DTEND:20080413T113000
TRANSP:OPAQUE
SEQUENCE:7
SUMMARY:Recurring 2: Modified
@@ -348,8 +331,8 @@
BEGIN:VEVENT
UID:20080407T193125Z-19554-727-1-50-YY@gollum
DTSTAMP:20080407T193125Z
-DTSTART:20080413T090000Z
-DTEND:20080413T093000Z
+DTSTART:20080413T110000
+DTEND:20080413T113000
TRANSP:OPAQUE
SEQUENCE:7
SUMMARY:Recurring 3: Modified
@@ -367,8 +350,8 @@
BEGIN:VEVENT
UID:20080407T193125Z-19554-727-1-50-YY@gollum
DTSTAMP:20080407T193125Z
-DTSTART:20080420T100000Z
-DTEND:20080420T103000Z
+DTSTART:20080420T120000
+DTEND:20080420T123000
TRANSP:OPAQUE
SEQUENCE:7
SUMMARY:Recurring 3: Modified II
@@ -383,28 +366,11 @@
BEGIN:VCALENDAR
PRODID:-//Ximian//NONSGML Evolution Calendar//EN
@ -225,8 +295,8 @@
DTSTAMP:20060416T205224Z
-DTSTART;TZID=EST/EDT:20060406T140000
-DTEND;TZID=EST/EDT:20060406T143000
+DTSTART:20060406T180000Z
+DTEND:20060406T183000Z
+DTSTART:20060406T200000
+DTEND:20060406T203000
TRANSP:OPAQUE
SEQUENCE:2
SUMMARY:timezone New York with custom definition for 2006