This article collects typical usage examples of the severe function in C++. If you have been wondering exactly what severe does in C++, how to call it, or what real-world uses look like, the hand-picked code examples below may help.
The following section presents 20 code examples of the severe function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
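Before the examples, note that two different flavours of severe appear below: the rozofs snippets call a printf-style logging macro, severe("format", args...), while the MongoDB snippets call a severe() function that returns a log-stream object written to with operator<<. The following minimal, self-contained sketch mimics both call shapes so you can compile and experiment with them; the names severe_printf and SevereLogger are invented for illustration only, and the real definitions live in each project's own logging headers and behave differently from this toy version.

// Toy stand-ins for the two severe() flavours used in the examples below.
// These are NOT the real rozofs / MongoDB implementations, just minimal
// sketches that let both call shapes compile and print to stderr.
#include <cstdarg>
#include <cstdio>
#include <iostream>
#include <sstream>

// rozofs-style: severe(fmt, ...) behaves like a printf-family logging macro.
static void severe_printf(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    std::fprintf(stderr, "severe: ");
    std::vfprintf(stderr, fmt, ap);
    std::fprintf(stderr, "\n");
    va_end(ap);
}
#define severe(...) severe_printf(__VA_ARGS__)

// MongoDB-style: severe() returns a stream object; the accumulated message is
// flushed when the temporary is destroyed at the end of the statement.
class SevereLogger {
public:
    ~SevereLogger() { std::cerr << "SEVERE: " << _os.str() << std::endl; }
    template <typename T>
    SevereLogger& operator<<(const T& value) {
        _os << value;
        return *this;
    }
private:
    std::ostringstream _os;
};

int main() {
    int srv_rank = 42;
    severe("server rank out of range %d", srv_rank);  // printf-style call site
    // MongoDB spells this severe() << ...; renamed here to avoid the macro above.
    SevereLogger() << "Problem reading oplog: " << 18904;
    return 0;
}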
Example 1: exp_cache_build_invalidate_all_msg
gw_header_t * exp_cache_build_invalidate_all_msg(exp_cache_dirty_ctx_t * ctx_p,
int srv_rank)
{
exp_cache_srv_front_end_t * front_end_p;
/*
** Retrieve front end context of this server
*/
if (srv_rank >= EXP_MAX_CACHE_SRV) {
severe("server rank out of range %d",srv_rank);
return NULL;
}
front_end_p = ctx_p->srv_rank[srv_rank];
if (front_end_p == NULL) {
severe("server %d do not exist",srv_rank);
return NULL;
}
/*
** Initialize the message header
*/
gw_invalidate_all_msg.export_id = exp_cache_cnf.export_id;
gw_invalidate_all_msg.gateway_rank = srv_rank;
gw_invalidate_all_msg.nb_gateways = ctx_p->nb_cache_servers;
return &gw_invalidate_all_msg;
}
Developer ID: baoboa, Project: rozofs, Lines of code: 28, Source file: exp_cache_dirty_mgt.c
Example 2: exp_cache_build_configuration_msg
gw_configuration_t * exp_cache_build_configuration_msg(exp_cache_dirty_ctx_t * ctx_p,
uint16_t port,
int srv_rank)
{
exp_cache_srv_front_end_t * front_end_p;
/*
** Retrieve front end context of this server
*/
if (srv_rank >= EXP_MAX_CACHE_SRV) {
severe("server rank out of range %d",srv_rank);
return NULL;
}
front_end_p = ctx_p->srv_rank[srv_rank];
if (front_end_p == NULL) {
severe("server %d do not exist",srv_rank);
return NULL;
}
/*
** Initialize the message header
*/
gw_configuration_msg.hdr.export_id = exp_cache_cnf.export_id;
gw_configuration_msg.hdr.gateway_rank = srv_rank;
gw_configuration_msg.hdr.nb_gateways = ctx_p->nb_cache_servers;
// gw_configuration_msg.ipAddr = exp_cache_cnf.export_ipAddr;
// gw_configuration_msg.port = exp_cache_cnf.export_port;
// gw_configuration_msg.eid.eid_len = exp_cache_cnf.eid_nb;
// gw_configuration_msg.eid.eid_val = exp_cache_cnf.eid_list;
return &gw_configuration_msg;
}
Developer ID: baoboa, Project: rozofs, Lines of code: 35, Source file: exp_cache_dirty_mgt.c
Example 3: lk
void* MemoryMappedFile::remapPrivateView(void* oldPrivateAddr) {
LockMongoFilesExclusive lockMongoFiles;
privateViews.clearWritableBits(oldPrivateAddr, len);
stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
if (!UnmapViewOfFile(oldPrivateAddr)) {
DWORD dosError = GetLastError();
severe() << "UnMapViewOfFile for " << filename() << " failed with error "
<< errnoWithDescription(dosError) << " in MemoryMappedFile::remapPrivateView"
<< endl;
fassertFailed(16168);
}
void* newPrivateView =
MapViewOfFileEx(maphandle, // file mapping handle
FILE_MAP_READ, // access
0,
0, // file offset, high and low
0, // bytes to map, 0 == all
oldPrivateAddr); // we want the same address we had before
if (0 == newPrivateView) {
DWORD dosError = GetLastError();
severe() << "MapViewOfFileEx for " << filename() << " failed with error "
<< errnoWithDescription(dosError) << " (file size is " << len << ")"
<< " in MemoryMappedFile::remapPrivateView" << endl;
}
fassert(16148, newPrivateView == oldPrivateAddr);
return newPrivateView;
}
Developer ID: ksuarz, Project: mongo, Lines of code: 31, Source file: mmap_windows.cpp
Example 4: exp_dirty_active_switch
/**
* Change the active set of the dirty management context for a given front end
@param ctx_p: pointer to the dirty management context
@param srv_rank : rank of the server whose active set must be switched
@retval none
*/
void exp_dirty_active_switch(exp_cache_dirty_ctx_t *ctx_p,
int srv_rank)
{
exp_cache_srv_front_end_t * front_end_p;
int inactive_idx;
uint32_t count;
/*
** Retrieve front end context of this server
*/
if (srv_rank >= EXP_MAX_CACHE_SRV) {
severe("server rank out of range %d",srv_rank);
return;
}
front_end_p = ctx_p->srv_rank[srv_rank];
if (front_end_p == NULL) {
severe("server %d do not exist",srv_rank);
return ;
}
inactive_idx = 1 - front_end_p->active_idx;
/*
** Clear the data of the inactive set
*/
memset(front_end_p->parent[inactive_idx],0,sizeof(exp_dirty_dirty_parent_t));
count = (1 << ctx_p->level0_sz)/EXP_CHILD_BITMAP_BIT_SZ;
memset(front_end_p->child[inactive_idx],0,sizeof(exp_dirty_dirty_child_t)*count);
/*
** Switch the active set
*/
front_end_p->active_idx = inactive_idx;
}
Developer ID: baoboa, Project: rozofs, Lines of code: 42, Source file: exp_cache_dirty_mgt.c
Example 5: transaction
long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
BSONObj oplogEntry;
try {
ScopedTransaction transaction(txn, MODE_IX);
Lock::DBLock lk(txn->lockState(), "local", MODE_X);
bool success = Helpers::getLast(txn, rsoplog, oplogEntry);
if (!success) {
// This can happen when we are to do an initial sync. lastHash will be set
// after the initial sync is complete.
return 0;
}
}
catch (const DBException& ex) {
severe() << "Problem reading " << rsoplog << ": " << ex.toStatus();
fassertFailed(18904);
}
BSONElement hashElement = oplogEntry[hashFieldName];
if (hashElement.eoo()) {
severe() << "Most recent entry in " << rsoplog << " missing \"" << hashFieldName <<
"\" field";
fassertFailed(18902);
}
if (hashElement.type() != NumberLong) {
severe() << "Expected type of \"" << hashFieldName << "\" in most recent " <<
rsoplog << " entry to have type NumberLong, but found " <<
typeName(hashElement.type());
fassertFailed(18903);
}
return hashElement.safeNumberLong();
}
Developer ID: 3rf, Project: mongo, Lines of code: 30, Source file: bgsync.cpp
Example 6: multiSyncApply
// This free function is used by the writer threads to apply each op
void multiSyncApply(const std::vector<BSONObj>& ops, SyncTail* st) {
initializeWriterThread();
OperationContextImpl txn;
txn.setReplicatedWrites(false);
DisableDocumentValidation validationDisabler(&txn);
// allow us to get through the magic barrier
txn.lockState()->setIsBatchWriter(true);
bool convertUpdatesToUpserts = true;
for (std::vector<BSONObj>::const_iterator it = ops.begin(); it != ops.end(); ++it) {
try {
const Status s = SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts);
if (!s.isOK()) {
severe() << "Error applying operation (" << it->toString() << "): " << s;
fassertFailedNoTrace(16359);
}
} catch (const DBException& e) {
severe() << "writer worker caught exception: " << causedBy(e)
<< " on: " << it->toString();
if (inShutdown()) {
return;
}
fassertFailedNoTrace(16360);
}
}
}
Developer ID: hAhmadz, Project: mongo, Lines of code: 32, Source file: sync_tail.cpp
Example 7: transaction
long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
BSONObj oplogEntry;
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
ScopedTransaction transaction(txn, MODE_IX);
Lock::DBLock lk(txn->lockState(), "local", MODE_X);
bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry);
if (!success) {
// This can happen when we are to do an initial sync. lastHash will be set
// after the initial sync is complete.
return 0;
}
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName);
} catch (const DBException& ex) {
severe() << "Problem reading " << rsOplogName << ": " << ex.toStatus();
fassertFailed(18904);
}
long long hash;
auto status = bsonExtractIntegerField(oplogEntry, kHashFieldName, &hash);
if (!status.isOK()) {
severe() << "Most recent entry in " << rsOplogName << " is missing or has invalid \""
<< kHashFieldName << "\" field. Oplog entry: " << oplogEntry << ": " << status;
fassertFailed(18902);
}
return hash;
}
Developer ID: yanghongkjxy, Project: mongo, Lines of code: 27, Source file: bgsync.cpp
Example 8: reference
/**
geo_proc_createIndex
create a context given by index
That function tries to allocate a free context.
In case of success, it returns the index of the context.
@param : context_id is the reference of the context
@retval : MS controller reference (if OK)
retval -1 if out of context.
*/
uint32_t geo_proc_createIndex(uint32_t context_id) {
geo_proc_ctx_t *p;
/*
** Get the first free context
*/
p = geo_proc_getObjCtx_p(context_id);
if (p == NULL) {
severe( "MS ref out of range: %u", context_id );
return RUC_NOK;
}
/*
** return an error if the context is not free
*/
if (p->free == FALSE) {
severe( "the context is not free : %u", context_id );
return RUC_NOK;
}
/*
** reinitialization of the context
*/
geo_proc_ctxInit(p, FALSE);
/*
** remove it from the linked list
*/
geo_proc_context_allocated++;
p->free = FALSE;
ruc_objRemove((ruc_obj_desc_t*) p);
return RUC_OK;
}
Developer ID: ptulpen, Project: test1234, Lines of code: 44, Source file: geo_replica_ctx.c
Example 9: lk
void BackgroundSync::loadLastAppliedHash(OperationContext* txn) {
Lock::DBRead lk(txn->lockState(), rsoplog);
BSONObj oplogEntry;
try {
if (!Helpers::getLast(txn, rsoplog, oplogEntry)) {
// This can happen when we are to do an initial sync. lastHash will be set
// after the initial sync is complete.
_lastAppliedHash = 0;
return;
}
}
catch (const DBException& ex) {
severe() << "Problem reading " << rsoplog << ": " << ex.toStatus();
fassertFailed(18904);
}
BSONElement hashElement = oplogEntry[hashFieldName];
if (hashElement.eoo()) {
severe() << "Most recent entry in " << rsoplog << " missing \"" << hashFieldName <<
"\" field";
fassertFailed(18902);
}
if (hashElement.type() != NumberLong) {
severe() << "Expected type of \"" << hashFieldName << "\" in most recent " <<
rsoplog << " entry to have type NumberLong, but found " <<
typeName(hashElement.type());
fassertFailed(18903);
}
_lastAppliedHash = hashElement.safeNumberLong();
}
Developer ID: leonidbl91, Project: mongo, Lines of code: 29, Source file: bgsync.cpp
Example 10: invariant
void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
Timestamp oplogApplicationStartPoint,
Timestamp topOfOplog) {
invariant(!oplogApplicationStartPoint.isNull());
invariant(!topOfOplog.isNull());
// Check if we have any unapplied ops in our oplog. It is important that this is done after
// deleting the ragged end of the oplog.
if (oplogApplicationStartPoint == topOfOplog) {
log()
<< "No oplog entries to apply for recovery. appliedThrough is at the top of the oplog.";
return; // We've applied all the valid oplog we have.
} else if (oplogApplicationStartPoint > topOfOplog) {
severe() << "Applied op " << oplogApplicationStartPoint.toBSON()
<< " not found. Top of oplog is " << topOfOplog.toBSON() << '.';
fassertFailedNoTrace(40313);
}
log() << "Replaying stored operations from " << oplogApplicationStartPoint.toBSON()
<< " (exclusive) to " << topOfOplog.toBSON() << " (inclusive).";
DBDirectClient db(opCtx);
auto cursor = db.query(NamespaceString::kRsOplogNamespace.ns(),
QUERY("ts" << BSON("$gte" << oplogApplicationStartPoint)),
/*batchSize*/ 0,
/*skip*/ 0,
/*projection*/ nullptr,
QueryOption_OplogReplay);
// Check that the first document matches our appliedThrough point then skip it since it's
// already been applied.
if (!cursor->more()) {
// This should really be impossible because we check above that the top of the oplog is
// strictly > appliedThrough. If this fails it represents a serious bug in either the
// storage engine or query's implementation of OplogReplay.
severe() << "Couldn't find any entries in the oplog >= "
<< oplogApplicationStartPoint.toBSON() << " which should be impossible.";
fassertFailedNoTrace(40293);
}
auto firstTimestampFound =
fassertStatusOK(40291, OpTime::parseFromOplogEntry(cursor->nextSafe())).getTimestamp();
if (firstTimestampFound != oplogApplicationStartPoint) {
severe() << "Oplog entry at " << oplogApplicationStartPoint.toBSON()
<< " is missing; actual entry found is " << firstTimestampFound.toBSON();
fassertFailedNoTrace(40292);
}
// Apply remaining ops one at a time, but don't log them because they are already logged.
UnreplicatedWritesBlock uwb(opCtx);
while (cursor->more()) {
auto entry = cursor->nextSafe();
fassertStatusOK(40294,
SyncTail::syncApply(opCtx, entry, OplogApplication::Mode::kRecovering));
_consistencyMarkers->setAppliedThrough(
opCtx, fassertStatusOK(40295, OpTime::parseFromOplogEntry(entry)));
}
}
Developer ID: i80and, Project: mongo, Lines of code: 59, Source file: replication_recovery.cpp
Example 11: storaged_lbg_initialize
int storaged_lbg_initialize(mstorage_t *s, int index) {
int lbg_size;
int ret;
int i;
int local=1;
DEBUG_FUNCTION;
#if 0
/*
** configure the callback that is intended to perform the polling of the storaged on each TCP connection
*/
ret = north_lbg_attach_application_supervision_callback(s->lbg_id[index],(af_stream_poll_CBK_t)storcli_lbg_cnx_polling);
if (ret < 0)
{
severe("Cannot configure Soraged polling callback");
}
ret = north_lbg_set_application_tmo4supervision(s->lbg_id[index],20);
if (ret < 0)
{
severe("Cannot configure application TMO");
}
#endif
/*
** set the dscp for storio connections
*/
af_inet_storaged_conf.dscp=(uint8_t)common_config.storio_dscp;
af_inet_storaged_conf.dscp = af_inet_storaged_conf.dscp <<2;
/*
** store the IP address and port in the list of the endpoint
*/
lbg_size = s->sclients_nb;
for (i = 0; i < lbg_size; i++)
{
my_list[i].remote_port_host = s->sclients[i].port;
my_list[i].remote_ipaddr_host = s->sclients[i].ipv4;
if (!is_this_ipV4_local(s->sclients[i].ipv4)) local = 0;
}
af_inet_storaged_conf.recv_srv_type = ROZOFS_RPC_SRV;
af_inet_storaged_conf.rpc_recv_max_sz = rozofs_storcli_south_large_buf_sz;
ret = north_lbg_configure_af_inet(s->lbg_id[index],
s->host,
INADDR_ANY,0,
my_list,
ROZOFS_SOCK_FAMILY_STORAGE_NORTH,lbg_size,&af_inet_storaged_conf, local);
if (ret < 0)
{
severe("Cannot create Load Balancing Group %d for storaged %s",s->lbg_id[index],s->host);
return -1;
}
north_lbg_set_next_global_entry_idx_p(s->lbg_id[index],&storcli_next_storio_global_index);
return 0;
}
Developer ID: ptulpen, Project: test1234, Lines of code: 56, Source file: storage_lbg.c
Example 12: while
/* applies oplog from "now" until endOpTime using the applier threads for initial sync*/
void SyncTail::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTime) {
unsigned long long bytesApplied = 0;
unsigned long long entriesApplied = 0;
while (true) {
OpQueue ops;
OperationContextImpl ctx;
while (!tryPopAndWaitForMore(&ops, getGlobalReplicationCoordinator())) {
// nothing came back last time, so go again
if (ops.empty()) continue;
// Check if we reached the end
const BSONObj currentOp = ops.back();
const OpTime currentOpTime = currentOp["ts"]._opTime();
// When we reach the end return this batch
if (currentOpTime == endOpTime) {
break;
}
else if (currentOpTime > endOpTime) {
severe() << "Applied past expected end " << endOpTime << " to " << currentOpTime
<< " without seeing it. Rollback?" << rsLog;
fassertFailedNoTrace(18693);
}
// apply replication batch limits
if (ops.getSize() > replBatchLimitBytes)
break;
if (ops.getDeque().size() > replBatchLimitOperations)
break;
};
if (ops.empty()) {
severe() << "got no ops for batch...";
fassertFailedNoTrace(18692);
}
const BSONObj lastOp = ops.back().getOwned();
// Tally operation information
bytesApplied += ops.getSize();
entriesApplied += ops.getDeque().size();
multiApply(ops.getDeque());
OpTime lastOpTime = applyOpsToOplog(&ops.getDeque());
// if the last op applied was our end, return
if (lastOpTime == endOpTime) {
LOG(1) << "SyncTail applied " << entriesApplied
<< " entries (" << bytesApplied << " bytes)"
<< " and finished at opTime " << endOpTime.toStringPretty();
return;
}
} // end of while (true)
}
Developer ID: Aaron20141021, Project: mongo, Lines of code: 56, Source file: sync_tail.cpp
Example 13: log
BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
const char* ns = o.getStringField("ns");
// capped collections
Collection* collection = db->getCollection(ns);
if (collection && collection->isCapped()) {
log() << "missing doc, but this is okay for a capped collection (" << ns << ")";
return BSONObj();
}
const int retryMax = 3;
for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
if (retryCount != 1) {
// if we are retrying, sleep a bit to let the network possibly recover
sleepsecs(retryCount * retryCount);
}
try {
bool ok = missingObjReader.connect(HostAndPort(_hostname));
if (!ok) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
}
} catch (const SocketException&) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
}
// get _id from oplog entry to create query to fetch document.
const BSONElement opElem = o.getField("op");
const bool isUpdate = !opElem.eoo() && opElem.str() == "u";
const BSONElement idElem = o.getObjectField(isUpdate ? "o2" : "o")["_id"];
if (idElem.eoo()) {
severe() << "cannot fetch missing document without _id field: " << o.toString();
fassertFailedNoTrace(28742);
}
BSONObj query = BSONObjBuilder().append(idElem).obj();
BSONObj missingObj;
try {
missingObj = missingObjReader.findOne(ns, query);
} catch (const SocketException&) {
warning() << "network problem detected while fetching a missing document from the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
} catch (DBException& e) {
error() << "assertion fetching missing object: " << e.what() << endl;
throw;
}
// success!
return missingObj;
}
// retry count exceeded
msgasserted(15916,
str::stream() << "Can no longer connect to initial sync source: " << _hostname);
}
Developer ID: Jaryli, Project: mongo, Lines of code: 60, Source file: sync_tail.cpp
Example 14: StorageEngineLockFile
void ServiceContextMongoD::createLockFile() {
try {
_lockFile.reset(new StorageEngineLockFile(storageGlobalParams.dbpath));
} catch (const std::exception& ex) {
uassert(28596,
str::stream() << "Unable to determine status of lock file in the data directory "
<< storageGlobalParams.dbpath << ": " << ex.what(),
false);
}
bool wasUnclean = _lockFile->createdByUncleanShutdown();
auto openStatus = _lockFile->open();
if (storageGlobalParams.readOnly && openStatus == ErrorCodes::IllegalOperation) {
_lockFile.reset();
} else {
uassertStatusOK(openStatus);
}
if (wasUnclean) {
if (storageGlobalParams.readOnly) {
severe() << "Attempted to open dbpath in readOnly mode, but the server was "
"previously not shut down cleanly.";
fassertFailedNoTrace(34416);
}
warning() << "Detected unclean shutdown - " << _lockFile->getFilespec() << " is not empty.";
}
}
Developer ID: AnkyrinRepeat, Project: mongo, Lines of code: 26, Source file: service_context_d.cpp
Example 15: severe
Message OpMsgBuilder::finish() {
if (kDebugBuild && !disableDupeFieldCheck_forTest.load()) {
std::set<StringData> seenFields;
for (auto elem : resumeBody().asTempObj()) {
if (!(seenFields.insert(elem.fieldNameStringData()).second)) {
severe() << "OP_MSG with duplicate field '" << elem.fieldNameStringData()
<< "' : " << redact(resumeBody().asTempObj());
fassert(40474, false);
}
}
}
invariant(_state == kBody);
invariant(_bodyStart);
invariant(!_openBuilder);
_state = kDone;
const auto size = _buf.len();
MSGHEADER::View header(_buf.buf());
header.setMessageLength(size);
// header.setRequestMsgId(...); // These are currently filled in by the networking layer.
// header.setResponseToMsgId(...);
header.setOpCode(dbMsg);
return Message(_buf.release());
}
Developer ID: ShaneHarvey, Project: mongo, Lines of code: 25, Source file: op_msg.cpp
Example 16: ON_BLOCK_EXIT
void BackgroundSync::producerThread() {
Client::initThread("rsBackgroundSync");
AuthorizationSession::get(cc())->grantInternalAuthorization();
_threadPoolTaskExecutor.startup();
ON_BLOCK_EXIT([this]() {
_threadPoolTaskExecutor.shutdown();
_threadPoolTaskExecutor.join();
});
while (!inShutdown()) {
try {
_producerThread();
} catch (const DBException& e) {
std::string msg(str::stream() << "sync producer problem: " << e.toString());
error() << msg;
_replCoord->setMyHeartbeatMessage(msg);
sleepmillis(100); // sleep a bit to keep from hammering this thread with temp. errors.
} catch (const std::exception& e2) {
severe() << "sync producer exception: " << e2.what();
fassertFailed(28546);
}
}
stop();
}
Developer ID: AnkyrinRepeat, Project: mongo, Lines of code: 25, Source file: bgsync.cpp
Example 17: while
void RSDataSync::_run() {
Client::initThread("rsSync");
AuthorizationSession::get(cc())->grantInternalAuthorization();
// Overwrite prefetch index mode in BackgroundSync if ReplSettings has a mode set.
auto&& replSettings = _replCoord->getSettings();
if (replSettings.isPrefetchIndexModeSet())
_replCoord->setIndexPrefetchConfig(replSettings.getPrefetchIndexMode());
while (!_bgsync->inShutdown()) {
// After a reconfig, we may not be in the replica set anymore, so
// check that we are in the set (and not an arbiter) before
// trying to sync with other replicas.
// TODO(spencer): Use a condition variable to await loading a config
if (_replCoord->getMemberState().startup()) {
warning() << "did not receive a valid config yet";
sleepsecs(1);
continue;
}
const MemberState memberState = _replCoord->getMemberState();
// TODO(siyuan) Control the behavior using applier state.
// An arbiter can never transition to any other state, and doesn't replicate, ever
if (memberState.arbiter()) {
break;
}
// If we are removed then we don't belong to the set anymore
if (memberState.removed()) {
sleepsecs(5);
continue;
}
try {
if (_replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped) {
sleepsecs(1);
continue;
}
auto status = _replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!status.isOK()) {
LOG(2) << "Failed to transition to RECOVERING to start data replication"
<< causedBy(status);
continue;
}
// Once we call into SyncTail::oplogApplication we never return, so this code only runs
// at startup. It is not valid to transition from PRIMARY to RECOVERING ever, or from
// SECONDARY to RECOVERING without holding a global X lock, so we invariant to make
// sure this never happens.
invariant(!memberState.primary() && !memberState.secondary());
SyncTail(_bgsync, multiSyncApply).oplogApplication(_replCoord);
} catch (...) {
auto status = exceptionToStatus();
severe() << "Exception thrown in RSDataSync: " << redact(status);
std::terminate();
}
}
}
Developer ID: akira-kurogane, Project: mongo, Lines of code: 60, Source file: rs_sync.cpp
Example 18: syncRollBackLocalOperations
StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperations(
const OplogInterface& localOplog,
const OplogInterface& remoteOplog,
const RollBackLocalOperations::RollbackOperationFn& rollbackOperation) {
auto remoteIterator = remoteOplog.makeIterator();
auto remoteResult = remoteIterator->next();
if (!remoteResult.isOK()) {
return StatusWith<RollBackLocalOperations::RollbackCommonPoint>(
ErrorCodes::InvalidSyncSource, "remote oplog empty or unreadable");
}
RollBackLocalOperations finder(localOplog, rollbackOperation);
Timestamp theirTime;
while (remoteResult.isOK()) {
theirTime = remoteResult.getValue().first["ts"].timestamp();
BSONObj theirObj = remoteResult.getValue().first;
auto result = finder.onRemoteOperation(theirObj);
if (result.isOK()) {
return result.getValue();
} else if (result.getStatus().code() != ErrorCodes::NoSuchKey) {
return result;
}
remoteResult = remoteIterator->next();
}
severe() << "rollback error RS100 reached beginning of remote oplog";
log() << " them: " << remoteOplog.toString();
log() << " theirTime: " << theirTime.toStringLong();
return StatusWith<RollBackLocalOperations::RollbackCommonPoint>(
ErrorCodes::NoMatchingDocument, "RS100 reached beginning of remote oplog [1]");
}
Developer ID: vnvizitiu, Project: mongo, Lines of code: 31, Source file: roll_back_local_operations.cpp
Example 19: rozofs_storcli_north_interface_init
int rozofs_storcli_north_interface_init(uint32_t eid,uint16_t rozofsmount_instance,uint32_t instance,
int read_write_buf_count,int read_write_buf_sz)
{
int ret = 0;
char sunpath[AF_UNIX_SOCKET_NAME_SIZE];
rozofs_storcli_read_write_buf_count = read_write_buf_count;
rozofs_storcli_read_write_buf_sz = read_write_buf_sz ;
while(1)
{
storcli_north_buffer_pool_p = ruc_buf_poolCreate(rozofs_storcli_read_write_buf_count,rozofs_storcli_read_write_buf_sz);
if (storcli_north_buffer_pool_p == NULL)
{
ret = -1;
severe( "ruc_buf_poolCreate(%d,%d)", rozofs_storcli_read_write_buf_count, rozofs_storcli_read_write_buf_sz );
break;
}
/*
** create the listening af unix socket on the north interface
*/
af_unix_test_family.rpc_recv_max_sz = rozofs_storcli_read_write_buf_sz;
sprintf(sunpath,"%s%d.%d_lbg%d_inst_1",ROZOFS_SOCK_FAMILY_STORCLI_NORTH_SUNPATH,eid,rozofsmount_instance,instance);
// sprintf(sunpath,"%s%d.%d_inst_%d",ROZOFS_SOCK_FAMILY_STORCLI_NORTH_SUNPATH,eid,rozofsmount_instance,instance);
ret = af_unix_sock_listening_create("STORCLI_NORTH",
sunpath,
&af_unix_test_family
);
break;
}
return ret;
}
Developer ID: baoboa, Project: rozofs, Lines of code: 35, Source file: rozofs_storcli_north_intf.c
Example 20: quota_wbcache_init
int quota_wbcache_init()
{
int i;
if (quota_wbcache_cache_initialized) return 0;
quota_wbcache_cache_p = xmalloc(sizeof(quota_wbcache_entry_t)*QUOTA_CACHE_MAX_ENTRY);
if (quota_wbcache_cache_p != NULL)
{
memset(quota_wbcache_cache_p,0,sizeof(quota_wbcache_entry_t)*QUOTA_CACHE_MAX_ENTRY);
quota_wbcache_cache_initialized = 1;
quota_wbcache_cache_enable = 1;
}
for (i = 0; i < QUOTA_CACHE_MAX_ENTRY; i++)
{
if (pthread_rwlock_init(&quota_wbcache_cache_p[i].lock, NULL) != 0) {
return -1;
}
}
/**
* create the writeback thread
*/
if ((errno = pthread_create(&quota_wbcache_ctx_thread, NULL,
quota_wbcache_thread, NULL)) != 0) {
severe("can't create writeback cache thread %s", strerror(errno));
return -1;
}
return 0;
}
Developer ID: ptulpen, Project: test1234, Lines of code: 29, Source file: quota_writeback_cache.c
Note: The severe function examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's License before redistributing or using the code; do not reproduce without permission.