C++ causedBy Function Code Examples


This article collects typical usage examples of the C++ causedBy function. If you have been wondering what causedBy does in C++, how to call it, or what real-world uses look like, the curated code examples below should help.



Shown below are 20 code examples of the causedBy function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
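For context, causedBy is a small helper from MongoDB's error-reporting utilities (declared in mongo/util/assert_util.h): it formats an underlying error so that it can be appended to a log line or error message. Below is a minimal sketch of the core string overload, consistent with Example 8 further down; treat the exact separator text as an assumption rather than a guaranteed detail of every MongoDB version.

    #include <string>

    // Minimal sketch: the real header declares several overloads
    // (const char*, std::string, std::exception, DBException, Status)
    // that all produce a suffix of this shape.
    std::string causedBy( const std::string& e ) {
        return std::string( " :: caused by :: " ) + e;
    }

Streaming the result after a message therefore yields output such as "could not enforce write concern :: caused by :: <error details>", which is the pattern every example below relies on.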

Example 1: enforceLegacyWriteConcern

    Status enforceLegacyWriteConcern( MultiCommandDispatch* dispatcher,
                                      const StringData& dbName,
                                      const BSONObj& options,
                                      const HostOpTimeMap& hostOpTimes,
                                      vector<LegacyWCResponse>* legacyWCResponses ) {

        if ( hostOpTimes.empty() ) {
            return Status::OK();
        }

        for ( HostOpTimeMap::const_iterator it = hostOpTimes.begin(); it != hostOpTimes.end();
            ++it ) {

            const ConnectionString& shardEndpoint = it->first;
            const HostOpTime hot = it->second;
            const OpTime& opTime = hot.opTime;
            const OID& electionId = hot.electionId;

            LOG( 3 ) << "enforcing write concern " << options << " on " << shardEndpoint.toString()
                     << " at opTime " << opTime.toStringPretty() << " with electionID "
                     << electionId;

            BSONObj gleCmd = buildGLECmdWithOpTime( options, opTime, electionId );

            RawBSONSerializable gleCmdSerial( gleCmd );
            dispatcher->addCommand( shardEndpoint, dbName, gleCmdSerial );
        }

        dispatcher->sendAll();

        vector<Status> failedStatuses;

        while ( dispatcher->numPending() > 0 ) {

            ConnectionString shardEndpoint;
            RawBSONSerializable gleResponseSerial;

            Status dispatchStatus = dispatcher->recvAny( &shardEndpoint, &gleResponseSerial );
            if ( !dispatchStatus.isOK() ) {
                // We need to get all responses before returning
                failedStatuses.push_back( dispatchStatus );
                continue;
            }

            BSONObj gleResponse = BatchSafeWriter::stripNonWCInfo( gleResponseSerial.toBSON() );

            // Use the downconversion tools to determine if this GLE response is ok, a
            // write concern error, or an unknown error we should immediately abort for.
            BatchSafeWriter::GLEErrors errors;
            Status extractStatus = BatchSafeWriter::extractGLEErrors( gleResponse, &errors );
            if ( !extractStatus.isOK() ) {
                failedStatuses.push_back( extractStatus );
                continue;
            }

            LegacyWCResponse wcResponse;
            wcResponse.shardHost = shardEndpoint.toString();
            wcResponse.gleResponse = gleResponse;
            if ( errors.wcError.get() ) {
                wcResponse.errToReport = errors.wcError->getErrMessage();
            }

            legacyWCResponses->push_back( wcResponse );
        }

        if ( failedStatuses.empty() ) {
            return Status::OK();
        }

        StringBuilder builder;
        builder << "could not enforce write concern";

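        // Fold every failed status into a single error message: the first failure
        // is appended via causedBy(), subsequent ones are joined with ":: and ::".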
        for ( vector<Status>::const_iterator it = failedStatuses.begin();
            it != failedStatuses.end(); ++it ) {
            const Status& failedStatus = *it;
            if ( it == failedStatuses.begin() ) {
                builder << causedBy( failedStatus.toString() );
            }
            else {
                builder << ":: and ::" << failedStatus.toString();
            }
        }

        return Status( failedStatuses.size() == 1u ? failedStatuses.front().code() : 
                                                     ErrorCodes::MultipleErrorsOccurred, 
                       builder.str() );
    }
Developer ID: AlanLiu-AI, Project: mongo, Lines: 87, Source file: batch_downconvert.cpp
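The error-aggregation loop at the end of Example 1 is worth a closer look. Stripped of the MongoDB types, it amounts to the following sketch, where std::string stands in for mongo::Status and combineFailures is an illustrative name, not part of the original source:

    #include <sstream>
    #include <string>
    #include <vector>

    std::string combineFailures( const std::vector<std::string>& failures ) {
        std::ostringstream builder;
        builder << "could not enforce write concern";
        for ( size_t i = 0; i < failures.size(); ++i ) {
            if ( i == 0 ) {
                // what causedBy() contributes for the first failure
                builder << " :: caused by :: " << failures[i];
            }
            else {
                builder << ":: and ::" << failures[i];
            }
        }
        return builder.str();
    }

With two failures this produces a single line such as "could not enforce write concern :: caused by :: <first error>:: and ::<second error>"; the returned Status then carries the first failure's code if there was exactly one failure, or ErrorCodes::MultipleErrorsOccurred otherwise.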


Example 2: warning

 void DBException::traceIfNeeded( const DBException& e ) {
     if( traceExceptions && ! inShutdown() ){
         warning() << "DBException thrown" << causedBy( e ) << endl;
         printStackTrace();
     }
 }
Developer ID: dngorecki, Project: mongo, Lines: 6, Source file: assert_util.cpp


Example 3: updateChunkWriteStatsAndSplitIfNeeded


//......... portions of this code omitted .........

        if (minIsInf || maxIsInf) {
            // We don't want to reset _dataWritten since we want to check the other side right away
        } else {
            // We're splitting, so should wait a bit
            chunk->clearBytesWritten();
        }

        // We assume that if the chunk being split is the first (or last) one on the collection,
        // this chunk is likely to see more insertions. Instead of splitting mid-chunk, we use the
        // very first (or last) key as a split point.
        //
        // This heuristic is skipped for "special" shard key patterns that are not likely to produce
        // monotonically increasing or decreasing values (e.g. hashed shard keys).
        if (KeyPattern::isOrderedKeyPattern(manager->getShardKeyPattern().toBSON())) {
            if (minIsInf) {
                BSONObj key = findExtremeKeyForShard(
                    opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), true);
                if (!key.isEmpty()) {
                    splitPoints.front() = key.getOwned();
                }
            } else if (maxIsInf) {
                BSONObj key = findExtremeKeyForShard(
                    opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), false);
                if (!key.isEmpty()) {
                    splitPoints.back() = key.getOwned();
                }
            }
        }

        const auto suggestedMigrateChunk =
            uassertStatusOK(shardutil::splitChunkAtMultiplePoints(opCtx,
                                                                  chunk->getShardId(),
                                                                  nss,
                                                                  manager->getShardKeyPattern(),
                                                                  manager->getVersion(),
                                                                  chunkRange,
                                                                  splitPoints));

        // Balance the resulting chunks if the option is enabled and if the shard suggested a chunk
        // to balance
        const bool shouldBalance = [&]() {
            if (!balancerConfig->shouldBalanceForAutoSplit())
                return false;

            auto collStatus =
                Grid::get(opCtx)->catalogClient()->getCollection(opCtx, manager->getns());
            if (!collStatus.isOK()) {
                log() << "Auto-split for " << nss << " failed to load collection metadata"
                      << causedBy(redact(collStatus.getStatus()));
                return false;
            }

            return collStatus.getValue().value.getAllowBalance();
        }();

        log() << "autosplitted " << nss << " chunk: " << redact(chunk->toString()) << " into "
              << (splitPoints.size() + 1) << " parts (desiredChunkSize " << desiredChunkSize << ")"
              << (suggestedMigrateChunk ? "" : (std::string) " (migrate suggested" +
                          (shouldBalance ? ")" : ", but no migrations allowed)"));

        // Reload the chunk manager after the split
        auto routingInfo = uassertStatusOK(
            Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
                                                                                         nss));

        if (!shouldBalance || !suggestedMigrateChunk) {
            return;
        }

        // Top chunk optimization - try to move the top chunk out of this shard to prevent the hot
        // spot from staying on a single shard. This is based on the assumption that succeeding
        // inserts will fall on the top chunk.

        // We need to use the latest chunk manager (after the split) in order to have the most
        // up-to-date view of the chunk we are about to move
        auto suggestedChunk = routingInfo.cm()->findIntersectingChunkWithSimpleCollation(
            suggestedMigrateChunk->getMin());

        ChunkType chunkToMove;
        chunkToMove.setNS(nss.ns());
        chunkToMove.setShard(suggestedChunk->getShardId());
        chunkToMove.setMin(suggestedChunk->getMin());
        chunkToMove.setMax(suggestedChunk->getMax());
        chunkToMove.setVersion(suggestedChunk->getLastmod());

        uassertStatusOK(configsvr_client::rebalanceChunk(opCtx, chunkToMove));

        // Ensure the collection gets reloaded because of the move
        Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss);
    } catch (const DBException& ex) {
        chunk->clearBytesWritten();

        if (ErrorCodes::isStaleShardingError(ErrorCodes::Error(ex.getCode()))) {
            log() << "Unable to auto-split chunk " << redact(chunkRange.toString()) << causedBy(ex)
                  << ", going to invalidate routing table entry for " << nss;
            Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss);
        }
    }
}
Developer ID: wzqtony, Project: mongo, Lines: 101, Source file: cluster_write.cpp


Example 4: checkSlaveQueryResult

    auto_ptr<DBClientCursor> DBClientReplicaSet::query(const string &ns, Query query, int nToReturn, int nToSkip,
            const BSONObj *fieldsToReturn, int queryOptions, int batchSize) {

        if ( queryOptions & QueryOption_SlaveOk ) {
            // we're ok sending to a slave
            // we'll try 2 slaves before just using master
            // checkSlave will try a different slave automatically after a failure
            for ( int i=0; i<3; i++ ) {
                try {
                    return checkSlaveQueryResult( checkSlave()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize) );
                }
                catch ( DBException &e ) {
                    LOG(1) << "can't query replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
                }
            }
        }

        return checkMaster()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize);
    }
Developer ID: cangove, Project: mongo, Lines: 19, Source file: dbclient_rs.cpp


Example 5: LazyState

    void DBClientReplicaSet::say( Message& toSend, bool isRetry ) {

        if( ! isRetry )
            _lazyState = LazyState();

        int lastOp = -1;
        bool slaveOk = false;

        if ( ( lastOp = toSend.operation() ) == dbQuery ) {
            // TODO: might be possible to do this faster by changing api
            DbMessage dm( toSend );
            QueryMessage qm( dm );
            if ( ( slaveOk = ( qm.queryOptions & QueryOption_SlaveOk ) ) ) {

                for ( int i = _lazyState._retries; i < 3; i++ ) {
                    try {
                        DBClientConnection* slave = checkSlave();
                        slave->say( toSend );

                        _lazyState._lastOp = lastOp;
                        _lazyState._slaveOk = slaveOk;
                        _lazyState._retries = i;
                        _lazyState._lastClient = slave;
                        return;
                    }
                    catch ( DBException &e ) {
                       LOG(1) << "can't callLazy replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
                    }
                }
            }
        }

        DBClientConnection* master = checkMaster();
        master->say( toSend );

        _lazyState._lastOp = lastOp;
        _lazyState._slaveOk = slaveOk;
        _lazyState._retries = 3;
        _lazyState._lastClient = master;
        return;
    }
Developer ID: cangove, Project: mongo, Lines: 41, Source file: dbclient_rs.cpp


Example 6: shardInfo

StatusWith<vector<MigrateInfo>> Balancer::_getCandidateChunks(OperationContext* txn) {
    vector<CollectionType> collections;

    Status collsStatus =
        grid.catalogManager(txn)->getCollections(txn, nullptr, &collections, nullptr);
    if (!collsStatus.isOK()) {
        return collsStatus;
    }

    if (collections.empty()) {
        return vector<MigrateInfo>();
    }

    // Get a list of all the shards that are participating in this balance round along with any
    // maximum allowed quotas and current utilization. We get the latter by issuing
    // db.serverStatus() (mem.mapped) to all shards.
    //
    // TODO: skip unresponsive shards and mark information as stale.
    auto shardInfoStatus = DistributionStatus::populateShardInfoMap(txn);
    if (!shardInfoStatus.isOK()) {
        return shardInfoStatus.getStatus();
    }

    const ShardInfoMap shardInfo(std::move(shardInfoStatus.getValue()));

    if (shardInfo.size() < 2) {
        return vector<MigrateInfo>();
    }

    OCCASIONALLY warnOnMultiVersion(shardInfo);

    std::vector<MigrateInfo> candidateChunks;

    // For each collection, check if the balancing policy recommends moving anything around.
    for (const auto& coll : collections) {
        // Skip collections for which balancing is disabled
        const NamespaceString& nss = coll.getNs();

        if (!coll.getAllowBalance()) {
            LOG(1) << "Not balancing collection " << nss << "; explicitly disabled.";
            continue;
        }

        std::vector<ChunkType> allNsChunks;
        Status status = grid.catalogManager(txn)->getChunks(txn,
                                                            BSON(ChunkType::ns(nss.ns())),
                                                            BSON(ChunkType::min() << 1),
                                                            boost::none,  // all chunks
                                                            &allNsChunks,
                                                            nullptr);
        if (!status.isOK()) {
            warning() << "failed to load chunks for ns " << nss.ns() << causedBy(status);
            continue;
        }

        set<BSONObj> allChunkMinimums;
        map<string, vector<ChunkType>> shardToChunksMap;

        for (const ChunkType& chunk : allNsChunks) {
            allChunkMinimums.insert(chunk.getMin().getOwned());

            vector<ChunkType>& chunksList = shardToChunksMap[chunk.getShard()];
            chunksList.push_back(chunk);
        }

        if (shardToChunksMap.empty()) {
            LOG(1) << "skipping empty collection (" << nss.ns() << ")";
            continue;
        }

        for (ShardInfoMap::const_iterator i = shardInfo.begin(); i != shardInfo.end(); ++i) {
            // This loop just makes sure there is an entry in shardToChunksMap for every shard
            shardToChunksMap[i->first];
        }

        DistributionStatus distStatus(shardInfo, shardToChunksMap);

        // TODO: TagRange contains all the information from TagsType except for the namespace,
        //       so maybe the two can be merged at some point in order to avoid the
        //       transformation below.
        vector<TagRange> ranges;

        {
            vector<TagsType> collectionTags;
            uassertStatusOK(
                grid.catalogManager(txn)->getTagsForCollection(txn, nss.ns(), &collectionTags));
            for (const auto& tt : collectionTags) {
                ranges.push_back(
                    TagRange(tt.getMinKey().getOwned(), tt.getMaxKey().getOwned(), tt.getTag()));
                uassert(16356,
                        str::stream() << "tag ranges not valid for: " << nss.ns(),
                        distStatus.addTagRange(ranges.back()));
            }
        }

        auto statusGetDb = grid.catalogCache()->getDatabase(txn, nss.db().toString());
        if (!statusGetDb.isOK()) {
            warning() << "could not load db config to balance collection [" << nss.ns()
                      << "]: " << statusGetDb.getStatus();
            continue;
//......... portions of this code omitted .........
Developer ID: HaiJiaoXinHeng, Project: mongo, Lines: 101, Source file: balance.cpp


Example 7: process

    virtual void process( Message& m , AbstractMessagingPort* p , LastError * le) {
        verify( p );
        Request r( m , p );

        verify( le );
        lastError.startRequest( m , le );

        try {
            r.init();
            r.process();

            // Release connections after non-write op
            if ( ShardConnection::releaseConnectionsAfterResponse && r.expectResponse() ) {
                LOG(2) << "release thread local connections back to pool" << endl;
                ShardConnection::releaseMyConnections();
            }
        }
        catch ( AssertionException & e ) {
            LOG( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl;

            le->raiseError( e.getCode() , e.what() );

            m.header()->id = r.id();

            if ( r.expectResponse() ) {
                BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
                replyToQuery( ResultFlag_ErrSet, p , m , err );
            }
        }
        catch ( DBException& e ) {
            // note that e.toString() is more detailed on a SocketException than
            // e.what().  we should think about what is the right level of detail both
            // for logging and return code.
            log() << "DBException in process: " << e.what() << endl;

            le->raiseError( e.getCode() , e.what() );

            m.header()->id = r.id();

            if ( r.expectResponse() ) {
                BSONObjBuilder b;
                b.append("$err",e.what()).append("code",e.getCode());
                if( !e._shard.empty() ) {
                    b.append("shard",e._shard);
                }
                replyToQuery( ResultFlag_ErrSet, p , m , b.obj() );
            }
        }
    }
Developer ID: qunb, Project: mongo, Lines: 49, Source file: server.cpp


Example 8: causedBy

 std::string causedBy( const DBException& e ){
     return causedBy( e.toString() );
 }
Developer ID: Albert-B-P, Project: mongo, Lines: 3, Source file: assert_util.cpp
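This overload simply delegates to the string overload, which is what lets the other examples stream a caught exception straight into a log message. A self-contained usage sketch is below; plain iostreams stand in for MongoDB's log() and LOG() helpers, and the causedBy definition repeats the sketch from the introduction:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Stand-in for the MongoDB helper (separator text assumed).
    std::string causedBy( const std::string& e ) {
        return std::string( " :: caused by :: " ) + e;
    }

    int main() {
        try {
            throw std::runtime_error( "socket exception [SEND_ERROR]" );
        }
        catch ( const std::exception& e ) {
            // Mirrors the pattern in Examples 2, 4, 5 and 14:
            std::cout << "can't query replica set slave" << causedBy( e.what() ) << std::endl;
        }
        return 0;
    }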


Example 9: addReplSetShardCheck

    /**
     * Performs sanity check on the given connection string on whether the seed list
     * is consistent with the view of the set using replSetGetStatus.
     */
    bool addReplSetShardCheck( const ConnectionString& servers, string* errMsg ) {
        bool ok = false;
        BSONObj replSetStat;
        try {
            ScopedDbConnection newShardConn(servers.toString());
            ok = newShardConn->runCommand( "admin", BSON( "replSetGetStatus" << 1 ),
                    replSetStat );
            newShardConn.done();
        }
        catch ( const DBException& ex ) {
            *errMsg = str::stream() << "Error encountered while checking status of "
                    << servers.toString() << ": " << causedBy( ex );
        }

        if( !ok ) {
            if ( replSetStat["info"].str() == "configsvr" ) {
                *errMsg = "the specified mongod is a --configsvr and "
                    "should thus not be a shard server";
            }
            else {
                *errMsg = str::stream() << "error encountered calling replSetGetStatus: "
                        << replSetStat;
            }

            return false;
        }

        // if the shard has only one host, make sure it is not part of a replica set
        string setName = replSetStat["set"].str();
        string commandSetName = servers.getSetName();
        if ( commandSetName.empty() && ! setName.empty() ) {
            *errMsg = str::stream() << "host is part of set: " << setName
                        << " use replica set url format <setname>/<server1>,<server2>,....";
            return false;
        }

        if ( !commandSetName.empty() && setName.empty() ) {
            *errMsg = str::stream() << "host did not return a set name, "
                        << "is the replica set still initializing?" << replSetStat;
            return false;
        }

        // if the shard is part of replica set, make sure it is the right one
        if ( ! commandSetName.empty() && ( commandSetName != setName ) ) {
            *errMsg = str::stream() << "host is part of a different set: " << setName;
            return false;
        }

        // if the shard is part of a replica set, make sure all the hosts mentioned in
        // 'servers' are part of the set. It is fine if not all members of the set
        // are present in 'servers'.
        bool foundAll = true;
        string offendingHost;
        if ( ! commandSetName.empty() ) {
            set<string> hostSet;

            BSONElement membersElem( replSetStat["members"] );
            if ( membersElem.type() == Array ) {
                BSONArrayIteratorSorted iter( BSONArray( membersElem.Obj() ));
                while ( iter.more() ) {
                    hostSet.insert( iter.next()["name"].str() ); // host:port
                }

                vector<HostAndPort> hosts = servers.getServers();
                for ( size_t i = 0 ; i < hosts.size() ; i++ ) {
                    if (!hosts[i].hasPort()) {
                        hosts[i].setPort(CmdLine::DefaultDBPort);
                    }
                    string host = hosts[i].toString(); // host:port
                    if ( hostSet.find( host ) == hostSet.end() ) {
                        offendingHost = host;
                        foundAll = false;
                        break;
                    }
                }
            }

            if ( hostSet.empty() ) {
                *errMsg = "replSetGetStatus returned an empty set. "
                        " Please wait for the set to initialize and try again.";
                return false;
            }
        }

        if ( ! foundAll ) {
            *errMsg = str::stream() << "in seed list " << servers.toString()
                            << ", host " << offendingHost
                            << " does not belong to replica set " << setName;
            return false;
        }

        return true;
    }
Developer ID: Cassie90, Project: mongo, Lines: 97, Source file: grid.cpp


Example 10: newShardConn

    bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ) {
        // name can be NULL, so provide a dummy one here to avoid testing it elsewhere
        string nameInternal;
        if ( ! name ) {
            name = &nameInternal;
        }

        ReplicaSetMonitorPtr rsMonitor;

        // Check whether the host (or set) exists and run several sanity checks on this request.
        // There are two set of sanity checks: making sure adding this particular shard is consistent
        // with the replica set state (if it exists) and making sure this shards databases can be
        // brought into the grid without conflict.

        if ( servers.type() == ConnectionString::SYNC ) {
            errMsg = "can't use sync cluster as a shard for replica set, "
                    "have to use <setname>/<server1>,<server2>,...";
            return false;
        }

        vector<string> dbNames;
        try {
            bool ok = false;

            {
                ScopedDbConnection newShardConn(servers.toString());

                BSONObj resIsMongos;
                ok = newShardConn->runCommand( "admin", BSON( "isdbgrid" << 1 ), resIsMongos );
                newShardConn.done();
            }

            // should return ok=0, cmd not found if it's a normal mongod
            if ( ok ) {
                errMsg = "can't add a mongos process as a shard";
                return false;
            }

            if ( servers.type() == ConnectionString::SET ) {
                if (!addReplSetShardCheck( servers, &errMsg )) {
                    return false;
                }

                // shard name defaults to the name of the replica set
                if ( name->empty() && !servers.getSetName().empty() ) {
                    *name = servers.getSetName();
                }
            }

            // In order to be accepted as a new shard, that mongod must not have any database name
            // that exists already in any other shards. If that test passes, the new shard's
            // databases are going to be entered as non-sharded db's whose primary is the
            // newly added shard.

            BSONObj resListDB;
            {
                ScopedDbConnection newShardConn(servers.toString());
                ok = newShardConn->runCommand( "admin", BSON( "listDatabases" << 1 ), resListDB );
                newShardConn.done();
            }

            if ( !ok ) {
                errMsg = str::stream() << "failed listing " << servers.toString()
                            << "'s databases:" << resListDB;;
                return false;
            }

            BSONObjIterator i( resListDB["databases"].Obj() );
            while ( i.more() ) {
                BSONObj dbEntry = i.next().Obj();
                const string& dbName = dbEntry["name"].String();
                if ( _isSpecialLocalDB( dbName ) ) {
                    // 'local', 'admin', and 'config' are system DBs and should be excluded here
                    continue;
                }
                else {
                    dbNames.push_back( dbName );
                }
            }

            if ( servers.type() == ConnectionString::SET ) {
                rsMonitor = ReplicaSetMonitor::get( servers.getSetName() );
            }
        }
        catch ( DBException& e ) {
            if ( servers.type() == ConnectionString::SET ) {
                ReplicaSetMonitor::remove( servers.getSetName() );
            }

            errMsg = str::stream() << "couldn't connect to new shard " << causedBy(e);
            return false;
        }

        // check that none of the existing shard candidate's db's exist elsewhere
        for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ) {
            DBConfigPtr config = getDBConfig( *it , false );
            if ( config.get() != NULL ) {
                ostringstream ss;
                ss << "can't add shard " << servers.toString() << " because a local database '" << *it;
                ss << "' exists in another " << config->getPrimary().toString();
//......... portions of this code omitted .........
Developer ID: Cassie90, Project: mongo, Lines: 101, Source file: grid.cpp


示例11: ns

    /**
     * Outline of the delete process:
     * 1. Initialize the client for this thread if there is no client. This is for the worker
     *    threads that are attached to any of the threads servicing client requests.
     * 2. Grant this thread authorization to perform deletes.
     * 3. Temporarily enable mode to bypass shard version checks. TODO: Replace this hack.
     * 4. Setup callback to save deletes to moveChunk directory (only if moveParanoia is true).
     * 5. Delete range.
     * 6. Wait until the majority of the secondaries catch up.
     */
    bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
                                        const RangeDeleteEntry& taskDetails,
                                        long long int* deletedDocs,
                                        std::string* errMsg) {
        const string ns(taskDetails.options.range.ns);
        const BSONObj inclusiveLower(taskDetails.options.range.minKey);
        const BSONObj exclusiveUpper(taskDetails.options.range.maxKey);
        const BSONObj keyPattern(taskDetails.options.range.keyPattern);
        const WriteConcernOptions writeConcern(taskDetails.options.writeConcern);
        const bool fromMigrate = taskDetails.options.fromMigrate;
        const bool onlyRemoveOrphans = taskDetails.options.onlyRemoveOrphanedDocs;

        const bool initiallyHaveClient = haveClient();

        if (!initiallyHaveClient) {
            Client::initThread("RangeDeleter");
        }

        *deletedDocs = 0;
        ShardForceVersionOkModeBlock forceVersion;
        {
            Helpers::RemoveSaver removeSaver("moveChunk",
                                             ns,
                                             taskDetails.options.removeSaverReason);
            Helpers::RemoveSaver* removeSaverPtr = NULL;
            if (serverGlobalParams.moveParanoia &&
                    !taskDetails.options.removeSaverReason.empty()) {
                removeSaverPtr = &removeSaver;
            }

            // log the opId so the user can use it to cancel the delete using killOp.
            unsigned int opId = txn->getCurOp()->opNum();
            log() << "Deleter starting delete for: " << ns
                  << " from " << inclusiveLower
                  << " -> " << exclusiveUpper
                  << ", with opId: " << opId
                  << endl;

            try {
                *deletedDocs =
                        Helpers::removeRange(txn,
                                             KeyRange(ns,
                                                      inclusiveLower,
                                                      exclusiveUpper,
                                                      keyPattern),
                                             false, /*maxInclusive*/
                                             writeConcern,
                                             removeSaverPtr,
                                             fromMigrate,
                                             onlyRemoveOrphans);

                if (*deletedDocs < 0) {
                    *errMsg = "collection or index dropped before data could be cleaned";
                    warning() << *errMsg << endl;

                    if (!initiallyHaveClient) {
                        txn->getClient()->shutdown();
                    }

                    return false;
                }

                log() << "rangeDeleter deleted " << *deletedDocs
                      << " documents for " << ns
                      << " from " << inclusiveLower
                      << " -> " << exclusiveUpper
                      << endl;
            }
            catch (const DBException& ex) {
                *errMsg = str::stream() << "Error encountered while deleting range: "
                                        << "ns: " << ns
                                        << " from " << inclusiveLower
                                        << " -> " << exclusiveUpper
                                        << causedBy(ex);

                if (!initiallyHaveClient) {
                    txn->getClient()->shutdown();
                }

                return false;
            }
        }

        if (!initiallyHaveClient) {
            txn->getClient()->shutdown();
        }

        return true;
    }
Developer ID: 3rf, Project: mongo, Lines: 99, Source file: range_deleter_db_env.cpp


Example 12: _runCommandForAddShard

StatusWith<ShardType> ShardingCatalogManagerImpl::_validateHostAsShard(
    OperationContext* txn,
    std::shared_ptr<RemoteCommandTargeter> targeter,
    const std::string* shardProposedName,
    const ConnectionString& connectionString) {
    // Check whether any host in the connection is already part of the cluster.
    Grid::get(txn)->shardRegistry()->reload(txn);
    for (const auto& hostAndPort : connectionString.getServers()) {
        std::shared_ptr<Shard> shard;
        shard = Grid::get(txn)->shardRegistry()->getShardNoReload(hostAndPort.toString());
        if (shard) {
            return {ErrorCodes::OperationFailed,
                    str::stream() << "'" << hostAndPort.toString() << "' "
                    << "is already a member of the existing shard '"
                    << shard->getConnString().toString()
                    << "' ("
                    << shard->getId()
                    << ")."};
        }
    }

    // Check for mongos and older version mongod connections, and whether the hosts
    // can be found for the user specified replset.
    auto swCommandResponse =
        _runCommandForAddShard(txn, targeter.get(), "admin", BSON("isMaster" << 1));
    if (!swCommandResponse.isOK()) {
        if (swCommandResponse.getStatus() == ErrorCodes::RPCProtocolNegotiationFailed) {
            // Mongos to mongos commands are no longer supported in the wire protocol
            // (because mongos does not support OP_COMMAND), similarly for a new mongos
            // and an old mongod. So the call will fail in such cases.
            // TODO: If/When mongos ever supports opCommands, this logic will break because
            // cmdStatus will be OK.
            return {ErrorCodes::RPCProtocolNegotiationFailed,
                    str::stream() << targeter->connectionString().toString()
                    << " does not recognize the RPC protocol being used. This is"
                    << " likely because it contains a node that is a mongos or an old"
                    << " version of mongod."};
        } else {
            return swCommandResponse.getStatus();
        }
    }

    // Check for a command response error
    auto resIsMasterStatus = std::move(swCommandResponse.getValue().commandStatus);
    if (!resIsMasterStatus.isOK()) {
        return {resIsMasterStatus.code(),
                str::stream() << "Error running isMaster against "
                << targeter->connectionString().toString()
                << ": "
                << causedBy(resIsMasterStatus)};
    }

    auto resIsMaster = std::move(swCommandResponse.getValue().response);

    // Check whether there is a master. If there isn't, the replica set may not have been
    // initiated. If the connection is a standalone, it will return true for isMaster.
    bool isMaster;
    Status status = bsonExtractBooleanField(resIsMaster, "ismaster", &isMaster);
    if (!status.isOK()) {
        return Status(status.code(),
                      str::stream() << "isMaster returned invalid 'ismaster' "
                      << "field when attempting to add "
                      << connectionString.toString()
                      << " as a shard: "
                      << status.reason());
    }
    if (!isMaster) {
        return {ErrorCodes::NotMaster,
                str::stream()
                << connectionString.toString()
                << " does not have a master. If this is a replica set, ensure that it has a"
                << " healthy primary and that the set has been properly initiated."};
    }

    const string providedSetName = connectionString.getSetName();
    const string foundSetName = resIsMaster["setName"].str();

    // Make sure the specified replica set name (if any) matches the actual shard's replica set
    if (providedSetName.empty() && !foundSetName.empty()) {
        return {ErrorCodes::OperationFailed,
                str::stream() << "host is part of set " << foundSetName << "; "
                << "use replica set url format "
                << "<setname>/<server1>,<server2>, ..."};
    }

    if (!providedSetName.empty() && foundSetName.empty()) {
        return {ErrorCodes::OperationFailed,
                str::stream() << "host did not return a set name; "
                << "is the replica set still initializing? "
                << resIsMaster};
    }

    // Make sure the set name specified in the connection string matches the one where its hosts
    // belong into
    if (!providedSetName.empty() && (providedSetName != foundSetName)) {
        return {ErrorCodes::OperationFailed,
                str::stream() << "the provided connection string (" << connectionString.toString()
                << ") does not match the actual set name "
                << foundSetName};
    }
//......... portions of this code omitted .........
Developer ID: CliffordOwino, Project: mongo, Lines: 101, Source file: sharding_catalog_manager_impl.cpp


Example 13: runThread


//......... portions of this code omitted .........
            // How long until the lock is forced in ms, only compared locally
            unsigned long long takeoverMS = (unsigned long long) number_field(cmdObj, "takeoverMS", 0);

            // Whether or not we should hang some threads
            int hangThreads = (int) number_field(cmdObj, "hangThreads", 0);


            boost::mt19937 gen((boost::mt19937::result_type) seed);

            boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSkew(gen, boost::uniform_int<>(0, skewRange));
            boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomWait(gen, boost::uniform_int<>(1, threadWait));
            boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSleep(gen, boost::uniform_int<>(1, threadSleep));


            int skew = 0;
            if (!lock.get()) {

                // Pick a skew, but the first two threads skew the whole range
                if(threadId == 0)
                    skew = -skewRange / 2;
                else if(threadId == 1)
                    skew = skewRange / 2;
                else skew = randomSkew() - (skewRange / 2);

                // Skew this thread
                jsTimeVirtualThreadSkew( skew );

                log() << "Initializing lock with skew of " << skew << " for thread " << threadId << endl;

                lock.reset(new DistributedLock(hostConn, lockName, takeoverMS, true ));

                log() << "Skewed time " << jsTime() << "  for thread " << threadId << endl
                      << "  max wait (with lock: " << threadWait << ", after lock: " << threadSleep << ")" << endl
                      << "  takeover in " << takeoverMS << "(ms remote)" << endl;

            }

            DistributedLock* myLock = lock.get();

            bool errors = false;
            BSONObj lockObj;
            while (keepGoing) {
                try {

                    if (myLock->lock_try("Testing distributed lock with skew.", false, &lockObj )) {

                        log() << "**** Locked for thread " << threadId << " with ts " << lockObj["ts"] << endl;

                        if( count % 2 == 1 && ! myLock->lock_try( "Testing lock re-entry.", true ) ) {
                            errors = true;
                            log() << "**** !Could not re-enter lock already held" << endl;
                            break;
                        }

                        if( count % 3 == 1 && myLock->lock_try( "Testing lock non-re-entry.", false ) ) {
                            errors = true;
                            log() << "**** !Invalid lock re-entry" << endl;
                            break;
                        }

                        count++;
                        int before = count;
                        int sleep = randomWait();
                        sleepmillis(sleep);
                        int after = count;

                        if(after != before) {
                            errors = true;
                            log() << "**** !Bad increment while sleeping with lock for: " << sleep << "ms" << endl;
                            break;
                        }

                        // Unlock only half the time...
                        if(hangThreads == 0 || threadId % hangThreads != 0) {
                            log() << "**** Unlocking for thread " << threadId << " with ts " << lockObj["ts"] << endl;
                            myLock->unlock( &lockObj );
                        }
                        else {
                            log() << "**** Not unlocking for thread " << threadId << endl;
                            DistributedLock::killPinger( *myLock );
                            // We're simulating a crashed process...
                            break;
                        }
                    }

                }
                catch( LockException& e ) {
                    log() << "*** !Could not try distributed lock." << causedBy( e ) << endl;
                    break;
                }

                sleepmillis(randomSleep());
            }

            result << "errors" << errors
                   << "skew" << skew
                   << "takeover" << (long long) takeoverMS
                   << "localTimeout" << (takeoverMS > 0);

        }
Developer ID: MSchireson, Project: mongo, Lines: 101, Source file: distlock_test.cpp


Example 14: dm

    DBClientBase* DBClientReplicaSet::callLazy( Message& toSend ) {
        if ( toSend.operation() == dbQuery ) {
            // TODO: might be possible to do this faster by changing api
            DbMessage dm( toSend );
            QueryMessage qm( dm );
            if ( qm.queryOptions & QueryOption_SlaveOk ) {
                for ( int i=0; i<3; i++ ) {
                    try {
                        return checkSlave()->callLazy( toSend );
                    }
                    catch ( DBException &e ) {
                    	LOG(1) << "can't callLazy replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
                    }
                }
            }
        }

        return checkMaster()->callLazy( toSend );
    }
Developer ID: LsRbls, Project: mongo, Lines: 19, Source file: dbclient_rs.cpp


Example 15: while

void Balancer::run() {
    Client::initThread("Balancer");

    // This is the body of a BackgroundJob so if we throw here we're basically
    // ending the balancer thread prematurely.
//......... portions of this code omitted .........