C++ ACE_GUARD macro usage examples

The source-code examples below, collected from several open-source projects, show how the C++ ACE_GUARD macro is used in practice.
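
Before the project examples, here is a minimal self-contained sketch of the typical ACE_GUARD pattern. The mutex and counter names below are hypothetical, used only for illustration. ACE_GUARD declares an ACE_Guard<LOCK> object on the stack that acquires the given lock and releases it automatically when the enclosing scope exits; if the lock cannot be acquired, the macro returns from the current function. The companion macro ACE_GUARD_RETURN does the same but returns a caller-supplied value, for use in non-void functions.

#include "ace/Guard_T.h"        // ACE_GUARD / ACE_GUARD_RETURN (in recent ACE versions)
#include "ace/Thread_Mutex.h"   // ACE_Thread_Mutex

// Hypothetical shared state, used only to illustrate the macro.
static ACE_Thread_Mutex g_counter_lock;
static int g_counter = 0;

void increment_counter ()
{
  // Declares ACE_Guard<ACE_Thread_Mutex> mon, acquires g_counter_lock here,
  // and releases it when the scope exits.  If acquisition fails, the macro
  // returns from the function immediately.
  ACE_GUARD (ACE_Thread_Mutex, mon, g_counter_lock);

  ++g_counter;
}

int read_counter ()
{
  // Non-void functions use ACE_GUARD_RETURN and supply the failure value.
  ACE_GUARD_RETURN (ACE_Thread_Mutex, mon, g_counter_lock, -1);

  return g_counter;
}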

Author: hermanzh    Project: serve
void AddNewSockets()
        {
            ACE_GUARD(ACE_Thread_Mutex, Guard, m_NewSockets_Lock);

            if (m_NewSockets.empty())
                return;

            for (SocketSet::const_iterator i = m_NewSockets.begin(); i != m_NewSockets.end(); ++i)
            {
                WorldSocket* sock = (*i);

                if (sock->IsClosed())
                {
                    sock->RemoveReference();
                    --m_Connections;
                }
                else
                    m_Sockets.insert(sock);
            }

            m_NewSockets.clear();
        }

Author: helixu    Project: wow-cat
void
Client_Service_Handler::handle_write_stream (
  const ACE_Asynch_Write_Stream::Result &result)
{
  ACE_GUARD (ACE_SYNCH_RECURSIVE_MUTEX, guard, this->mtx_);

  this->pending_writes_--;

  if (!result.success () || 0 == result.bytes_transferred ())
  {
    // Error

    result.message_block ().release ();

    ACE_DEBUG ((LM_DEBUG, ACE_TEXT (
      "Client_Service_Handler::handle_write_stream: error: %d\n"),
      result.error ()));

    this->cancel_and_close ();
  }
  else if (result.bytes_transferred () < result.bytes_to_write ())
  {
    // More to write...

    if (this->write (result.message_block(),
      result.bytes_to_write () - result.bytes_transferred ()) < 0)
    {
      result.message_block ().release ();

      this->cancel_and_close ();
    }
  }
  else
  {
    // Wrote it all

    result.message_block ().release ();
  }
}

Author: FrenchCOR    Project: Serve
void ObjectAccessor::RemoveCorpse(Corpse* corpse)
{
    ASSERT(corpse && corpse->GetType() != CORPSE_BONES);

    //TODO: more works need to be done for corpse and other world object
    if (Map* map = corpse->FindMap())
    {
        corpse->DestroyForNearbyPlayers();
        if (corpse->IsInGrid())
            map->Remove(corpse, false);
        else
        {
            corpse->RemoveFromWorld();
            corpse->ResetMap();
        }
    }
    else
        corpse->RemoveFromWorld();

    // Critical section
    {
        //TRINITY_WRITE_GUARD(ACE_RW_Thread_Mutex, i_corpseLock);
        ACE_GUARD(LockType, g, i_corpseGuard);

        Player2CorpsesMapType::iterator iter = i_player2corpse.find(corpse->GetOwnerGUID());
        if (iter == i_player2corpse.end()) // TODO: Fix this
            return;

        CellPair cell_pair = Trinity::ComputeCellPair(corpse->GetPositionX(), corpse->GetPositionY());
        uint32 cell_id = (cell_pair.y_coord * TOTAL_NUMBER_OF_CELLS_PER_MAP) + cell_pair.x_coord;
        // build mapid*cellid -> guid_set map
        CellCoord cellCoord = Trinity::ComputeCellCoord(corpse->GetPositionX(), corpse->GetPositionY());
        // sObjectMgr->DeleteCorpseCellData(corpse->GetMapId(), cellCoord.GetId(), GUID_LOPART(corpse->GetOwnerGUID()));
        sObjectMgr->DeleteCorpseCellData(corpse->GetMapId(), cell_id, GUID_LOPART(corpse->GetOwnerGUID()));

        i_player2corpse.erase(iter);
    }
}

Author: OspreyHu    Project: ATC
void
CC_LockSet::unlock (CosConcurrencyControl::lock_mode mode)
{
  ORBSVCS_DEBUG ((LM_DEBUG,
              "CC_LockSet::unlock\n"));

  CC_LockModeEnum lm = lmconvert (mode);

  ACE_GUARD (TAO_SYNCH_MUTEX, ace_mon, this->mlock_);

  if (lock_[lm] == 0) // This lock is not held.
    throw CosConcurrencyControl::LockNotHeld();
  else
    lock_[lm]--;

  // If we do not have a lock held in a weaker mode than the
  // strongest held and we have requests on the semaphore signal
  // the semaphore.
  while (lock_queue_.size () > 0)
    {
      CC_LockModeEnum lock_on_queue = CC_EM;

      lock_queue_.dequeue_head (lock_on_queue);

      if (compatible (lock_on_queue) == 1)
        {
          if (semaphore_.release () == -1)
            throw CORBA::INTERNAL ();
          lock_[lock_on_queue]++;
        }
      else
        {
          lock_queue_.enqueue_head (lock_on_queue);
          break;
        }
    }
  this->dump ();
}

Author: edenduthi    Project: rtft
void P3LeafMesh::createRemoteService(const SAPInfo* hint, const UUIDPtr& uuid, const UUIDPtr& sid, ServiceParamsPtr& params, UUIDPtr& iid) throw (ServiceException&) {
    ACE_GUARD(ACE_SYNCH_RECURSIVE_MUTEX, ace_mon, m_lock);
    if (hint == 0) {
        throw ServiceException(ServiceException::SERVICE_WITHOUT_IMPL);
    }
    Endpoint endpoint;
    ACE_Connector<P3MeshClientHandler, ACE_SOCK_Connector> connector;
    hint->getFirstEndpoint(endpoint);
    QoSEndpoint qosE = *(endpoint.getQoS());
    UUIDPtr runtimeUUID;
    getUUID(runtimeUUID);
    UUIDPtr fid;
    getFID(fid);
    P3MeshClientHandler* clientHandler = new P3MeshClientHandler(
            runtimeUUID,
            fid,
            qosE,
            false, false, 0, 0, 0, 0);

    if (connector.connect(clientHandler, endpoint.getAddr()) == -1) {
        ACE_ERROR((LM_ERROR, ACE_TEXT("(%T)%@\n"),
                ACE_TEXT("(%T)ERROR: P3Mesh::createRemoteService - connect failed:")));
        // Connect failed: release the handler and bail out before it would be
        // dereferenced below.
        clientHandler->close();
        delete clientHandler;
        return;
    } else {
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3LeafMesh::createRemoteService - Connect OK!\n")));
    }

    int ret = clientHandler->createService(params, iid);
    clientHandler->close();
    delete clientHandler;
    if (ret == -1) {
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3LeafMesh::createRemoteService - failed to create, not enough resources\n")));
        throw ServiceException(ServiceException::INSUFFICIENT_RESOURCES);
    }
    ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3LeafMesh::createRemoteService - service created!\n")));
}

Author: akostriko    Project: ATC
void
LiveEntry::status (LiveStatus l)
{
  {
    ACE_GUARD (TAO_SYNCH_MUTEX, mon, this->lock_);
    this->liveliness_ = l;
    if (l == LS_ALIVE)
      {
        ACE_Time_Value now (ACE_High_Res_Timer::gettimeofday_hr());
        this->next_check_ = now + owner_->ping_interval();
      }
    if (l == LS_TRANSIENT && !this->reping_available())
      {
        this->liveliness_ = LS_LAST_TRANSIENT;
      }
  }
  this->update_listeners ();

  if (this->listeners_.size() > 0)
    {
      if (ImR_Locator_i::debug () > 2)
        {
          ORBSVCS_DEBUG ((LM_DEBUG,
                          ACE_TEXT ("(%P|%t) LiveEntry::status change, ")
                          ACE_TEXT ("server = %C status = %s\n"),
                          this->server_.c_str(),
                          status_name (this->liveliness_)));
        }
      this->owner_->schedule_ping (this);
    }
  else
    {
      if (this->owner_->remove_per_client_entry (this))
        {
          delete (this);
        }
    }
}

Author: OspreyHu    Project: ATC
void
TAO_Object_Adapter::close (int wait_for_completion)
{
  this->check_close (wait_for_completion);

  // Shutting down the ORB causes all object adapters to be destroyed,
  // since they cannot exist in the absence of an ORB. Shut down is
  // complete when all ORB processing (including request processing
  // and object deactivation or other operations associated with
  // object adapters) has completed and the object adapters have been
  // destroyed. In the case of the POA, this means that all object
  // etherealizations have finished and root POA has been destroyed
  // (implying that all descendent POAs have also been destroyed).
  TAO_Root_POA *root = 0;
#if (TAO_HAS_MINIMUM_POA == 0) && !defined (CORBA_E_COMPACT) && !defined (CORBA_E_MICRO)
  TAO_POAManager_Factory* factory = 0;
#endif
  {
    ACE_GUARD (ACE_Lock, ace_mon, this->lock ());
    if (this->root_ == 0)
      return;
    root = this->root_;
    this->root_ = 0;

#if (TAO_HAS_MINIMUM_POA == 0) && !defined (CORBA_E_COMPACT) && !defined (CORBA_E_MICRO)
    if (this->poa_manager_factory_ == 0)
      return;
    factory = this->poa_manager_factory_;
    this->poa_manager_factory_ = 0;
#endif
  }
  CORBA::Boolean etherealize_objects = true;
  root->destroy (etherealize_objects, wait_for_completion);
  ::CORBA::release (root);
#if (TAO_HAS_MINIMUM_POA == 0) && !defined (CORBA_E_COMPACT) && !defined (CORBA_E_MICRO)
  release_poa_manager_factory (factory);
#endif
}

Author: FrenchCOR    Project: Serve
void ObjectAccessor::Update(uint32 /*diff*/) {
	UpdateDataMapType update_players;

	// Critical section
	{
		ACE_GUARD(LockType, g, i_updateGuard);

		while (!i_objects.empty()) {
			Object* obj = *i_objects.begin();
			ASSERT(obj && obj->IsInWorld());
			i_objects.erase(i_objects.begin());
			obj->BuildUpdate(update_players);
		}
	}

	WorldPacket packet; // here we allocate a std::vector with a size of 0x10000
	for (UpdateDataMapType::iterator iter = update_players.begin();
			iter != update_players.end(); ++iter) {
		iter->second.BuildPacket(&packet);
		iter->first->GetSession()->SendPacket(&packet);
		packet.clear(); // clean the string
	}
}

Author: OspreyHu    Project: ATC
void
TAO_CEC_MT_Dispatching::activate (void)
{
  ACE_GUARD (TAO_SYNCH_MUTEX, ace_mon, this->lock_);

  if (this->active_ != 0)
    return;

  this->active_ = 1;

  if (this->task_.activate (this->thread_creation_flags_,
                            this->nthreads_,
                            1,
                            this->thread_priority_) == -1)
    {
      if (this->force_activate_ != 0)
        {
          if (this->task_.activate (THR_BOUND, this->nthreads_) == -1)
            ORBSVCS_ERROR ((LM_ERROR,
                        "EC (%P|%t) cannot activate dispatching queue"));
        }
    }
}

Author: Blumfiel    Project: TBCPv
template <class ACE_LOCK, class ALLOCATOR> void
ACE_Timeprobe_Ex<ACE_LOCK, ALLOCATOR>::timeprobe (u_long event)
{
  ACE_GUARD (ACE_LOCK, ace_mon, this->lock_);

  this->timeprobes_[this->current_size_].event_.event_number_ = event;
  this->timeprobes_[this->current_size_].event_type_ = ACE_timeprobe_t::NUMBER;
  this->timeprobes_[this->current_size_].time_ = ACE_OS::gethrtime ();
  this->timeprobes_[this->current_size_].thread_ = ACE_OS::thr_self ();

  ++this->current_size_;

#if !defined (ACE_TIMEPROBE_ASSERTS_FIXED_SIZE)
  // wrap around to the beginning on overflow
  if (this->current_size_ >= this->max_size_)
    {
      this->current_size_ = 0;
      this->report_buffer_full_ = 1;
    }
#endif /* ACE_TIMEPROBE_ASSERTS_FIXED_SIZE */

  ACE_ASSERT (this->current_size_ < this->max_size_);
}

Author: OspreyHu    Project: ATC
void
TAO_Notify_ThreadPool_Supplier::subscription_change (const CosNotification::EventTypeSeq & added,
                                      const CosNotification::EventTypeSeq & /*removed */
                                      )
{
  ACE_GUARD (TAO_SYNCH_MUTEX, mon, this->lock_);

  // Count the number of consumers connected and signal the supplier thread
  // when the expected count has connected.  Only one consumer connects at a time.
  if (added.length () > 0)
    {
      // Set the domain and type names in the event's fixed header.
      this->event_[consumer_count_].header.fixed_header.event_type.domain_name = CORBA::string_dup(added[0].domain_name);
      this->event_[consumer_count_].header.fixed_header.event_type.type_name = CORBA::string_dup(added[0].type_name);

      ++this->consumer_count_;

      ACE_DEBUG ((LM_DEBUG, "(%P,%t) Received Type %d: (%s)\n", this->consumer_count_, added[0].type_name.in ()));

      if (this->consumer_count_ == this->expected_consumer_count_)
        this->consumers_connected_.signal ();
    }
}

Author: CCJ    Project: AC
void
Client_Service_Handler::open (ACE_HANDLE h, ACE_Message_Block&)
{
  ACE_GUARD (ACE_SYNCH_RECURSIVE_MUTEX, guard, this->mtx_);

  if (this->ssl_stream_.open (*this, h, 0, this->proactor ()) != 0)
  {
    ACE_DEBUG ((LM_DEBUG, ACE_TEXT (
      "Client_Service_Handler::open: ACE_SSL_Asynch_Stream::open failed, %d\n"),
      (int)errno));
    this->cancel_and_close ();
  }
  else
  {
    ACE_Message_Block *mb = 0;
    ACE_NEW_NORETURN(mb, ACE_Message_Block (DATA_SIZE));

    if (this->read_data () < 0 || this->write_data () < 0)
    {
      this->cancel_and_close ();
    }
  }
}

Author: asdlei0    Project: AC
TAO_CEC_ProxyPushConsumer_Guard::
    ~TAO_CEC_ProxyPushConsumer_Guard (void)
{
  // This access is safe because guard objects are created on the
  // stack, only one thread has access to them
  if (!this->locked_)
    return;

  {
    ACE_GUARD (ACE_Lock, ace_mon, *this->lock_);
    // If the guard fails there is not much we can do, raising an
    // exception is wrong, the client has *no* way to handle that kind
    // of error.  Even worse, there is no exception to raise in that
    // case.
    // @@ Returning something won't work either, the error should be
    // logged though!

    --this->refcount_;
    if (this->refcount_ != 0)
      return;
  }
  this->event_channel_->destroy_proxy (this->proxy_);
}

Author: CCJ    Project: AC
void ConnectionCache::close_all_connections()
      {
        INET_TRACE ("ConnectionCache::close_all_connections");

        ACE_MT (ACE_GUARD (ACE_SYNCH_MUTEX,
                           guard_,
                           this->lock_));

        map_iter_type iter = this->cache_map_.end ();
        for (iter = this->cache_map_.begin ();
             iter != this->cache_map_.end ();
             ++iter)
          {
            if ((*iter).int_id_.state () != ConnectionCacheValue::CST_CLOSED)
              {
                connection_type* conn = (*iter).int_id_.connection ();
                (*iter).int_id_.connection (0);
                (*iter).int_id_.state (ConnectionCacheValue::CST_CLOSED);
                delete conn;
              }
          }
        this->cache_map_.unbind_all ();
      }

Author: ProjectStarGat    Project: StarGate-Plus-EM
void WorldLog::outLog(char const *fmt, ...) {
	if (LogWorld()) {
		ACE_GUARD(ACE_Thread_Mutex, Guard, Lock);
		ASSERT(i_file);

		va_list args;
		va_start(args, fmt);
		vfprintf(i_file, fmt, args);
		//fprintf(i_file, "\n");
		va_end(args);

		fflush(i_file);
	}

	if (sLog->GetLogDB() && m_dbWorld) {
		va_list ap2;
		va_start(ap2, fmt);
		char nnew_str[MAX_QUERY_LEN];
		vsnprintf(nnew_str, MAX_QUERY_LEN, fmt, ap2);
		sLog->outDB(LOG_TYPE_WORLD, nnew_str);
		va_end(ap2);
	}
}

Author: OspreyHu    Project: ATC
TAO_AMH_Response_Handler::~TAO_AMH_Response_Handler (void)
{
  this->transport_->remove_reference ();

  // Since we are destroying the object we put a huge lock around the
  // whole destruction process (just paranoid).
  {
    ACE_GUARD (TAO_SYNCH_MUTEX, ace_mon, this->mutex_);

    if (this->response_expected_ == 0) //oneway ?
      {
        return;
      }

    // If the ResponseHandler is being destroyed before a reply has
    // been sent to the client, we send a system exception
    // CORBA::NO_RESPONSE, with minor code to indicate the problem.
    if (this->rh_reply_status_ == TAO_RS_SENT)
      {
        return;
      }
  }

  // If sending the exception to the client fails, then we just give
  // up, release the transport and return.
  try
    {
      CORBA::NO_RESPONSE ex (CORBA::SystemException::_tao_minor_code
                             (TAO_AMH_REPLY_LOCATION_CODE,
                              EFAULT),
                             CORBA::COMPLETED_NO);
      this->_tao_rh_send_exception (ex);
    }
  catch (...)
    {
    }
}

Author: svn2githu    Project: OpenDD
void
ReliableSession::synack_received(ACE_Message_Block* control)
{
  if (! this->active_) return; // sub send syn, then doesn't receive them.

  // Already received ack.
  if (this->acked_) return;

  const TransportHeader& header =
    this->link_->receive_strategy()->received_header();

  // Not from the remote peer for this session.
  if (this->remote_peer_ != header.source_) return;

  Serializer serializer(control, header.swap_bytes());

  MulticastPeer local_peer;
  serializer >> local_peer; // sent as remote_peer

  // Ignore sample if not destined for us:
  if (local_peer != this->link_->local_peer()) return;

  {
    ACE_GUARD(ACE_SYNCH_MUTEX,
              guard,
              this->ack_lock_);

    if (this->acked_) return; // already acked

    this->syn_watchdog_.cancel();
    this->acked_ = true;
  }

  // Force the TransportImpl to re-evaluate pending associations
  // after deliver synack to every session.
  this->link_->set_check_fully_association();
}

Author: edenduthi    Project: rtft
void P3ReplicationGroup::onPeerClosure(UUIDPtr& uuid) {
    ACE_GUARD(ACE_SYNCH_RECURSIVE_MUTEX, mon, m_lock);
    ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::onPeerClosure uuid=%s\n"), uuid->toString().c_str()));
    bool isPrimary = this->isPeerPrimary(uuid);


    if (isPrimary) {
        if (m_replicas.size() == 0) {
            ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::onPeerClosure - PRIMARY CLOSED with no replicas available?\n")));
            this->close();
            return;
        }
        this->m_primaryPeer = m_replicas.front();
        m_replicas.pop_front();
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::onPeerClosure - PRIMARY CLOSED\n")));
        this->printMembers();
        if (this->isThisPeerPrimary()) {
            ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::onPeerClosure - NEW PRIMARY\n")));
            if (!fireOnChangeToPrimary()) {
                ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::onPeerClosure - failed to change service mode!n")));
                this->close();
            }
            UUIDPtr iid;
            this->m_svcPtr->getIID(iid);
            UUIDPtr currentReplicaIID(iid);
            ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::onPeerClosure - NEW PRIMARY - Change IID from (%s) to (%s)\n"),
                    currentReplicaIID->toString().c_str(), m_groupUUID->toString().c_str()));
            m_ft->getOverlay()->getRuntime()->changeIIDOfService(m_sid, currentReplicaIID, m_groupUUID); //m_iid);

            return;
        }
    } else {
        this->removeReplica(uuid); //updates the group, a new primary is implicit elected
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::onPeerClosure - REPLICA CLOSED\n")));
        this->printMembers();
    }
}

Author: tempbottl    Project: OpenDD
void
RecorderImpl::remove_all_associations()
{
  DBG_ENTRY_LVL("RecorderImpl","remove_all_associations",6);

  OpenDDS::DCPS::WriterIdSeq writers;
  int size;

  ACE_GUARD(ACE_Recursive_Thread_Mutex, guard, this->publication_handle_lock_);

  {
    ACE_READ_GUARD(ACE_RW_Thread_Mutex, read_guard, this->writers_lock_);

    size = static_cast<int>(writers_.size());
    writers.length(size);

    WriterMapType::iterator curr_writer = writers_.begin();
    WriterMapType::iterator end_writer = writers_.end();

    int i = 0;

    while (curr_writer != end_writer) {
      writers[i++] = curr_writer->first;
      ++curr_writer;
    }
  }

  try {
    CORBA::Boolean dont_notify_lost = 0;

    if (0 < size) {
      remove_associations(writers, dont_notify_lost);
    }

  } catch (const CORBA::Exception&) {
  }
}

Author: manu    Project: TA
void
  Client_Request_Interceptor::send_request (PortableInterceptor::ClientRequestInfo_ptr ri)
  {
    // Test TC
    test_transport_current (ACE_TEXT ("send_request"));

    CORBA::Boolean const response_expected =
      ri->response_expected ();

    // Oneway?
    if (response_expected)
      ACE_DEBUG ((LM_DEBUG, ACE_TEXT("CRI    (%P|%t) Sending a two-way\n")));
    else
      ACE_DEBUG ((LM_DEBUG, ACE_TEXT("CRI    (%P|%t) Sending a one-way\n")));

    // Make the context to send the context to the target
    IOP::ServiceContext sc;
    sc.context_id = Test::Transport::CurrentTest::ContextTag;

    // How long can a number really get?
    char temp[32];
    {
      ACE_GUARD (TAO_SYNCH_MUTEX, monitor, this->lock_);

      ACE_OS::sprintf (temp, "%ld", this->requestID_);
      ++this->requestID_;
    }

    CORBA::ULong string_len = ACE_OS::strlen (temp) + 1;
    CORBA::Octet *buf = CORBA::OctetSeq::allocbuf (string_len);
    ACE_OS::strcpy (reinterpret_cast <char *> (buf), temp);

    sc.context_data.replace (string_len, string_len, buf, 1);

    // Add this context to the service context list.
    ri->add_request_service_context (sc, 0);
  }

