Java class org.jboss.netty.channel.socket.nio.NioServerBossPool usage examples

PinpointServerSocket.java source (project: apm-agent)

private ServerBootstrap createBootStrap(int bossCount, int workerCount) {
    // profiler, collector
    ExecutorService boss = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Boss"));
    NioServerBossPool nioServerBossPool = new NioServerBossPool(boss, bossCount, ThreadNameDeterminer.CURRENT);
    ExecutorService worker = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Worker"));
    NioWorkerPool nioWorkerPool = new NioWorkerPool(worker, workerCount, ThreadNameDeterminer.CURRENT);
    NioServerSocketChannelFactory nioServerSocketChannelFactory = new NioServerSocketChannelFactory(nioServerBossPool, nioWorkerPool);
    return new ServerBootstrap(nioServerSocketChannelFactory);
}
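The bootstrap returned above is only half the story: it still needs a pipeline factory and a bind() call before it accepts connections, and the pools must be released at shutdown. A minimal, self-contained sketch of that lifecycle, assuming an illustrative port and a placeholder handler (neither appears in the apm-agent source):

import java.net.InetSocketAddress;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.nio.NioServerBossPool;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioWorkerPool;
import org.jboss.netty.util.ThreadNameDeterminer;

public class NioServerBossPoolSketch {
    public static void main(String[] args) {
        // Build the pools as above, but with plain executors instead of PinpointThreadFactory.
        ExecutorService boss = Executors.newCachedThreadPool();
        ExecutorService worker = Executors.newCachedThreadPool();
        NioServerBossPool bossPool = new NioServerBossPool(boss, 1, ThreadNameDeterminer.CURRENT);
        NioWorkerPool workerPool = new NioWorkerPool(worker, 4, ThreadNameDeterminer.CURRENT);

        ServerBootstrap bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(bossPool, workerPool));
        bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
            public ChannelPipeline getPipeline() {
                return Channels.pipeline(new SimpleChannelUpstreamHandler()); // placeholder no-op handler
            }
        });
        Channel serverChannel = bootstrap.bind(new InetSocketAddress(9994)); // illustrative port

        // ... later, on shutdown:
        serverChannel.close().awaitUninterruptibly();
        bootstrap.releaseExternalResources(); // releases the pools and the executors behind them
    }
}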
NettyServerTransport.java source (project: mandrel)
public void start() {
    if (local) {
        channelFactory = new DefaultLocalServerChannelFactory();
    } else {
        bossExecutor = nettyServerConfig.getBossExecutor();
        int bossThreadCount = nettyServerConfig.getBossThreadCount();
        ioWorkerExecutor = nettyServerConfig.getWorkerExecutor();
        int ioWorkerThreadCount = nettyServerConfig.getWorkerThreadCount();
        channelFactory = new NioServerSocketChannelFactory(
                new NioServerBossPool(bossExecutor, bossThreadCount, ThreadNameDeterminer.CURRENT),
                new NioWorkerPool(ioWorkerExecutor, ioWorkerThreadCount, ThreadNameDeterminer.CURRENT));
    }
    start(channelFactory);
}
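start() stashes the chosen factory in channelFactory, which is what the transport must release when it shuts down. A hedged sketch of the matching stop path (the serverChannel field and the stop() signature are assumptions, not necessarily mandrel's actual code):

// Hypothetical stop() counterpart: close the server channel, then release the
// factory, which in the NIO branch shuts down the boss and worker pools
// constructed in start().
public void stop() {
    if (serverChannel != null) {
        serverChannel.close().awaitUninterruptibly();
        serverChannel = null;
    }
    if (channelFactory != null) {
        channelFactory.releaseExternalResources();
        channelFactory = null;
    }
}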
PinpointServerAcceptor.java source (project: pinpoint)
private ServerBootstrap createBootStrap(int bossCount, int workerCount) {
    // profiler, collector
    ExecutorService boss = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Boss", true));
    NioServerBossPool nioServerBossPool = new NioServerBossPool(boss, bossCount, ThreadNameDeterminer.CURRENT);
    ExecutorService worker = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Worker", true));
    NioWorkerPool nioWorkerPool = new NioWorkerPool(worker, workerCount, ThreadNameDeterminer.CURRENT);
    NioServerSocketChannelFactory nioServerSocketChannelFactory = new NioServerSocketChannelFactory(nioServerBossPool, nioWorkerPool);
    return new ServerBootstrap(nioServerSocketChannelFactory);
}
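This is the same wiring as the apm-agent variant above; the only difference is the second PinpointThreadFactory argument, which marks the pooled threads as daemons so they cannot keep the JVM alive on exit. For readers without the Pinpoint classes, a plain-JDK factory with the same shape might look like this (the class name is illustrative):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative stand-in for PinpointThreadFactory(name, daemon): prefixes thread
// names and optionally marks threads as daemon so they don't block JVM exit.
public class NamedDaemonThreadFactory implements ThreadFactory {
    private final AtomicInteger seq = new AtomicInteger(0);
    private final String prefix;
    private final boolean daemon;

    public NamedDaemonThreadFactory(String prefix, boolean daemon) {
        this.prefix = prefix;
        this.daemon = daemon;
    }

    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, prefix + "-" + seq.incrementAndGet());
        t.setDaemon(daemon);
        return t;
    }
}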
RaftAgent.java source (project: libraft)
/**
 * Initialize the local Raft server.
 * <p/>
 * Sets up the service implementation classes, creates database
 * tables and starts any thread pools necessary. Following this
 * call all service classes are <strong>fully initialized</strong>.
 * Even though various threads are started they <strong>will not</strong>
 * use or interact with the service implementation classes. Callers
 * still have exclusive access to the system.
 * <p/>
 * This method should <strong>only</strong> be called once, before {@link RaftAgent#start()}.
 *
 * @throws StorageException if the persistence components cannot be initialized
 * @throws IllegalStateException if this method is called multiple times
 */
public synchronized void initialize() throws StorageException {
    checkState(!running);
    checkState(!initialized);
    checkState(setupConversion);

    // start up the snapshots subsystem
    snapshotStore.initialize();

    // check that the snapshot metadata and the filesystem agree
    // FIXME (AG): this _may_ be expensive, especially if the user never bothers to clean out snapshots!
    // FIXME (AG): warning, warning - this is upfront work - probably a very, very bad idea
    snapshotStore.reconcileSnapshots();

    // initialize the log and store
    jdbcLog.initialize();
    jdbcStore.initialize();

    // initialize the various thread pools
    nonIoExecutorService = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    ioExecutorService = Executors.newCachedThreadPool();
    serverBossPool = new NioServerBossPool(ioExecutorService, 1);
    clientBossPool = new NioClientBossPool(ioExecutorService, 1);
    workerPool = new NioWorkerPool(ioExecutorService, 3);

    // TODO (AG): avoid creating threads in the initialize() method
    // initialize the networking subsystem
    sharedWorkerPool = new ShareableWorkerPool<NioWorker>(workerPool);
    ServerSocketChannelFactory serverChannelFactory = new NioServerSocketChannelFactory(serverBossPool, sharedWorkerPool);
    ClientSocketChannelFactory clientChannelFactory = new NioClientSocketChannelFactory(clientBossPool, sharedWorkerPool);
    raftNetworkClient.initialize(nonIoExecutorService, serverChannelFactory, clientChannelFactory, raftAlgorithm);
    raftAlgorithm.initialize();

    initialized = true;
}
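Note the ShareableWorkerPool: it lets the server and client channel factories share one set of NioWorkers, but it also means releasing either factory alone will not stop those workers; the shared pool has to be destroyed explicitly. A minimal teardown sketch under that assumption, mirroring initialize() and assuming the two factories were kept as fields rather than locals (libraft's real shutdown logic may differ):

// Hypothetical teardown; ordering is illustrative: release the factories first,
// then destroy the shared pool, then stop the raw executors behind everything.
serverChannelFactory.releaseExternalResources();
clientChannelFactory.releaseExternalResources();
sharedWorkerPool.destroy(); // the shared NioWorkers are only actually stopped here
ioExecutorService.shutdownNow();
nonIoExecutorService.shutdown();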
NettyServerBossPoolBuilder.java source (project: Camel)
/**
 * Creates a new boss pool.
 */
public BossPool build() {
    return new NioServerBossPool(Executors.newCachedThreadPool(), bossCount, new CamelNettyThreadNameDeterminer(pattern, name));
}
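The third constructor argument is a ThreadNameDeterminer, the Netty 3 hook this builder uses to rename boss threads. CamelNettyThreadNameDeterminer applies the component's configured pattern; an illustrative determiner with the same shape (the renaming scheme here is an assumption, not Camel's exact pattern expansion):

import org.jboss.netty.util.ThreadNameDeterminer;

// Illustrative ThreadNameDeterminer: prefixes Netty's proposed thread name with
// a fixed tag, roughly what a configured naming pattern expands to.
public class PrefixThreadNameDeterminer implements ThreadNameDeterminer {
    private final String prefix;

    public PrefixThreadNameDeterminer(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public String determineThreadName(String currentThreadName, String proposedThreadName) throws Exception {
        return prefix + ":" + proposedThreadName;
    }
}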
ThriftServer.java source (project: mandrel)
@Inject
public ThriftServer(final NiftyProcessor processor, ThriftServerConfig config, @ThriftServerTimer Timer timer,
        Map<String, ThriftFrameCodecFactory> availableFrameCodecFactories, Map<String, TDuplexProtocolFactory> availableProtocolFactories,
        @ThriftServerWorkerExecutor Map<String, ExecutorService> availableWorkerExecutors, NiftySecurityFactoryHolder securityFactoryHolder, boolean local) {
    checkNotNull(availableFrameCodecFactories, "availableFrameCodecFactories cannot be null");
    checkNotNull(availableProtocolFactories, "availableProtocolFactories cannot be null");

    NiftyProcessorFactory processorFactory = new NiftyProcessorFactory() {
        @Override
        public NiftyProcessor getProcessor(TTransport transport) {
            return processor;
        }
    };

    String transportName = config.getTransportName();
    String protocolName = config.getProtocolName();
    checkState(availableFrameCodecFactories.containsKey(transportName), "No available server transport named " + transportName);
    checkState(availableProtocolFactories.containsKey(protocolName), "No available server protocol named " + protocolName);

    workerExecutor = config.getOrBuildWorkerExecutor(availableWorkerExecutors);

    if (local) {
        log.warn("Using local server");
        configuredPort = 0;
        ioThreads = 0;
        ioExecutor = null;
        acceptorThreads = 0;
        acceptorExecutor = null;
        serverChannelFactory = new DefaultLocalServerChannelFactory();
    } else {
        configuredPort = config.getPort();
        acceptorExecutor = newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("thrift-acceptor-%s").build());
        acceptorThreads = config.getAcceptorThreadCount();
        ioExecutor = newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("thrift-io-%s").build());
        ioThreads = config.getIoThreadCount();
        serverChannelFactory = new NioServerSocketChannelFactory(
                new NioServerBossPool(acceptorExecutor, acceptorThreads, ThreadNameDeterminer.CURRENT),
                new NioWorkerPool(ioExecutor, ioThreads, ThreadNameDeterminer.CURRENT));
    }

    ThriftServerDef thriftServerDef = ThriftServerDef.newBuilder().name("thrift").listen(configuredPort)
            .limitFrameSizeTo((int) config.getMaxFrameSize().toBytes()).clientIdleTimeout(config.getIdleConnectionTimeout())
            .withProcessorFactory(processorFactory).limitConnectionsTo(config.getConnectionLimit())
            .limitQueuedResponsesPerConnection(config.getMaxQueuedResponsesPerConnection())
            .thriftFrameCodecFactory(availableFrameCodecFactories.get(transportName)).protocol(availableProtocolFactories.get(protocolName))
            .withSecurityFactory(securityFactoryHolder.niftySecurityFactory).using(workerExecutor).taskTimeout(config.getTaskExpirationTimeout()).build();

    NettyServerConfigBuilder nettyServerConfigBuilder = NettyServerConfig.newBuilder();
    nettyServerConfigBuilder.getServerSocketChannelConfig().setBacklog(config.getAcceptBacklog());
    nettyServerConfigBuilder.setBossThreadCount(config.getAcceptorThreadCount());
    nettyServerConfigBuilder.setWorkerThreadCount(config.getIoThreadCount());
    nettyServerConfigBuilder.setTimer(timer);
    NettyServerConfig nettyServerConfig = nettyServerConfigBuilder.build();

    transport = new NettyServerTransport(thriftServerDef, nettyServerConfig, allChannels, local);
}
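The final constructor argument handed to NettyServerTransport is allChannels, a Netty 3 ChannelGroup used to tear down the listening socket and every accepted connection in one step. A minimal sketch of that idiom (variable names are illustrative; Nifty's actual handler wiring is more involved):

import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.DefaultChannelGroup;

// Every channel the server opens gets registered in one group...
ChannelGroup allChannels = new DefaultChannelGroup("thrift-channels");
allChannels.add(serverChannel); // e.g. after bind(), and again for each accepted child
// ...so shutdown is a single collective close, followed by releasing the factory.
allChannels.close().awaitUninterruptibly();
serverChannelFactory.releaseExternalResources();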