Netty Source Code Analysis (1): Server Startup and Initialization

From: http://flychao88.iteye.com/blog/1553058

See also: http://www.360doc.com/content/12/1120/22/203871_249194900.shtml

 

NioServerSocketChannelFactory creates the server-side ServerSocketChannel and performs non-blocking I/O with multiple threads. Like Mina, its design follows the Reactor pattern. bossExecutor and workerExecutor are two thread pools: bossExecutor accepts client connections, while workerExecutor performs the non-blocking I/O operations, mainly read and write.





DiscardServer.java:

    package netty;

    import org.jboss.netty.bootstrap.ServerBootstrap;
    import org.jboss.netty.channel.ChannelFactory;
    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.channel.ChannelPipelineFactory;
    import org.jboss.netty.channel.Channels;
    import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
    import org.jboss.netty.handler.codec.string.StringDecoder;
    import org.jboss.netty.handler.codec.string.StringEncoder;

    import java.net.InetSocketAddress;
    import java.util.concurrent.Executors;

    public class DiscardServer {
        public static void main(String[] args) throws Exception {
            ChannelFactory factory = new NioServerSocketChannelFactory(
                    Executors.newCachedThreadPool(),   // boss threads
                    Executors.newCachedThreadPool());  // worker threads
            ServerBootstrap bootstrap = new ServerBootstrap(factory);
            bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
                public ChannelPipeline getPipeline() {
                    ChannelPipeline pipeline = Channels.pipeline();
                    pipeline.addLast("encode", new StringEncoder());
                    pipeline.addLast("decode", new StringDecoder());
                    pipeline.addLast("handler", new DiscardServerHandler());
                    return pipeline;
                }
            });
            bootstrap.setOption("child.tcpNoDelay", true);
            bootstrap.setOption("child.keepAlive", true);
            bootstrap.bind(new InetSocketAddress(8080));
        }
    }



DiscardServerHandler.java:

    package netty;

    import org.jboss.netty.channel.*;

    public class DiscardServerHandler extends SimpleChannelUpstreamHandler {
        @Override
        public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
            // By the time this is called, the StringDecoder has already turned the
            // incoming ChannelBuffer into a String.
            System.out.println("Server received: " + e.getMessage());
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
            e.getCause().printStackTrace();
            Channel ch = e.getChannel();
            ch.close();
        }
    }




TimeClient.java:

    package netty;

    import org.jboss.netty.bootstrap.ClientBootstrap;
    import org.jboss.netty.channel.ChannelFactory;
    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.channel.ChannelPipelineFactory;
    import org.jboss.netty.channel.Channels;
    import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
    import org.jboss.netty.handler.codec.string.StringDecoder;
    import org.jboss.netty.handler.codec.string.StringEncoder;

    import java.net.InetSocketAddress;
    import java.util.concurrent.Executors;

    public class TimeClient {
        public static void main(String[] args) throws Exception {
            ChannelFactory factory = new NioClientSocketChannelFactory(
                    Executors.newCachedThreadPool(),
                    Executors.newCachedThreadPool());
            ClientBootstrap bootstrap = new ClientBootstrap(factory);
            bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
                public ChannelPipeline getPipeline() {
                    ChannelPipeline pipeline = Channels.pipeline();
                    pipeline.addLast("encode", new StringEncoder());
                    pipeline.addLast("decode", new StringDecoder());
                    pipeline.addLast("handler", new TimeClientHandler());
                    return pipeline;
                }
            });
            bootstrap.setOption("tcpNoDelay", true);
            bootstrap.setOption("keepAlive", true);
            bootstrap.connect(new InetSocketAddress("127.0.0.1", 8080));
        }
    }




TimeClientHandler.java:

    package netty;

    import org.jboss.netty.channel.*;

    public class TimeClientHandler extends SimpleChannelUpstreamHandler {
        @Override
        public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) {
            e.getChannel().write("abcd");
        }

        @Override
        public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
            e.getChannel().close();
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
            e.getCause().printStackTrace();
            e.getChannel().close();
        }
    }

II. Server startup and the client connect process

1. Server startup
bootstrap.bind(...) -> triggers the ServerSocketChannel OPEN event (sendUpstream) -> the OPEN event is caught and channel.bind is called -> Channels.bind(...) issues the bind command (sendDownstream) -> the PipelineSink handles it -> the socket is bound and the boss thread is started. The ServerBootstrap is configured with two collaborators: a NioServerSocketChannelFactory and a ChannelPipelineFactory. NioServerSocketChannelFactory holds two thread pools, bossExecutor and workerExecutor; by default workerExecutor runs 2 x (number of processors) NioWorker threads. (A short usage sketch of bind() follows this list.)

2. Handling connections on the server
Once started, the boss listens for accept events; each accepted connection is wrapped as a task and put into the registerTaskQueue of one of the NioWorker threads.

3. Receiving and processing data on the server
NioWorker.run() -> NioWorker.processSelectedKeys() -> NioWorker.read() wraps the bytes read from the SocketChannel into a ChannelBuffer -> fireMessageReceived(channel, buffer) generates an upstream event -> the handlers registered in the pipeline process it.

4. Client connect
As on the server side, a NioClientSocketChannelFactory and a ChannelPipelineFactory are needed. Unlike the server, the client's boss thread does not listen for incoming connections; instead, each locally initiated connect request is wrapped as a task and put into a registerTaskQueue, and the boss consumes the tasks from that queue.
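As a small usage note for step 1 (my own sketch, not part of the original article): in Netty 3, ServerBootstrap.bind() is synchronous. The Binder hands the bind ChannelFuture back to the bootstrap, so by the time bind() returns the boss thread is already accepting connections, and a failure surfaces as a ChannelException. A minimal shutdown-aware variant of the DiscardServer main method could therefore end like this:

    Channel serverChannel = bootstrap.bind(new InetSocketAddress(8080)); // blocks until bound
    System.out.println("Listening on " + serverChannel.getLocalAddress());

    // ... later, on shutdown:
    serverChannel.close().awaitUninterruptibly(); // stop accepting and close the server channel
    factory.releaseExternalResources();           // shut down the boss/worker thread pools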

III. From: http://fbi.taobao.org/?p=86

Overall the server side is very similar to the client side. When creating a NioServerSocketChannelFactory you specify two kinds of threads: boss and worker. Each listening port gets its own boss thread; for example, if the server opens ports 80 and 443, there will be two boss threads. Once a connection has been accepted, it is handed over to a worker thread (a small sketch of the two-port case follows).
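For illustration (my sketch, not from the original; the ports follow the example above), binding the same bootstrap twice creates two server channels and therefore two boss threads, while both listeners share the same worker pool:

    ServerBootstrap bootstrap = new ServerBootstrap(
            new NioServerSocketChannelFactory(
                    Executors.newCachedThreadPool(),    // boss pool
                    Executors.newCachedThreadPool()));  // shared worker pool
    // (pipeline factory and options set as in DiscardServer above)

    Channel httpChannel  = bootstrap.bind(new InetSocketAddress(80));  // boss thread #1
    Channel httpsChannel = bootstrap.bind(new InetSocketAddress(443)); // boss thread #2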

The order in which events occur during server startup:

UpStream ChannelState.OPEN -> DownStream ChannelState.BOUND (needs to be bound) -> UpStream ChannelState.BOUND (now bound) -> DownStream CONNECTED (needs to "connect", which here essentially means registering with the Selector) -> UpStream CONNECTED (connected). (A sketch showing how to observe this sequence with a parent handler follows the Binder snippet below.)

To open the ServerSocket listener, Netty again uses the pipeline & handlers approach: internally it builds a DefaultPipeline (the boss pipeline) and adds one upstream handler, the Binder:

    ChannelHandler binder = new Binder(localAddress, futureQueue);
    ChannelHandler parentHandler = getParentHandler();

    ChannelPipeline bossPipeline = pipeline();
    bossPipeline.addLast("binder", binder);
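The getParentHandler() call in the snippet above is the hook for observing the startup sequence listed earlier: ServerBootstrap places the user-supplied parent handler in this boss pipeline next to the Binder, so it sees the server channel's own events rather than those of accepted child channels. A rough logging sketch (my addition, not from the original article; set the handler before calling bind()):

    bootstrap.setParentHandler(new SimpleChannelHandler() {
        @Override
        public void handleUpstream(ChannelHandlerContext ctx, ChannelEvent e) throws Exception {
            if (e instanceof ChannelStateEvent) {
                System.out.println("UpStream:   " + e); // e.g. OPEN, BOUND
            }
            super.handleUpstream(ctx, e);   // keep forwarding the event
        }

        @Override
        public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent e) throws Exception {
            if (e instanceof ChannelStateEvent) {
                System.out.println("DownStream: " + e); // e.g. the bind request
            }
            super.handleDownstream(ctx, e);
        }
    });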

After the channel has been created, an UpStream ChannelState.OPEN event is fired:

    channel.getPipeline().sendUpstream(
            new UpstreamChannelStateEvent(
                    channel, ChannelState.OPEN, Boolean.TRUE));

 

The Binder registered earlier handles this event and issues a downstream bind event, telling the framework that the channel should be bound to the specified address; the bind logic is then executed (a rough sketch of the Binder itself follows).
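As a reading aid, here is my simplified sketch of ServerBootstrap's inner Binder, reconstructed from the flow described above (the real class also copies the parent channel options and handles exceptions; field names here are illustrative):

    // Simplified sketch of ServerBootstrap's internal Binder (upstream-only handler).
    private final class Binder extends SimpleChannelUpstreamHandler {
        private final SocketAddress localAddress;
        private final BlockingQueue<ChannelFuture> futureQueue;

        Binder(SocketAddress localAddress, BlockingQueue<ChannelFuture> futureQueue) {
            this.localAddress = localAddress;
            this.futureQueue = futureQueue;
        }

        @Override
        public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) {
            try {
                // Install the user's ChannelPipelineFactory on the server channel so that
                // accepted child channels get the encoder/decoder/handler pipeline.
                evt.getChannel().getConfig().setPipelineFactory(getPipelineFactory());
            } finally {
                ctx.sendUpstream(evt);
            }
            // Turn the upstream OPEN event into a downstream bind request; bind() sends a
            // ChannelState.BOUND event down the boss pipeline to the sink, and the resulting
            // future is handed back to ServerBootstrap.bind(), which waits on it.
            futureQueue.offer(evt.getChannel().bind(localAddress));
        }
    }

The actual socket bind is then carried out in NioServerSocketPipelineSink: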

 

    // NioServerSocketPipelineSink
    private void bind(
            NioServerSocketChannel channel, ChannelFuture future,
            SocketAddress localAddress) {

        boolean bound = false;
        boolean bossStarted = false;
        try {
            channel.socket.socket().bind(localAddress, channel.getConfig().getBacklog());
            bound = true;

            future.setSuccess();
            fireChannelBound(channel, channel.getLocalAddress());

            // Take a thread from the boss pool and hand the channel to the Boss class.
            Executor bossExecutor =
                ((NioServerSocketChannelFactory) channel.getFactory()).bossExecutor;
            DeadLockProofWorker.start(bossExecutor,
                    new ThreadRenamingRunnable(new Boss(channel),
                            "New I/O server boss #" + id + " (" + channel + ')'));
            bossStarted = true;
        } catch (Throwable t) {
            future.setFailure(t);
            fireExceptionCaught(channel, t);
        } finally {
            if (!bossStarted && bound) {
                close(channel, future);
            }
        }
    }

In the Boss class the selector registration is similar to the client side, except that the server channel only registers interest in the ACCEPT event:

    Boss(NioServerSocketChannel channel) throws IOException {
        this.channel = channel;
        selector = Selector.open();

        boolean registered = false;
        try {
            channel.socket.register(selector, SelectionKey.OP_ACCEPT);
            registered = true;
        } finally {
            if (!registered) {
                closeSelector();
            }
        }

        channel.selector = selector;
    }

When a new connection comes in, it is handed to the worker pool: the boss is only responsible for accepting it, and the new SocketChannel is registered with one of the workers. From that point on the processing is exactly the same as on the client side.

    public void run() {
        final Thread currentThread = Thread.currentThread();
        channel.shutdownLock.lock();
        try {
            for (;;) {
                try {
                    if (selector.select(1000) > 0) {
                        selector.selectedKeys().clear();
                    }

                    // accept connections in a for loop until no new connection is ready
                    for (;;) {
                        SocketChannel acceptedSocket = channel.socket.accept();
                        if (acceptedSocket == null) {
                            break;
                        }
                        registerAcceptedChannel(acceptedSocket, currentThread);
                    }
                    // ...
                }
            }
        } finally {
            channel.shutdownLock.unlock();
            closeSelector();
        }
    }

    private void registerAcceptedChannel(SocketChannel acceptedSocket, Thread currentThread) {
        try {
            ChannelPipeline pipeline =
                channel.getConfig().getPipelineFactory().getPipeline();
            // Pick a worker and register the newly accepted connection (channel) with it.
            NioWorker worker = nextWorker();

            NioAcceptedSocketChannel acceptChannel = new NioAcceptedSocketChannel(
                    channel.getFactory(), pipeline, channel,
                    NioServerSocketPipelineSink.this, acceptedSocket,
                    worker, currentThread);
            worker.register(acceptChannel, null);
        } catch (Exception e) {
            // ...
        }
    }

A few points to note:

1. It is best not to let the boss and the workers share the same ThreadPool. If they do, the boss not only has to handle new connections; some channels may also end up registered directly on the boss's selector, and then the boss is no longer just a boss and becomes even more overloaded than the workers.

2. Because a channel's I/O reads and its handlers run on the same worker thread, a handler that takes too long prevents subsequent events on that thread from being processed promptly (there is a separate solution for this; one common approach is sketched below).
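As a sketch of that separate solution (my addition, not part of the original article): Netty 3 ships an ExecutionHandler that hands events off to a business thread pool, typically an OrderedMemoryAwareThreadPoolExecutor so that events for the same channel keep their order. The pool sizes below are illustrative only:

    import org.jboss.netty.handler.execution.ExecutionHandler;
    import org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor;

    // Share one ExecutionHandler instance across all pipelines.
    final ExecutionHandler executionHandler = new ExecutionHandler(
            new OrderedMemoryAwareThreadPoolExecutor(
                    16,          // core business threads (illustrative)
                    1048576,     // max pending bytes per channel
                    67108864));  // max pending bytes in total

    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
        public ChannelPipeline getPipeline() {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast("encode", new StringEncoder());
            pipeline.addLast("decode", new StringDecoder());
            // Everything added after this point runs on the business pool,
            // so a slow handler no longer blocks the I/O (worker) thread.
            pipeline.addLast("execution", executionHandler);
            pipeline.addLast("handler", new DiscardServerHandler());
            return pipeline;
        }
    });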


Reposted from: liao492006.iteye.com/blog/1820687