Server 3: Multithreaded epoll

#include <fcntl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <signal.h>
#include <sys/wait.h>
#include <sys/epoll.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <vector>
#include <algorithm>
#include <iostream>
#include <pthread.h>
#include <sys/types.h>
#include <unistd.h>      // for read/write/close and syscall
#include <sys/syscall.h> // for __NR_gettid

#define gettid() syscall(__NR_gettid)

typedef std::vector<struct epoll_event> EventList;

#define ERR_EXIT(m) \
    do \
    { \
        perror(m); \
        exit(EXIT_FAILURE); \
    } while(0)

void* handleMessage(void* para)
{
    char recvBuf[1024] = {0};
    int ret = 999;
    int socketfd = *(int *)para;

    while(true)
    {
        ret = read(socketfd, recvBuf, sizeof(recvBuf) - 1); // read a chunk of client data
        if(ret < 0)
        {
            // The socket is non-blocking, so EAGAIN just means the receive
            // buffer is drained; treat this event as fully handled.
            if(errno == EAGAIN)
            {
                printf("EAGAIN\n");
                break;
            }
            else
            {
                printf("read error! errno:%d\n", errno);
                ret = 0; // treat a real read error like a closed connection
                break;
            }
        }
        else if(ret == 0)
        {
            // The peer has closed its end of the connection normally.
            break;
        }
        else
        {
            recvBuf[ret] = '\0'; // make the buffer printable as a C string
            pthread_t tid1 = pthread_self(); // user-space ID, same value pthread_create reported
            long int tid2 = gettid();        // kernel thread ID
            std::cout << tid1 << " receive data: " << recvBuf;
            std::cout << tid2 << " receive data: " << recvBuf << std::endl;
            write(socketfd, recvBuf, ret); // echo back exactly what was read
        }
    }
    // Returning the fd pointer tells the caller the connection should be closed.
    if(ret == 0)
        return para;
    return NULL;
}

int main(void)
{
    // TCP is a full-duplex channel, which can be viewed as two simplex channels,
    // one owned by each endpoint. When the peer calls close, its intent is to shut
    // down both channels, but this end only receives a FIN. Under TCP semantics,
    // that FIN merely says the peer closed the channel it was responsible for, and
    // this end may still send data. In other words, because of this limitation of
    // the protocol, an endpoint cannot tell whether the peer called close or shutdown.
    // Calling read on a socket that has received a FIN returns 0 once the receive
    // buffer is empty; this is the usual "connection closed" signal. But the first
    // write after that will still report success if the send buffer has room, and
    // the segment it sends provokes an RST from the peer, whose socket is fully
    // closed and neither sends nor receives. A second write (assumed to happen
    // after the RST arrives) raises SIGPIPE, which kills the process by default.
    // To avoid that, catch SIGPIPE or ignore it by installing SIG_IGN as its
    // handler: the second write then returns -1 with errno set to EPIPE, so the
    // program knows the peer has closed.
    signal(SIGPIPE, SIG_IGN); // keep the process alive on broken pipes
    // Ignoring SIGCHLD is a common performance trick for concurrent servers:
    // they often fork many children, and each terminated child must normally be
    // reaped with wait() by the server process. Setting the disposition to SIG_IGN
    // lets the kernel hand zombie children straight to init for cleanup, so large
    // numbers of zombies never tie up system resources. (Linux only)
    signal(SIGCHLD, SIG_IGN);

    int idlefd = open("/dev/null", O_RDONLY | O_CLOEXEC);
    int listenfd;
    //if ((listenfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0)
    if ((listenfd = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, IPPROTO_TCP)) < 0)
        ERR_EXIT("socket");

    struct sockaddr_in servaddr;
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(5188);
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);

    int on = 1;
    if (setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0)
        ERR_EXIT("setsockopt");

    if (bind(listenfd, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0)
        ERR_EXIT("bind");
    if (listen(listenfd, SOMAXCONN) < 0)
        ERR_EXIT("listen");

    //create the epoll instance
    int epollfd;
    epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (epollfd == -1)
        ERR_EXIT("epoll_create1");

    //register an EPOLLIN event for the listening socket
    struct epoll_event event;
    event.data.fd = listenfd;
    event.events = EPOLLIN | EPOLLET;
    epoll_ctl(epollfd, EPOLL_CTL_ADD, listenfd, &event);

    EventList events(16);
    struct sockaddr_in peeraddr;
    socklen_t peerlen;
    int connfd;

    std::vector<int> clients;

    int nready;
    while (1)
    {
        //wait for active events
        nready = epoll_wait(epollfd, &*events.begin(), static_cast<int>(events.size()), -1);
        if (nready == -1)
        {
            if (errno == EINTR)
                continue;

            ERR_EXIT("epoll_wait");
        }
        if (nready == 0)    // nothing happened
            continue;

        //double the capacity if the event array was filled completely
        if ((size_t)nready == events.size())
            events.resize(events.size()*2);

        //handle all active events
        for (int i = 0; i < nready; ++i)
        {
            // An event on the listening socket means a new connection has
            // arrived; accept it and register it with epoll.
            if (events[i].data.fd == listenfd)
            {
                peerlen = sizeof(peeraddr);
                connfd = ::accept4(listenfd, (struct sockaddr*)&peeraddr,
                                   &peerlen, SOCK_NONBLOCK | SOCK_CLOEXEC);

                if (connfd == -1)
                {
                    // Out of file descriptors: release the reserve fd, accept and
                    // immediately close the pending connection, then re-reserve
                    // /dev/null so the trick works next time.
                    if (errno == EMFILE)
                    {
                        close(idlefd);
                        idlefd = accept(listenfd, NULL, NULL);
                        close(idlefd);
                        idlefd = open("/dev/null", O_RDONLY | O_CLOEXEC);
                        continue;
                    }
                    else
                        ERR_EXIT("accept4");
                }

                std::cout<<"ip="<<inet_ntoa(peeraddr.sin_addr)<<
                           " port="<<ntohs(peeraddr.sin_port)<<std::endl;

                clients.push_back(connfd);

                event.data.fd = connfd;
                event.events = EPOLLIN | EPOLLET;
                epoll_ctl(epollfd, EPOLL_CTL_ADD, connfd, &event);
            }
            else if (events[i].events & EPOLLIN)
            {
                connfd = events[i].data.fd;
                if (connfd < 0)
                    continue;

                pthread_attr_t attr;
                pthread_t threadId;

                // Initialize the attributes to their defaults and make the thread
                // a system-scope thread (it competes for the CPU with all threads
                // in the system).
                pthread_attr_init(&attr);
                pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
                // Make the thread detached:
                //pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

                if(pthread_create(&threadId, &attr, handleMessage, (void*)&(events[i].data.fd)))
                {
                    perror("pthread_create error!");
                    exit(EXIT_FAILURE);
                }
                std::cout << "Create thread: " << threadId << std::endl;

                // Join right away: the worker borrows a pointer into events[i],
                // and its return value tells us whether to close the connection.
                // Note this blocks the event loop until the worker finishes.
                void* rmpfd = NULL;
                pthread_join(threadId, &rmpfd);

                if(rmpfd != NULL)
                {
                    int rmfd = *(int*)rmpfd;
                    std::cout<<"client close"<<std::endl;
                    close(rmfd);
                    event = events[i];
                    epoll_ctl(epollfd, EPOLL_CTL_DEL, rmfd, &event);
                    clients.erase(std::remove(clients.begin(), clients.end(), rmfd), clients.end());
                    continue;
                }
            }

        }
    }

    return 0;
}
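
To exercise the server, here is a minimal test client sketch. It assumes the server is running locally on the hard-coded port 5188; the file name client.cpp and the message text are illustrative only.

// client.cpp -- minimal echo test client (illustrative sketch).
// Build: g++ client.cpp -o client
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    int sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0)
    {
        perror("socket");
        return 1;
    }

    struct sockaddr_in servaddr;
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(5188); // same port the server listens on
    inet_pton(AF_INET, "127.0.0.1", &servaddr.sin_addr);

    if (connect(sockfd, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0)
    {
        perror("connect");
        return 1;
    }

    const char msg[] = "hello epoll\n"; // illustrative payload
    write(sockfd, msg, strlen(msg));

    char buf[1024] = {0};
    ssize_t n = read(sockfd, buf, sizeof(buf) - 1); // block until the echo arrives
    if (n > 0)
        printf("echoed back: %s", buf);

    close(sockfd);
    return 0;
}

Each connected client should see its message echoed back, while the server prints the user-space and kernel thread IDs of the worker that handled it.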

Source: www.cnblogs.com/venjin/p/9184586.html