Thoroughly Learn to Use epoll (VI): A Summary of ET-Mode Issues

Editor: wind over the summer (ChinaUnix blog)

6.1 Why must ET mode work with non-blocking file descriptors?

    Because in ET mode, a read or write must continue until the call would block or fails (for a read, you can stop once the number of bytes actually returned is less than the number requested). If the file descriptor is not non-blocking, this "keep reading/writing until done" loop will eventually block inside read or write. The thread is then stuck there instead of in epoll_wait, and the tasks on the other file descriptors starve.
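To make this concrete, here is a minimal sketch (an illustration, not code from the original article) of the drain-until-EAGAIN read loop an ET handler needs; fd is assumed to have been made non-blocking beforehand.

/* Read side of an ET handler. "fd" must already be non-blocking,
 * e.g. via fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK). */
#include <errno.h>
#include <unistd.h>

static void drain_read(int fd) {
    char buf[4096];
    for (;;) {
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n > 0)
            continue;          /* process buf[0..n) here, then keep reading */
        if (n == 0)
            break;             /* peer closed the connection */
        if (errno == EAGAIN || errno == EWOULDBLOCK)
            break;             /* kernel buffer drained: safe to return to epoll_wait */
        if (errno == EINTR)
            continue;          /* interrupted by a signal: retry */
        break;                 /* real error: report/close fd as appropriate */
    }
}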

6.2 The difference between ET and LT

LT: level-triggered. Its efficiency is lower than ET's, especially under high concurrency and heavy traffic, but the coding requirements are lower and problems are less likely to occur. When writing an LT-mode service: as long as the data has not been fully read, the kernel keeps notifying you, so you do not need to worry about missing an event.

ET: edge-triggered. Very efficient: under high concurrency and heavy traffic it makes far fewer epoll system calls than LT, hence the higher efficiency. But the programming requirements are higher; every request must be handled carefully, otherwise events are easily lost.
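At registration time the only difference is the EPOLLET flag. A small sketch (a hypothetical helper, not from the article), assuming epfd and connfd already exist:

#include <sys/epoll.h>

/* Register connfd for read events on epfd: level-triggered by default,
 * edge-triggered when "et" is nonzero. In the ET case connfd must be
 * non-blocking. */
static int add_readable(int epfd, int connfd, int et) {
    struct epoll_event ev;
    ev.data.fd = connfd;
    ev.events = et ? (EPOLLIN | EPOLLET)   /* ET: notified only on new arrivals */
                   : EPOLLIN;              /* LT: notified while data remains unread */
    return epoll_ctl(epfd, EPOLL_CTL_ADD, connfd, &ev);
}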

 

Here is an example to illustrate the difference between LT and ET (both in non-blocking mode; blocking mode is not worth discussing, as its efficiency is too low):

With LT, when accept returns you can handle just the connection that was established and then wait for the next epoll_wait notification, exactly as with select.

With ET, however, when accept returns you cannot go straight back to epoll_wait after handling the current connection; you must keep calling accept in a loop until it returns -1 with errno == EAGAIN.

In essence: compared with LT, ET improves concurrency efficiency by reducing the number of system calls.
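A sketch of the two accept strategies just described (the handler functions and the handle_new_conn callback are assumptions made for illustration):

#include <errno.h>
#include <sys/socket.h>

void handle_new_conn(int fd);   /* hypothetical callback */

/* LT: accepting one connection is enough; if more are pending,
 * epoll_wait will report the listening socket as readable again. */
void on_listen_readable_lt(int listenfd) {
    int conn = accept(listenfd, NULL, NULL);
    if (conn >= 0)
        handle_new_conn(conn);
}

/* ET: must loop until accept() fails with EAGAIN, otherwise connections
 * already queued in the backlog may never be reported again. */
void on_listen_readable_et(int listenfd) {
    for (;;) {
        int conn = accept(listenfd, NULL, NULL);
        if (conn < 0) {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                break;          /* backlog drained */
            if (errno == EINTR)
                continue;
            break;              /* other error: log/handle as needed */
        }
        handle_new_conn(conn);
    }
}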

6.3 A Tencent back-end development interview question


    Using the Linux epoll model in level-triggered (LT) mode: when a socket is writable, the writable event is triggered over and over again. How do you handle this?

The first, most common approach:
add the socket to epoll only when you actually have data to write, and wait for the writable event. When the writable event arrives, call write or send to transmit the data. Once all the data has been sent, remove the socket from epoll.

The drawback of this approach is that even when the amount of data to send is small, the socket still has to be added to epoll and removed from it after the write, which carries some operational cost.

An improved approach:
do not add the socket to epoll at first. When you need to write data, call write or send directly. If the call returns EAGAIN, add the socket to epoll and let epoll drive the sending; once all the data has been sent, remove the socket from epoll again.

The advantage of this approach: when the amount of data is small, epoll event handling can be avoided entirely, which improves efficiency.
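A sketch of this improved approach under LT mode. It uses EPOLL_CTL_MOD to toggle EPOLLOUT on a socket already registered for EPOLLIN; the per-connection buffer in struct conn and the function names are assumptions for illustration, and the article's wording ("add to / remove from epoll") could equally be realized with EPOLL_CTL_ADD/DEL.

#include <errno.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

struct conn {
    int    fd;
    char   out[4096];   /* pending outgoing bytes (sized arbitrarily here) */
    size_t len;         /* how many bytes are still unsent */
};

void send_data(int epfd, struct conn *c, const char *data, size_t n) {
    /* 1. Try to write directly, without involving epoll. */
    ssize_t sent = write(c->fd, data, n);
    if (sent < 0) {
        if (errno != EAGAIN && errno != EWOULDBLOCK)
            return;             /* real error: caller should close the connection */
        sent = 0;
    }
    if ((size_t)sent == n)
        return;                 /* everything went out: epoll never touched */

    /* 2. Kernel buffer full: stash the remainder (assumed to fit in c->out
     *    for brevity) and ask epoll to report EPOLLOUT. */
    c->len = n - (size_t)sent;
    memcpy(c->out, data + sent, c->len);
    struct epoll_event ev = { .events = EPOLLIN | EPOLLOUT, .data.ptr = c };
    epoll_ctl(epfd, EPOLL_CTL_MOD, c->fd, &ev);
}

void on_writable(int epfd, struct conn *c) {
    /* 3. Drain the pending buffer, then drop EPOLLOUT so LT mode stops
     *    reporting the (almost always writable) socket. */
    ssize_t sent = write(c->fd, c->out, c->len);
    if (sent > 0) {
        c->len -= (size_t)sent;
        memmove(c->out, c->out + sent, c->len);
    }
    if (c->len == 0) {
        struct epoll_event ev = { .events = EPOLLIN, .data.ptr = c };
        epoll_ctl(epfd, EPOLL_CTL_MOD, c->fd, &ev);
    }
}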

6.4 When should you use ET?

Very simple: whenever you want to improve your program's efficiency.

Finally, here is a complete epoll example:


#include <sys/socket.h>
#include <sys/wait.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <fcntl.h>
#include <errno.h>

#define MAX_EVENTS 10
#define PORT 8080

/* Set a socket to non-blocking mode. */
void setnonblocking(int sockfd) {
    int opts;
    opts = fcntl(sockfd, F_GETFL);
    if (opts < 0) {
        perror("fcntl(F_GETFL)");
        exit(1);
    }
    opts = opts | O_NONBLOCK;
    if (fcntl(sockfd, F_SETFL, opts) < 0) {
        perror("fcntl(F_SETFL)");
        exit(1);
    }
}

int main() {
    struct epoll_event ev, events[MAX_EVENTS]; /* ev registers events, events receives returned events */
    int listenfd, conn_sock, nfds, epfd, fd, i, nread, n;
    socklen_t addrlen;
    struct sockaddr_in local, remote;
    char buf[BUFSIZ];

    /* Create the listening socket. */
    if ((listenfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        perror("socket");
        exit(1);
    }
    setnonblocking(listenfd); /* the listening socket is also non-blocking */
    bzero(&local, sizeof(local));
    local.sin_family = AF_INET;
    local.sin_addr.s_addr = htonl(INADDR_ANY);
    local.sin_port = htons(PORT);
    if (bind(listenfd, (struct sockaddr *)&local, sizeof(local)) < 0) {
        perror("bind");
        exit(1);
    }
    listen(listenfd, 20);

    epfd = epoll_create(MAX_EVENTS);
    if (epfd == -1) {
        perror("epoll_create");
        exit(EXIT_FAILURE);
    }
    ev.events = EPOLLIN;
    ev.data.fd = listenfd;
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, listenfd, &ev) == -1) { /* watch listenfd */
        perror("epoll_ctl: listen_sock");
        exit(EXIT_FAILURE);
    }

    for (;;) {
        nfds = epoll_wait(epfd, events, MAX_EVENTS, -1);
        if (nfds == -1) {
            perror("epoll_wait");
            exit(EXIT_FAILURE);
        }
        for (i = 0; i < nfds; ++i) {
            fd = events[i].data.fd;
            if (fd == listenfd) {
                /* Accept in a loop until the backlog is drained. */
                addrlen = sizeof(remote);
                while ((conn_sock = accept(listenfd, (struct sockaddr *)&remote,
                                &addrlen)) > 0) {
                    setnonblocking(conn_sock); /* ET mode is used below, so the fd must be non-blocking */
                    ev.events = EPOLLIN | EPOLLET;
                    ev.data.fd = conn_sock;
                    if (epoll_ctl(epfd, EPOLL_CTL_ADD, conn_sock, &ev) == -1) { /* watch the connected socket for reads */
                        perror("epoll_ctl: add");
                        exit(EXIT_FAILURE);
                    }
                    addrlen = sizeof(remote);
                }
                if (conn_sock == -1) {
                    if (errno != EAGAIN && errno != ECONNABORTED
                            && errno != EPROTO && errno != EINTR)
                        perror("accept");
                }
                continue;
            }
            if (events[i].events & EPOLLIN) {
                /* ET: keep reading until everything has been read. */
                n = 0;
                while ((nread = read(fd, buf + n, BUFSIZ - 1 - n)) > 0) {
                    n += nread;
                }
                if (nread == -1 && errno != EAGAIN) {
                    perror("read error");
                }
                ev.data.fd = fd;
                ev.events = events[i].events | EPOLLOUT | EPOLLET; /* MOD: also watch for writability, keep ET */
                if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev) == -1) {
                    perror("epoll_ctl: mod");
                }
            }
            if (events[i].events & EPOLLOUT) {
                sprintf(buf, "HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\nHello World", 11);
                int nwrite, data_size = strlen(buf);
                n = data_size;
                /* ET: keep writing until all pending data has been written. */
                while (n > 0) {
                    nwrite = write(fd, buf + data_size - n, n);
                    if (nwrite < n) {
                        if (nwrite == -1 && errno != EAGAIN) {
                            perror("write error");
                        }
                        break;
                    }
                    n -= nwrite;
                }
                close(fd);
            }
        }
    }
    return 0;
}


Source: www.cnblogs.com/abelchao/p/11703795.html